file_name (large_string, lengths 4-140) | prefix (large_string, lengths 0-12.1k) | suffix (large_string, lengths 0-12k) | middle (large_string, lengths 0-7.51k) | fim_type (large_string, 4 classes) | values
---|---|---|---|---|
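Each row below is one fill-in-the-middle (FIM) example: prefix and suffix hold the code surrounding a masked span, middle holds the masked span itself, and fim_type records how that span was carved out of the file (the four classes appearing in the rows are random_line_split, identifier_body, identifier_name, and conditional_block). A minimal sketch of that row schema, assuming nothing beyond the columns above; the FimExample class and reconstruct_source helper are illustrative names, not part of the dataset's own tooling:

```python
from dataclasses import dataclass

@dataclass
class FimExample:
    """One row of the table below (illustrative model, not the dataset's API)."""
    file_name: str  # e.g. "ExportGltf.ts", "v2.py", "forminput.js"
    prefix: str     # code appearing before the masked span
    suffix: str     # code appearing after the masked span
    middle: str     # the masked span itself, i.e. the completion target
    fim_type: str   # "random_line_split", "identifier_body", "identifier_name" or "conditional_block"

def reconstruct_source(row: FimExample) -> str:
    # Splicing the three text columns back together recovers the original file text.
    return row.prefix + row.middle + row.suffix
```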
ExportGltf.ts
|
const pbrMetallicRoughness: GltfMaterialPbrMetallicRoughness = {
baseColorTexture: { index: GltfGlobals.gltf.textures.length - 1 },
baseColorFactor: [1, 1, 1, 1],
metallicFactor: 0,
roughnessFactor: 1,
};
const material: GltfMaterial = ({ pbrMetallicRoughness, doubleSided: true });
result = GltfGlobals.gltf.materials.length;
GltfGlobals.gltf.materials.push(material);
GltfGlobals.textureToMaterialMap.set(textureId, result);
return result;
}
function findOrAddMaterialIndexForColor(color: number): number {
let result = GltfGlobals.colorToMaterialMap.get(color);
if (result !== undefined) return result;
const rgb = ColorDef.getColors(color);
const pbrMetallicRoughness: GltfMaterialPbrMetallicRoughness = {
baseColorFactor: [rgb.r / 255, rgb.g / 255, rgb.b / 255, (255 - rgb.t) / 255],
metallicFactor: 0,
roughnessFactor: 1,
};
const material: GltfMaterial = ({ pbrMetallicRoughness, doubleSided: true });
if (rgb.t > 10) material.alphaMode = "BLEND";
result = GltfGlobals.gltf.materials.length;
GltfGlobals.gltf.materials.push(material);
GltfGlobals.colorToMaterialMap.set(color, result);
return result;
}
function addMeshIndices(indices: Int32Array) {
GltfGlobals.gltf.accessors.push({
bufferView: GltfGlobals.gltf.bufferViews.length,
byteOffset: 0,
componentType: AccessorComponentType.UInt32,
count: indices.length,
type: "SCALAR",
});
GltfGlobals.gltf.bufferViews.push({
buffer: 0,
target: BufferViewTarget.ElementArrayBuffer,
byteOffset: GltfGlobals.binBytesWritten,
byteLength: indices.byteLength,
});
GltfGlobals.binBytesWritten += indices.byteLength;
fs.writeSync(GltfGlobals.binFile, indices);
}
function addMeshPointsAndNormals(points: Float64Array, normals: Float32Array) {
// GLTF is RHS with Y-up, iModel.js is RHS with Z-up
const convertPoint = (outArray: Float32Array, outIndex: number, x: number, y: number, z: number) => {
outArray[outIndex] = x;
outArray[outIndex + 1] = z;
outArray[outIndex + 2] = -y;
};
const outPoints = new Float32Array(points.length);
for (let i = 0; i < points.length; i += 3)
convertPoint(outPoints, i, points[i], points[i + 1], points[i + 2]);
const outNormals = new Float32Array(normals.length);
for (let i = 0; i < normals.length; i += 3)
convertPoint(outNormals, i, normals[i], normals[i + 1], normals[i + 2]);
GltfGlobals.gltf.bufferViews.push({
buffer: 0,
target: BufferViewTarget.ArrayBuffer,
byteOffset: GltfGlobals.binBytesWritten,
byteLength: outPoints.byteLength + outNormals.byteLength,
byteStride: 12,
});
fs.writeSync(GltfGlobals.binFile, outPoints);
fs.writeSync(GltfGlobals.binFile, outNormals);
GltfGlobals.binBytesWritten += outPoints.byteLength + outNormals.byteLength;
const minPos = [outPoints[0], outPoints[1], outPoints[2]];
const maxPos = Array.from(minPos);
for (let i = 0; i < outPoints.length; i += 3) {
for (let j = 0; j < 3; ++j) {
minPos[j] = Math.min(minPos[j], outPoints[i + j]);
maxPos[j] = Math.max(maxPos[j], outPoints[i + j]);
}
}
GltfGlobals.gltf.accessors.push({
bufferView: GltfGlobals.gltf.bufferViews.length - 1,
byteOffset: 0,
componentType: AccessorComponentType.Float,
count: outPoints.length / 3,
type: "VEC3",
max: maxPos,
min: minPos,
});
GltfGlobals.gltf.accessors.push({
bufferView: GltfGlobals.gltf.bufferViews.length - 1,
byteOffset: outPoints.byteLength,
componentType: AccessorComponentType.Float,
count: outNormals.length / 3,
type: "VEC3",
});
}
function addMeshParams(params: Float32Array) {
const outParams = new Float32Array(params.length);
for (let i = 0; i < params.length; i += 2) {
outParams[i] = params[i];
outParams[i + 1] = 1 - params[i + 1]; // Flip to match GLTF spec
}
GltfGlobals.gltf.bufferViews.push({
buffer: 0,
target: BufferViewTarget.ArrayBuffer,
byteOffset: GltfGlobals.binBytesWritten,
byteLength: outParams.byteLength,
byteStride: 8,
});
fs.writeSync(GltfGlobals.binFile, outParams);
GltfGlobals.binBytesWritten += outParams.byteLength;
GltfGlobals.gltf.accessors.push({
bufferView: GltfGlobals.gltf.bufferViews.length - 1,
byteOffset: 0,
componentType: AccessorComponentType.Float,
count: outParams.length / 2,
type: "VEC2",
});
}
function addMesh(mesh: ExportGraphicsMesh, color: number, textureId?: Id64String) {
const material = textureId !== undefined ? findOrAddMaterialIndexForTexture(textureId) :
findOrAddMaterialIndexForColor(color);
const primitive: GltfMeshPrimitive = {
mode: MeshPrimitiveMode.GlTriangles,
material,
indices: GltfGlobals.gltf.accessors.length,
attributes: {
// eslint-disable-next-line @typescript-eslint/naming-convention
POSITION: GltfGlobals.gltf.accessors.length + 1,
// eslint-disable-next-line @typescript-eslint/naming-convention
NORMAL: GltfGlobals.gltf.accessors.length + 2,
},
};
if (textureId !== undefined)
primitive.attributes.TEXCOORD_0 = GltfGlobals.gltf.accessors.length + 3;
GltfGlobals.gltf.meshes.push({ primitives: [primitive] });
addMeshIndices(mesh.indices);
addMeshPointsAndNormals(mesh.points, mesh.normals);
if (textureId !== undefined) addMeshParams(mesh.params);
}
function addMeshNode(name: string) {
GltfGlobals.gltf.scenes[0].nodes.push(GltfGlobals.gltf.nodes.length);
GltfGlobals.gltf.nodes.push({ name, mesh: GltfGlobals.gltf.meshes.length });
}
function addLines(lines: ExportGraphicsLines, color: number) {
const primitive: GltfMeshPrimitive = {
mode: MeshPrimitiveMode.GlLines,
material: findOrAddMaterialIndexForColor(color),
indices: GltfGlobals.gltf.accessors.length,
attributes: {
// eslint-disable-next-line @typescript-eslint/naming-convention
POSITION: GltfGlobals.gltf.accessors.length + 1,
},
};
GltfGlobals.gltf.meshes.push({ primitives: [primitive] });
addMeshIndices(lines.indices);
// GLTF is RHS with Y-up, iModel.js is RHS with Z-up
const convertPoint = (
|
{
let result = GltfGlobals.textureToMaterialMap.get(textureId);
if (result !== undefined) return result;
// glTF-Validator complains if textures/images are defined but empty - only define them once the first texture is added.
if (GltfGlobals.gltf.textures === undefined) {
GltfGlobals.gltf.textures = [];
GltfGlobals.gltf.images = [];
GltfGlobals.gltf.samplers = [{}]; // Just use default sampler values
}
const textureInfo = GltfGlobals.iModel.elements.getElement<Texture>(textureId);
const textureName = textureId + (textureInfo.format === ImageSourceFormat.Jpeg ? ".jpg" : ".png");
const texturePath = path.join(GltfGlobals.texturesDir, textureName);
fs.writeFile(texturePath, textureInfo.data, () => { }); // async is fine
const texture: GltfTexture = { source: GltfGlobals.gltf.images!.length, sampler: 0 };
GltfGlobals.gltf.textures.push(texture);
GltfGlobals.gltf.images!.push({ uri: textureName });
|
identifier_body
|
|
ExportGltf.ts
|
.data, () => { }); // async is fine
const texture: GltfTexture = { source: GltfGlobals.gltf.images!.length, sampler: 0 };
GltfGlobals.gltf.textures.push(texture);
GltfGlobals.gltf.images!.push({ uri: textureName });
const pbrMetallicRoughness: GltfMaterialPbrMetallicRoughness = {
baseColorTexture: { index: GltfGlobals.gltf.textures.length - 1 },
baseColorFactor: [1, 1, 1, 1],
metallicFactor: 0,
roughnessFactor: 1,
};
const material: GltfMaterial = ({ pbrMetallicRoughness, doubleSided: true });
result = GltfGlobals.gltf.materials.length;
GltfGlobals.gltf.materials.push(material);
GltfGlobals.textureToMaterialMap.set(textureId, result);
return result;
}
function findOrAddMaterialIndexForColor(color: number): number {
let result = GltfGlobals.colorToMaterialMap.get(color);
if (result !== undefined) return result;
const rgb = ColorDef.getColors(color);
const pbrMetallicRoughness: GltfMaterialPbrMetallicRoughness = {
baseColorFactor: [rgb.r / 255, rgb.g / 255, rgb.b / 255, (255 - rgb.t) / 255],
metallicFactor: 0,
roughnessFactor: 1,
};
const material: GltfMaterial = ({ pbrMetallicRoughness, doubleSided: true });
if (rgb.t > 10) material.alphaMode = "BLEND";
result = GltfGlobals.gltf.materials.length;
GltfGlobals.gltf.materials.push(material);
GltfGlobals.colorToMaterialMap.set(color, result);
return result;
}
function addMeshIndices(indices: Int32Array) {
GltfGlobals.gltf.accessors.push({
bufferView: GltfGlobals.gltf.bufferViews.length,
byteOffset: 0,
componentType: AccessorComponentType.UInt32,
count: indices.length,
type: "SCALAR",
});
GltfGlobals.gltf.bufferViews.push({
buffer: 0,
target: BufferViewTarget.ElementArrayBuffer,
byteOffset: GltfGlobals.binBytesWritten,
byteLength: indices.byteLength,
});
GltfGlobals.binBytesWritten += indices.byteLength;
fs.writeSync(GltfGlobals.binFile, indices);
}
function addMeshPointsAndNormals(points: Float64Array, normals: Float32Array) {
// GLTF is RHS with Y-up, iModel.js is RHS with Z-up
const convertPoint = (outArray: Float32Array, outIndex: number, x: number, y: number, z: number) => {
outArray[outIndex] = x;
outArray[outIndex + 1] = z;
outArray[outIndex + 2] = -y;
};
const outPoints = new Float32Array(points.length);
for (let i = 0; i < points.length; i += 3)
convertPoint(outPoints, i, points[i], points[i + 1], points[i + 2]);
const outNormals = new Float32Array(normals.length);
for (let i = 0; i < normals.length; i += 3)
convertPoint(outNormals, i, normals[i], normals[i + 1], normals[i + 2]);
GltfGlobals.gltf.bufferViews.push({
buffer: 0,
target: BufferViewTarget.ArrayBuffer,
byteOffset: GltfGlobals.binBytesWritten,
byteLength: outPoints.byteLength + outNormals.byteLength,
byteStride: 12,
});
fs.writeSync(GltfGlobals.binFile, outPoints);
fs.writeSync(GltfGlobals.binFile, outNormals);
GltfGlobals.binBytesWritten += outPoints.byteLength + outNormals.byteLength;
const minPos = [outPoints[0], outPoints[1], outPoints[2]];
const maxPos = Array.from(minPos);
for (let i = 0; i < outPoints.length; i += 3) {
for (let j = 0; j < 3; ++j) {
minPos[j] = Math.min(minPos[j], outPoints[i + j]);
maxPos[j] = Math.max(maxPos[j], outPoints[i + j]);
}
}
GltfGlobals.gltf.accessors.push({
bufferView: GltfGlobals.gltf.bufferViews.length - 1,
byteOffset: 0,
componentType: AccessorComponentType.Float,
count: outPoints.length / 3,
type: "VEC3",
max: maxPos,
min: minPos,
});
GltfGlobals.gltf.accessors.push({
bufferView: GltfGlobals.gltf.bufferViews.length - 1,
byteOffset: outPoints.byteLength,
componentType: AccessorComponentType.Float,
count: outNormals.length / 3,
type: "VEC3",
});
}
function addMeshParams(params: Float32Array) {
const outParams = new Float32Array(params.length);
for (let i = 0; i < params.length; i += 2) {
outParams[i] = params[i];
outParams[i + 1] = 1 - params[i + 1]; // Flip to match GLTF spec
}
GltfGlobals.gltf.bufferViews.push({
buffer: 0,
target: BufferViewTarget.ArrayBuffer,
byteOffset: GltfGlobals.binBytesWritten,
byteLength: outParams.byteLength,
byteStride: 8,
});
fs.writeSync(GltfGlobals.binFile, outParams);
GltfGlobals.binBytesWritten += outParams.byteLength;
GltfGlobals.gltf.accessors.push({
bufferView: GltfGlobals.gltf.bufferViews.length - 1,
byteOffset: 0,
componentType: AccessorComponentType.Float,
count: outParams.length / 2,
type: "VEC2",
});
}
|
findOrAddMaterialIndexForColor(color);
const primitive: GltfMeshPrimitive = {
mode: MeshPrimitiveMode.GlTriangles,
material,
indices: GltfGlobals.gltf.accessors.length,
attributes: {
// eslint-disable-next-line @typescript-eslint/naming-convention
POSITION: GltfGlobals.gltf.accessors.length + 1,
// eslint-disable-next-line @typescript-eslint/naming-convention
NORMAL: GltfGlobals.gltf.accessors.length + 2,
},
};
if (textureId !== undefined)
primitive.attributes.TEXCOORD_0 = GltfGlobals.gltf.accessors.length + 3;
GltfGlobals.gltf.meshes.push({ primitives: [primitive] });
addMeshIndices(mesh.indices);
addMeshPointsAndNormals(mesh.points, mesh.normals);
if (textureId !== undefined) addMeshParams(mesh.params);
}
function addMeshNode(name: string) {
GltfGlobals.gltf.scenes[0].nodes.push(GltfGlobals.gltf.nodes.length);
GltfGlobals.gltf.nodes.push({ name, mesh: GltfGlobals.gltf.meshes.length });
}
function addLines(lines: ExportGraphicsLines, color: number) {
const primitive: GltfMeshPrimitive = {
mode: MeshPrimitiveMode.GlLines,
material: findOrAddMaterialIndexForColor(color),
indices: GltfGlobals.gltf.accessors.length,
attributes: {
// eslint-disable-next-line @typescript-eslint/naming-convention
POSITION: GltfGlobals.gltf.accessors.length + 1,
},
};
GltfGlobals.gltf.meshes.push({ primitives: [primitive] });
addMeshIndices(lines.indices);
// GLTF is RHS with Y-up, iModel.js is RHS with Z-up
const convertPoint = (outArray: Float32Array, outIndex: number, x: number, y: number, z: number) => {
outArray[outIndex] = x;
outArray[outIndex + 1] = z;
outArray[outIndex + 2] = -y;
};
const outPoints = new Float32Array(lines.points.length);
for (let i = 0; i < outPoints.length; i += 3)
convertPoint(outPoints, i, lines.points[i], lines.points[i + 1], lines.points[i + 2]);
GltfGlobals.gltf.bufferViews.push({
buffer: 0,
target: BufferViewTarget.ArrayBuffer,
byteOffset: GltfGlobals.binBytesWritten,
byteLength: outPoints.byteLength,
byteStride: 12,
|
function addMesh(mesh: ExportGraphicsMesh, color: number, textureId?: Id64String) {
const material = textureId !== undefined ? findOrAddMaterialIndexForTexture(textureId) :
|
random_line_split
|
v2.py
|
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import txt2csv as t2csv
import glob, os, re
from measurements import perform_filter
OWNER = 'rn'
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_profileDialog(object):
def setupUi(self, profileDialog):
profileDialog.setObjectName(_fromUtf8("profileDialog"))
profileDialog.resize(492, 428)
profileDialog.setContextMenuPolicy(QtCore.Qt.DefaultContextMenu)
profileDialog.setAutoFillBackground(True)
self.buttonBox = QtGui.QDialogButtonBox(profileDialog)
self.buttonBox.setGeometry(QtCore.QRect(30, 220, 341, 32))
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Close)
self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
self.p1text = QtGui.QPlainTextEdit(profileDialog)
self.p1text.setGeometry(QtCore.QRect(50, 50, 104, 21))
self.p1text.setPlainText(_fromUtf8(""))
self.p1text.setObjectName(_fromUtf8("p1text"))
self.profilebtn1 = QtGui.QPushButton(profileDialog)
self.profilebtn1.setGeometry(QtCore.QRect(330, 50, 98, 27))
self.profilebtn1.setObjectName(_fromUtf8("profilebtn1"))
self.choice_icmp = QtGui.QRadioButton(profileDialog)
self.choice_icmp.setGeometry(QtCore.QRect(330, 80, 198, 27))
self.choice_icmp.setObjectName(_fromUtf8("choice_icmp"))
self.choice_icmp.setChecked(True)
self.choice_tcp = QtGui.QRadioButton(profileDialog)
self.choice_tcp.setGeometry(QtCore.QRect(330, 100, 198, 27))
self.choice_tcp.setObjectName(_fromUtf8("choice_tcp"))
self.label = QtGui.QLabel(profileDialog)
self.label.setGeometry(QtCore.QRect(0, 200, 81, 21))
self.label.setObjectName(_fromUtf8("label"))
self.statusLabel = QtGui.QLabel(profileDialog)
self.statusLabel.setGeometry(QtCore.QRect(60, 100, 281, 61))
self.statusLabel.setObjectName(_fromUtf8("statusLabel"))
self.label_2 = QtGui.QLabel(profileDialog)
self.label_2.setGeometry(QtCore.QRect(60, 10, 221, 31))
self.label_2.setObjectName(_fromUtf8("label_2"))
# Train button
self.pushButton_2 = QtGui.QPushButton(profileDialog)
self.pushButton_2.setGeometry(QtCore.QRect(90, 270, 98, 27))
self.pushButton_2.setObjectName(_fromUtf8("pushButton_2"))
self.pushButton_2.setEnabled(True)
self.hostText = QtGui.QTextEdit(profileDialog)
self.hostText.setGeometry(QtCore.QRect(80, 190, 101, 31))
self.hostText.setObjectName(_fromUtf8("hostText"))
# Dropdown Menu
self.cmbUsage = QtGui.QComboBox(profileDialog)
self.cmbUsage.setGeometry(QtCore.QRect(80, 50, 151, 27))
self.cmbUsage.setObjectName(_fromUtf8("cmbUsage"))
self.cmbUsage.addItem(_fromUtf8(""))
self.cmbUsage.addItem(_fromUtf8(""))
self.cmbUsage.addItem(_fromUtf8(""))
self.cmbUsage.addItem(_fromUtf8(""))
self.cmbUsage.addItem(_fromUtf8(""))
self.retranslateUi(profileDialog)
self.pushButton_2.clicked.connect(self.trainbutton)
QtCore.QObject.connect(self.profilebtn1, QtCore.SIGNAL(_fromUtf8("clicked(bool)")), self.msgbtn)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), profileDialog.close)
# QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), profileDialog.close)
QtCore.QMetaObject.connectSlotsByName(profileDialog)
def retranslateUi(self, profileDialog):
profileDialog.setWindowTitle(_translate("profileDialog", "Dialog", None))
self.profilebtn1.setText(_translate("profileDialog", "profile", None))
self.label.setText(_translate("profileDialog", "Host Name", None))
self.statusLabel.setText(_translate("profileDialog", "Enter the host name", None))
self.label_2.setText(_translate("profileDialog", "Enter usage % of device", None))
self.pushButton_2.setText(_translate("profileDialog", "Train", None)) # Train button
# Set DropDown Menu
self.cmbUsage.setItemText(0, _translate("profileDialog", "15% Usage 100ns", None))
self.cmbUsage.setItemText(1, _translate("profileDialog", "30% Usage 40ns", None))
self.cmbUsage.setItemText(2, _translate("profileDialog", "50% Usage 30ns", None))
self.cmbUsage.setItemText(3, _translate("profileDialog", "70% Usage 20ns", None))
self.cmbUsage.setItemText(4, _translate("profileDialog", "99% Usage(stuxnet) 99ns", None))
self.choice_icmp.setText(_translate("MainWindow", "ICMP Profile", None))
self.choice_tcp.setText(_translate("MainWindow", "TCP Profile", None))
def assign_details(self, txt):
if txt == 0 or txt == 15:
return "15;100"
elif txt == 1 or txt == 30:
return "30;40"
elif txt == 2 or txt == 50:
return "50;30"
elif txt == 3 or txt == 70:
return "70;20"
elif txt == 4 or txt == 99:
return "99;99"
def msgbtn(self):
self.pushButton_2.setEnabled(True)
txt_index = self.cmbUsage.currentIndex()
txt = self.assign_details(txt_index)
host = self.hostText.toPlainText()
usage = txt.split(';')[0]
task_cycle = txt.split(';')[1]
print usage
if host != "" and usage != "":
msg = QMessageBox()
msg.setIcon(QMessageBox.Information)
msg.setText("The Profiling will take approxmiately 2 minutes,")
msg.setInformativeText("Click OK to start profiling, Popup will let you know once it is done!")
msg.setWindowTitle("Profiler")
msg.setDetailedText(
"Due to our Machine Learning algorithm requiring a large dataset, we require that you profile the SCADA"
" at specific usage for 2 minutes to get most accurate reuslts during detection")
msg.setStandardButtons(QMessageBox.Ok)
msg.exec_()
if self.choice_tcp.isChecked():
os.system('hping3 -c 10000 %s -i u10000 &' % host) ##REAL HPING command
os.system(
'timeout 5m tcpdump -i enp0s3 -w pcap/capture_%s_%s.pcap' % (usage, host)) # REAL tcpdump command
if self.choice_icmp.isChecked():
os.system('hping3 -c 10000 %s -i u10000 --icmp &' % host) ##REAL HPING command
os.system(
'timeout 5m tcpdump -i enp0s3 -w pcap/capture_%s_%s.pcap' % (usage, host)) # REAL tcpdump command
d = QDialog()
b1 = QPushButton("Profiling Done!, Click X", d)
b1.move(175, 75)
d.setWindowTitle("Completion")
d.setWindowModality(Qt.ApplicationModal)
d.exec_()
else:
print "\n Enter host! / or usage "
def retreive_pcaps(self):
files = os.listdir('pcap')
# Crappy regex TODO: regex implementation ; Function retrieves list of all training pcap ;111 excluded
# Reason being 111 -> test data set
filtered = [f for f in files if '.pcap' in f]
filtered = [x for x in filtered if not ('capture_111' in x)]
return filtered
def trainbutton(self):
# Filter data & save frame delta response time to txt file for further calculations
if self.choice_icmp.isChecked():
flag = 'icmp'
# TODO ; fix this import issue
training_pcap_list = self.retreive_pcaps()
for i, item in enumerate(training_pcap_list):
perform_filter(item, 'icmp')
t2csv.ignite(trainer='icmp')
if self.choice_tcp.isChecked():
os
|
#
# WARNING! All changes made in this file will be lost!
import os, sys
from PyQt4 import QtCore, QtGui
|
random_line_split
|
|
v2.py
|
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_profileDialog(object):
def setupUi(self, profileDialog):
profileDialog.setObjectName(_fromUtf8("profileDialog"))
profileDialog.resize(492, 428)
profileDialog.setContextMenuPolicy(QtCore.Qt.DefaultContextMenu)
profileDialog.setAutoFillBackground(True)
self.buttonBox = QtGui.QDialogButtonBox(profileDialog)
self.buttonBox.setGeometry(QtCore.QRect(30, 220, 341, 32))
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Close)
self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
self.p1text = QtGui.QPlainTextEdit(profileDialog)
self.p1text.setGeometry(QtCore.QRect(50, 50, 104, 21))
self.p1text.setPlainText(_fromUtf8(""))
self.p1text.setObjectName(_fromUtf8("p1text"))
self.profilebtn1 = QtGui.QPushButton(profileDialog)
self.profilebtn1.setGeometry(QtCore.QRect(330, 50, 98, 27))
self.profilebtn1.setObjectName(_fromUtf8("profilebtn1"))
self.choice_icmp = QtGui.QRadioButton(profileDialog)
self.choice_icmp.setGeometry(QtCore.QRect(330, 80, 198, 27))
self.choice_icmp.setObjectName(_fromUtf8("choice_icmp"))
self.choice_icmp.setChecked(True)
self.choice_tcp = QtGui.QRadioButton(profileDialog)
self.choice_tcp.setGeometry(QtCore.QRect(330, 100, 198, 27))
self.choice_tcp.setObjectName(_fromUtf8("choice_tcp"))
self.label = QtGui.QLabel(profileDialog)
self.label.setGeometry(QtCore.QRect(0, 200, 81, 21))
self.label.setObjectName(_fromUtf8("label"))
self.statusLabel = QtGui.QLabel(profileDialog)
self.statusLabel.setGeometry(QtCore.QRect(60, 100, 281, 61))
self.statusLabel.setObjectName(_fromUtf8("statusLabel"))
self.label_2 = QtGui.QLabel(profileDialog)
self.label_2.setGeometry(QtCore.QRect(60, 10, 221, 31))
self.label_2.setObjectName(_fromUtf8("label_2"))
# Train button
self.pushButton_2 = QtGui.QPushButton(profileDialog)
self.pushButton_2.setGeometry(QtCore.QRect(90, 270, 98, 27))
self.pushButton_2.setObjectName(_fromUtf8("pushButton_2"))
self.pushButton_2.setEnabled(True)
self.hostText = QtGui.QTextEdit(profileDialog)
self.hostText.setGeometry(QtCore.QRect(80, 190, 101, 31))
self.hostText.setObjectName(_fromUtf8("hostText"))
# Dropdown Menu
self.cmbUsage = QtGui.QComboBox(profileDialog)
self.cmbUsage.setGeometry(QtCore.QRect(80, 50, 151, 27))
self.cmbUsage.setObjectName(_fromUtf8("cmbUsage"))
self.cmbUsage.addItem(_fromUtf8(""))
self.cmbUsage.addItem(_fromUtf8(""))
self.cmbUsage.addItem(_fromUtf8(""))
self.cmbUsage.addItem(_fromUtf8(""))
self.cmbUsage.addItem(_fromUtf8(""))
self.retranslateUi(profileDialog)
self.pushButton_2.clicked.connect(self.trainbutton)
QtCore.QObject.connect(self.profilebtn1, QtCore.SIGNAL(_fromUtf8("clicked(bool)")), self.msgbtn)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), profileDialog.close)
# QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), profileDialog.close)
QtCore.QMetaObject.connectSlotsByName(profileDialog)
def
|
(self, profileDialog):
profileDialog.setWindowTitle(_translate("profileDialog", "Dialog", None))
self.profilebtn1.setText(_translate("profileDialog", "profile", None))
self.label.setText(_translate("profileDialog", "Host Name", None))
self.statusLabel.setText(_translate("profileDialog", "Enter the host name", None))
self.label_2.setText(_translate("profileDialog", "Enter usage % of device", None))
self.pushButton_2.setText(_translate("profileDialog", "Train", None)) # Train button
# Set DropDown Menu
self.cmbUsage.setItemText(0, _translate("profileDialog", "15% Usage 100ns", None))
self.cmbUsage.setItemText(1, _translate("profileDialog", "30% Usage 40ns", None))
self.cmbUsage.setItemText(2, _translate("profileDialog", "50% Usage 30ns", None))
self.cmbUsage.setItemText(3, _translate("profileDialog", "70% Usage 20ns", None))
self.cmbUsage.setItemText(4, _translate("profileDialog", "99% Usage(stuxnet) 99ns", None))
self.choice_icmp.setText(_translate("MainWindow", "ICMP Profile", None))
self.choice_tcp.setText(_translate("MainWindow", "TCP Profile", None))
def assign_details(self, txt):
if txt == 0 or txt == 15:
return "15;100"
elif txt == 1 or txt == 30:
return "30;40"
elif txt == 2 or txt == 50:
return "50;30"
elif txt == 3 or txt == 70:
return "70;20"
elif txt == 4 or txt == 99:
return "99;99"
def msgbtn(self):
self.pushButton_2.setEnabled(True)
txt_index = self.cmbUsage.currentIndex()
txt = self.assign_details(txt_index)
host = self.hostText.toPlainText()
usage = txt.split(';')[0]
task_cycle = txt.split(';')[1]
print usage
if host != "" and usage != "":
msg = QMessageBox()
msg.setIcon(QMessageBox.Information)
msg.setText("The Profiling will take approxmiately 2 minutes,")
msg.setInformativeText("Click OK to start profiling, Popup will let you know once it is done!")
msg.setWindowTitle("Profiler")
msg.setDetailedText(
"Due to our Machine Learning algorithm requiring a large dataset, we require that you profile the SCADA"
" at specific usage for 2 minutes to get most accurate reuslts during detection")
msg.setStandardButtons(QMessageBox.Ok)
msg.exec_()
if self.choice_tcp.isChecked():
os.system('hping3 -c 10000 %s -i u10000 &' % host) ##REAL HPING command
os.system(
'timeout 5m tcpdump -i enp0s3 -w pcap/capture_%s_%s.pcap' % (usage, host)) # REAL tcpdump command
if self.choice_icmp.isChecked():
os.system('hping3 -c 10000 %s -i u10000 --icmp &' % host) ##REAL HPING command
os.system(
'timeout 5m tcpdump -i enp0s3 -w pcap/capture_%s_%s.pcap' % (usage, host)) # REAL tcpdump command
d = QDialog()
b1 = QPushButton("Profiling Done!, Click X", d)
b1.move(175, 75)
d.setWindowTitle("Completion")
d.setWindowModality(Qt.ApplicationModal)
d.exec_()
else:
print "\n Enter host! / or usage "
def retreive_pcaps(self):
files = os.listdir('pcap')
# Crappy regex TODO: regex implementation ; Function retrieves list of all training pcap ;111 excluded
# Reason being 111 -> test data set
filtered = [f for f in files if '.pcap' in f]
filtered = [x for x in filtered if not ('capture_111' in x)]
return filtered
def trainbutton(self):
# Filter data & save frame delta response time to txt file for further calculations
if self.choice_icmp.isChecked():
flag = 'icmp'
# TODO ; fix this import issue
training_pcap_list = self.retreive_pcaps()
for i, item in enumerate(training_pcap_list):
perform_filter(item, 'icmp')
t2csv.ignite(trainer='icmp')
if self.choice_tcp.isChecked():
os.system("chown " + OWNER + ":" + OWNER + " pcap/*;su -c 'bash filter_tcp.sh' -s /bin/sh rn")
t2csv.ignite(trainer='tcp')
self.statusLabel.setText("Dumping Frametime delta done \n Calculating Features Now...")
flabel = open("label_train", "w") # Open file to write labels into
fdata = open("data_train
|
retranslateUi
|
identifier_name
|
v2.py
|
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_profileDialog(object):
def setupUi(self, profileDialog):
|
self.choice_icmp.setChecked(True)
self.choice_tcp = QtGui.QRadioButton(profileDialog)
self.choice_tcp.setGeometry(QtCore.QRect(330, 100, 198, 27))
self.choice_tcp.setObjectName(_fromUtf8("choice_tcp"))
self.label = QtGui.QLabel(profileDialog)
self.label.setGeometry(QtCore.QRect(0, 200, 81, 21))
self.label.setObjectName(_fromUtf8("label"))
self.statusLabel = QtGui.QLabel(profileDialog)
self.statusLabel.setGeometry(QtCore.QRect(60, 100, 281, 61))
self.statusLabel.setObjectName(_fromUtf8("statusLabel"))
self.label_2 = QtGui.QLabel(profileDialog)
self.label_2.setGeometry(QtCore.QRect(60, 10, 221, 31))
self.label_2.setObjectName(_fromUtf8("label_2"))
# Train button
self.pushButton_2 = QtGui.QPushButton(profileDialog)
self.pushButton_2.setGeometry(QtCore.QRect(90, 270, 98, 27))
self.pushButton_2.setObjectName(_fromUtf8("pushButton_2"))
self.pushButton_2.setEnabled(True)
self.hostText = QtGui.QTextEdit(profileDialog)
self.hostText.setGeometry(QtCore.QRect(80, 190, 101, 31))
self.hostText.setObjectName(_fromUtf8("hostText"))
# Dropdown Menu
self.cmbUsage = QtGui.QComboBox(profileDialog)
self.cmbUsage.setGeometry(QtCore.QRect(80, 50, 151, 27))
self.cmbUsage.setObjectName(_fromUtf8("cmbUsage"))
self.cmbUsage.addItem(_fromUtf8(""))
self.cmbUsage.addItem(_fromUtf8(""))
self.cmbUsage.addItem(_fromUtf8(""))
self.cmbUsage.addItem(_fromUtf8(""))
self.cmbUsage.addItem(_fromUtf8(""))
self.retranslateUi(profileDialog)
self.pushButton_2.clicked.connect(self.trainbutton)
QtCore.QObject.connect(self.profilebtn1, QtCore.SIGNAL(_fromUtf8("clicked(bool)")), self.msgbtn)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), profileDialog.close)
# QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), profileDialog.close)
QtCore.QMetaObject.connectSlotsByName(profileDialog)
def retranslateUi(self, profileDialog):
profileDialog.setWindowTitle(_translate("profileDialog", "Dialog", None))
self.profilebtn1.setText(_translate("profileDialog", "profile", None))
self.label.setText(_translate("profileDialog", "Host Name", None))
self.statusLabel.setText(_translate("profileDialog", "Enter the host name", None))
self.label_2.setText(_translate("profileDialog", "Enter usage % of device", None))
self.pushButton_2.setText(_translate("profileDialog", "Train", None)) # Train button
# Set DropDown Menu
self.cmbUsage.setItemText(0, _translate("profileDialog", "15% Usage 100ns", None))
self.cmbUsage.setItemText(1, _translate("profileDialog", "30% Usage 40ns", None))
self.cmbUsage.setItemText(2, _translate("profileDialog", "50% Usage 30ns", None))
self.cmbUsage.setItemText(3, _translate("profileDialog", "70% Usage 20ns", None))
self.cmbUsage.setItemText(4, _translate("profileDialog", "99% Usage(stuxnet) 99ns", None))
self.choice_icmp.setText(_translate("MainWindow", "ICMP Profile", None))
self.choice_tcp.setText(_translate("MainWindow", "TCP Profile", None))
def assign_details(self, txt):
if txt == 0 or txt == 15:
return "15;100"
elif txt == 1 or txt == 30:
return "30;40"
elif txt == 2 or txt == 50:
return "50;30"
elif txt == 3 or txt == 70:
return "70;20"
elif txt == 4 or txt == 99:
return "99;99"
def msgbtn(self):
self.pushButton_2.setEnabled(True)
txt_index = self.cmbUsage.currentIndex()
txt = self.assign_details(txt_index)
host = self.hostText.toPlainText()
usage = txt.split(';')[0]
task_cycle = txt.split(';')[1]
print usage
if host != "" and usage != "":
msg = QMessageBox()
msg.setIcon(QMessageBox.Information)
msg.setText("The Profiling will take approxmiately 2 minutes,")
msg.setInformativeText("Click OK to start profiling, Popup will let you know once it is done!")
msg.setWindowTitle("Profiler")
msg.setDetailedText(
"Due to our Machine Learning algorithm requiring a large dataset, we require that you profile the SCADA"
" at specific usage for 2 minutes to get most accurate reuslts during detection")
msg.setStandardButtons(QMessageBox.Ok)
msg.exec_()
if self.choice_tcp.isChecked():
os.system('hping3 -c 10000 %s -i u10000 &' % host) ##REAL HPING command
os.system(
'timeout 5m tcpdump -i enp0s3 -w pcap/capture_%s_%s.pcap' % (usage, host)) # REAL tcpdump command
if self.choice_icmp.isChecked():
os.system('hping3 -c 10000 %s -i u10000 --icmp &' % host) ##REAL HPING command
os.system(
'timeout 5m tcpdump -i enp0s3 -w pcap/capture_%s_%s.pcap' % (usage, host)) # REAL tcpdump command
d = QDialog()
b1 = QPushButton("Profiling Done!, Click X", d)
b1.move(175, 75)
d.setWindowTitle("Completion")
d.setWindowModality(Qt.ApplicationModal)
d.exec_()
else:
print "\n Enter host! / or usage "
def retreive_pcaps(self):
files = os.listdir('pcap')
# Crappy regex TODO: regex implementation ; Function retrieves list of all training pcap ;111 excluded
# Reason being 111 -> test data set
filtered = [f for f in files if '.pcap' in f]
filtered = [x for x in filtered if not ('capture_111' in x)]
return filtered
def trainbutton(self):
# Filter data & save frame delta response time to txt file for further calculations
if self.choice_icmp.isChecked():
flag = 'icmp'
# TODO ; fix this import issue
training_pcap_list = self.retreive_pcaps()
for i, item in enumerate(training_pcap_list):
perform_filter(item, 'icmp')
t2csv.ignite(trainer='icmp')
if self.choice_tcp.isChecked():
os.system("chown " + OWNER + ":" + OWNER + " pcap/*;su -c 'bash filter_tcp.sh' -s /bin/sh rn")
t2csv.ignite(trainer='tcp')
self.statusLabel.setText("Dumping Frametime delta done \n Calculating Features Now...")
flabel = open("label_train", "w") # Open file to write labels into
fdata = open("data_train",
|
profileDialog.setObjectName(_fromUtf8("profileDialog"))
profileDialog.resize(492, 428)
profileDialog.setContextMenuPolicy(QtCore.Qt.DefaultContextMenu)
profileDialog.setAutoFillBackground(True)
self.buttonBox = QtGui.QDialogButtonBox(profileDialog)
self.buttonBox.setGeometry(QtCore.QRect(30, 220, 341, 32))
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Close)
self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
self.p1text = QtGui.QPlainTextEdit(profileDialog)
self.p1text.setGeometry(QtCore.QRect(50, 50, 104, 21))
self.p1text.setPlainText(_fromUtf8(""))
self.p1text.setObjectName(_fromUtf8("p1text"))
self.profilebtn1 = QtGui.QPushButton(profileDialog)
self.profilebtn1.setGeometry(QtCore.QRect(330, 50, 98, 27))
self.profilebtn1.setObjectName(_fromUtf8("profilebtn1"))
self.choice_icmp = QtGui.QRadioButton(profileDialog)
self.choice_icmp.setGeometry(QtCore.QRect(330, 80, 198, 27))
self.choice_icmp.setObjectName(_fromUtf8("choice_icmp"))
|
identifier_body
|
v2.py
|
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_profileDialog(object):
def setupUi(self, profileDialog):
profileDialog.setObjectName(_fromUtf8("profileDialog"))
profileDialog.resize(492, 428)
profileDialog.setContextMenuPolicy(QtCore.Qt.DefaultContextMenu)
profileDialog.setAutoFillBackground(True)
self.buttonBox = QtGui.QDialogButtonBox(profileDialog)
self.buttonBox.setGeometry(QtCore.QRect(30, 220, 341, 32))
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Close)
self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
self.p1text = QtGui.QPlainTextEdit(profileDialog)
self.p1text.setGeometry(QtCore.QRect(50, 50, 104, 21))
self.p1text.setPlainText(_fromUtf8(""))
self.p1text.setObjectName(_fromUtf8("p1text"))
self.profilebtn1 = QtGui.QPushButton(profileDialog)
self.profilebtn1.setGeometry(QtCore.QRect(330, 50, 98, 27))
self.profilebtn1.setObjectName(_fromUtf8("profilebtn1"))
self.choice_icmp = QtGui.QRadioButton(profileDialog)
self.choice_icmp.setGeometry(QtCore.QRect(330, 80, 198, 27))
self.choice_icmp.setObjectName(_fromUtf8("choice_icmp"))
self.choice_icmp.setChecked(True)
self.choice_tcp = QtGui.QRadioButton(profileDialog)
self.choice_tcp.setGeometry(QtCore.QRect(330, 100, 198, 27))
self.choice_tcp.setObjectName(_fromUtf8("choice_tcp"))
self.label = QtGui.QLabel(profileDialog)
self.label.setGeometry(QtCore.QRect(0, 200, 81, 21))
self.label.setObjectName(_fromUtf8("label"))
self.statusLabel = QtGui.QLabel(profileDialog)
self.statusLabel.setGeometry(QtCore.QRect(60, 100, 281, 61))
self.statusLabel.setObjectName(_fromUtf8("statusLabel"))
self.label_2 = QtGui.QLabel(profileDialog)
self.label_2.setGeometry(QtCore.QRect(60, 10, 221, 31))
self.label_2.setObjectName(_fromUtf8("label_2"))
# Train button
self.pushButton_2 = QtGui.QPushButton(profileDialog)
self.pushButton_2.setGeometry(QtCore.QRect(90, 270, 98, 27))
self.pushButton_2.setObjectName(_fromUtf8("pushButton_2"))
self.pushButton_2.setEnabled(True)
self.hostText = QtGui.QTextEdit(profileDialog)
self.hostText.setGeometry(QtCore.QRect(80, 190, 101, 31))
self.hostText.setObjectName(_fromUtf8("hostText"))
# Dropdown Menu
self.cmbUsage = QtGui.QComboBox(profileDialog)
self.cmbUsage.setGeometry(QtCore.QRect(80, 50, 151, 27))
self.cmbUsage.setObjectName(_fromUtf8("cmbUsage"))
self.cmbUsage.addItem(_fromUtf8(""))
self.cmbUsage.addItem(_fromUtf8(""))
self.cmbUsage.addItem(_fromUtf8(""))
self.cmbUsage.addItem(_fromUtf8(""))
self.cmbUsage.addItem(_fromUtf8(""))
self.retranslateUi(profileDialog)
self.pushButton_2.clicked.connect(self.trainbutton)
QtCore.QObject.connect(self.profilebtn1, QtCore.SIGNAL(_fromUtf8("clicked(bool)")), self.msgbtn)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), profileDialog.close)
# QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), profileDialog.close)
QtCore.QMetaObject.connectSlotsByName(profileDialog)
def retranslateUi(self, profileDialog):
profileDialog.setWindowTitle(_translate("profileDialog", "Dialog", None))
self.profilebtn1.setText(_translate("profileDialog", "profile", None))
self.label.setText(_translate("profileDialog", "Host Name", None))
self.statusLabel.setText(_translate("profileDialog", "Enter the host name", None))
self.label_2.setText(_translate("profileDialog", "Enter usage % of device", None))
self.pushButton_2.setText(_translate("profileDialog", "Train", None)) # Train button
# Set DropDown Menu
self.cmbUsage.setItemText(0, _translate("profileDialog", "15% Usage 100ns", None))
self.cmbUsage.setItemText(1, _translate("profileDialog", "30% Usage 40ns", None))
self.cmbUsage.setItemText(2, _translate("profileDialog", "50% Usage 30ns", None))
self.cmbUsage.setItemText(3, _translate("profileDialog", "70% Usage 20ns", None))
self.cmbUsage.setItemText(4, _translate("profileDialog", "99% Usage(stuxnet) 99ns", None))
self.choice_icmp.setText(_translate("MainWindow", "ICMP Profile", None))
self.choice_tcp.setText(_translate("MainWindow", "TCP Profile", None))
def assign_details(self, txt):
if txt == 0 or txt == 15:
return "15;100"
elif txt == 1 or txt == 30:
return "30;40"
elif txt == 2 or txt == 50:
return "50;30"
elif txt == 3 or txt == 70:
|
elif txt == 4 or txt == 99:
return "99;99"
def msgbtn(self):
self.pushButton_2.setEnabled(True)
txt_index = self.cmbUsage.currentIndex()
txt = self.assign_details(txt_index)
host = self.hostText.toPlainText()
usage = txt.split(';')[0]
task_cycle = txt.split(';')[1]
print usage
if host != "" and usage != "":
msg = QMessageBox()
msg.setIcon(QMessageBox.Information)
msg.setText("The Profiling will take approxmiately 2 minutes,")
msg.setInformativeText("Click OK to start profiling, Popup will let you know once it is done!")
msg.setWindowTitle("Profiler")
msg.setDetailedText(
"Due to our Machine Learning algorithm requiring a large dataset, we require that you profile the SCADA"
" at specific usage for 2 minutes to get most accurate reuslts during detection")
msg.setStandardButtons(QMessageBox.Ok)
msg.exec_()
if self.choice_tcp.isChecked():
os.system('hping3 -c 10000 %s -i u10000 &' % host) ##REAL HPING command
os.system(
'timeout 5m tcpdump -i enp0s3 -w pcap/capture_%s_%s.pcap' % (usage, host)) # REAL tcpdump command
if self.choice_icmp.isChecked():
os.system('hping3 -c 10000 %s -i u10000 --icmp &' % host) ##REAL HPING command
os.system(
'timeout 5m tcpdump -i enp0s3 -w pcap/capture_%s_%s.pcap' % (usage, host)) # REAL tcpdump command
d = QDialog()
b1 = QPushButton("Profiling Done!, Click X", d)
b1.move(175, 75)
d.setWindowTitle("Completion")
d.setWindowModality(Qt.ApplicationModal)
d.exec_()
else:
print "\n Enter host! / or usage "
def retreive_pcaps(self):
files = os.listdir('pcap')
# Crappy regex TODO: regex implementation ; Function retrieves list of all training pcap ;111 excluded
# Reason being 111 -> test data set
filtered = [f for f in files if '.pcap' in f]
filtered = [x for x in filtered if not ('capture_111' in x)]
return filtered
def trainbutton(self):
# Filter data & save frame delta response time to txt file for further calculations
if self.choice_icmp.isChecked():
flag = 'icmp'
# TODO ; fix this import issue
training_pcap_list = self.retreive_pcaps()
for i, item in enumerate(training_pcap_list):
perform_filter(item, 'icmp')
t2csv.ignite(trainer='icmp')
if self.choice_tcp.isChecked():
os.system("chown " + OWNER + ":" + OWNER + " pcap/*;su -c 'bash filter_tcp.sh' -s /bin/sh rn")
t2csv.ignite(trainer='tcp')
self.statusLabel.setText("Dumping Frametime delta done \n Calculating Features Now...")
flabel = open("label_train", "w") # Open file to write labels into
fdata = open("data_train
|
return "70;20"
|
conditional_block
|
forminput.js
|
(function() {
$.ajax({
type: "GET",
url: "http://localhost:3000/",
success: function(data) {
//select what should be shown
if (data != false) {
if (show == true) { graphShow(data) } else { chartShow(data) }
//stop checking if preprocessing script is finished
clearInterval(check);
return;
}
}
});
}, 2000);
}
}
//show graph with d3
var graphShow = function(jsonString) {
$("#pic").html("");
$("#pic").append(
"<svg style='width:100%; height:100%;'>" +
"</svg>"
);
var zoom = d3.zoom();
//defining display window
var svg = d3.select("svg"),
width = document.getElementById("pic").clientWidth,
height = document.getElementById("pic").clientHeight
//make svg zoomable
transform = d3.zoomIdentity;
//select svg to container for better zooming functionality
var container = svg.append("g")
.attr("class", "container");
//function for generating different colors depending on the word cluster
var color = d3.scaleOrdinal(d3.schemeCategory20c);
//defining the standard radius of the nodes
var radius = d3.scaleSqrt()
.range([0, 6]);
//simulation of the nodes and links: what kinds of forces exist between them, e.g. attraction or collision
var simulation = d3.forceSimulation()
.force("link",
d3.forceLink().id(function(d) { return d.word; })
.distance(function(d) { return radius(d.source.quantity / 2) + radius(d.target.quantity / 2); })
.strength(function(d) { return 0.2; })
)
.force("charge", d3.forceManyBody().strength(-500))
.force("center", d3.forceCenter(width / 3 * 2, height / 3 * 2))
.force("collide", d3.forceCollide(function(d) { return d.quantity * 2 }));
//reading the JSON file that includes the nodes and links
graph = JSON.parse(jsonString);
//defining a link
var link = container.append("g")
.attr("class", "links")
.selectAll("path")
.data(graph.links)
.enter().append("svg:path")
//defining the style of a link
link.style('fill', 'none')
.style('stroke', 'gray')
.style("stroke-width", function(d) { return d.strength; })
//defining a node
var node = container.append("g")
.attr("class", "nodes")
.selectAll("g")
.data(graph.nodes)
.enter().append("g")
.style('transform-origin', '20% 20%')
.on("mouseover", function(d) { mouseover_node(d); })
.on("mouseout", function(d) { mouseout_node(d) })
//defining which functions run if a node is dragged
.call(d3.drag()
.on("start", dragstarted)
.on("drag", dragged)
.on("end", dragended));
//assign the attribute quantity(JSON) to the radius of the node
var circles = node.append("circle")
.attr("r", function(d) { return radius(d.quantity / 2); })
.attr("fill", function(d) { return color(d.cluster); })
.attr("transperancy", "50%");
var labels = node.append("text")
.attr("dy", ".35em")
.attr("text-anchor", "middle")
//define the text that is displayed (word out of the JSON file)
.text(function(d) { return d.word; })
//define the color of the text (cluster out of the JSON file)
.attr("fill", "black");
simulation
.nodes(graph.nodes)
.on("tick", ticked);
simulation.force("link")
.links(graph.links);
//select what is standard zoom and what to do on zoom
svg.call(d3.zoom()
.scaleExtent([1 / 8, 8])
.on("zoom", zoomed));
//Legende
var margin = { top: 10, right: 10, bottom: 10, left: 10 };
var divWidth = document.getElementById("pic").offsetWidth;
var legendHolder = container.append('g')
.attr('transform', "translate(10,30)")
var legend = legendHolder.selectAll(".legend")
.data(color.domain())
.enter().append("g")
.attr("class", "legend")
.attr("transform", function(d, i) { return "translate(0," + i * 20 + ")"; });
legend.append("circle")
.attr("cx", 0)
.attr("cy", 0)
.attr("r", 9)
.style("fill", color);
legend.append("text")
.attr("x", 12)
.attr("y", 0)
.attr("dy", ".35em")
.attr("stroke", "black")
.style("text-anchor", "start")
.text(function(d) {
if (d == "nn") {
return "noun, singular"
} else if (d == "nns") {
return "noun, plural"
} else if (d == "vbg") {
return "verb, gerund"
} else if (d == "vbz") {
return "verb, present tense, third person singular"
} else if (d == "vbn") {
return "verb past participle"
} else if (d == "vbp") {
return "verb, present tense, not third person singular"
} else if (d == "jjr") {
return "adjective, comparative"
} else if (d == "md") {
return "modal"
} else if (d == "prp") {
return "personal pronoun"
} else if (d == "rbr") {
return "adverb, comparative"
} else if (d == "rb") {
return "adverb"
} else if (d == "pdt") {
return "predeterminer"
} else if (d == "jj") {
return "adjective"
} else if (d == "vbd") {
return "verb, past tense"
} else if (d == "fw") {
return "foreign word"
} else if (d == "vb") {
return "verb"
} else if (d == "jjs") {
return "adjectiv, superlative"
} else if (d == "cc") {
return "coordinating conjunction"
} else if (d == "dt") {
return "determiner"
} else if (d == "rp") {
return "particle"
} else if (d == "in") {
return "preposition/subordinating conjunction"
} else if (d == "cd") {
return "cardinal digit"
} else return d
});
function zoomed() {
var g = d3.selectAll(".container");
g.attr("transform", d3.event.transform);
}
function ticked() {
link.attr("d", function(d) {
var dx = d.target.x - d.source.x,
dy = d.target.y - d.source.y,
dr = Math.sqrt(dx * dx + dy * dy);
return "M" +
d.source.x + "," +
d.source.y + "A" +
dr + "," + dr + " 0 0,1 " +
d.target.x + "," +
d.target.y;
})
.attr("x1", function(d) { return d.source.x; })
.attr("y1", function(d) { return d.source.y; })
.attr("x2", function(d) { return d.target.x; })
.attr("y2", function(d) { return d.target.y; });
node
.attr("transform", function(d) { return "translate(" + d.x + ", " + d.y + ")"; });
edgepaths.attr('d', function(d) {
return 'M ' + d.source.x + ' ' + d.source.y + ' L ' + d.target.x + ' ' + d.target.y;
});
}
function dragstarted(d) {
if (!d3.event.active) simulation.alphaTarget(0.3).restart()
d.fx = d.x;
d.fy = d.y;
}
function dragged(d)
|
function dragended(d) {
if (!d3.event.active) simulation.alphaTarget(0);
d.fx = null;
d.fy = null;
}
var mouseover_node = function(z) {
var neighbors = {};
neighbors[z.index] = true;
link.filter(function(d) {
if (d.source == z) {
neighbors[d.target.index]
|
{
d.fx = d3.event.x;
d.fy = d3.event.y;
}
|
identifier_body
|
forminput.js
|
setInterval(function() {
$.ajax({
type: "GET",
url: "http://localhost:3000/",
success: function(data) {
//select what should be shown
if (data != false) {
if (show == true) { graphShow(data) } else { chartShow(data) }
//stop checking if preprocessing script is finished
clearInterval(check);
return;
}
}
});
}, 2000);
}
}
//show graph with d3
var graphShow = function(jsonString) {
$("#pic").html("");
$("#pic").append(
"<svg style='width:100%; height:100%;'>" +
"</svg>"
);
var zoom = d3.zoom();
//defining display window
var svg = d3.select("svg"),
width = document.getElementById("pic").clientWidth,
height = document.getElementById("pic").clientHeight
//make svg zoomable
transform = d3.zoomIdentity;
//select svg to container for better zooming functionality
var container = svg.append("g")
.attr("class", "container");
//function for generating different colors depending on the word cluster
var color = d3.scaleOrdinal(d3.schemeCategory20c);
//defining the standard radius of the nodes
var radius = d3.scaleSqrt()
.range([0, 6]);
//simulation of the nodes and links: what kinds of forces exist between them, e.g. attraction or collision
var simulation = d3.forceSimulation()
.force("link",
d3.forceLink().id(function(d) { return d.word; })
.distance(function(d) { return radius(d.source.quantity / 2) + radius(d.target.quantity / 2); })
.strength(function(d) { return 0.2; })
)
.force("charge", d3.forceManyBody().strength(-500))
.force("center", d3.forceCenter(width / 3 * 2, height / 3 * 2))
.force("collide", d3.forceCollide(function(d) { return d.quantity * 2 }));
//reading the JSON file that includes the nodes and links
graph = JSON.parse(jsonString);
//defining a link
var link = container.append("g")
.attr("class", "links")
.selectAll("path")
.data(graph.links)
.enter().append("svg:path")
//defining the style of a link
link.style('fill', 'none')
.style('stroke', 'gray')
.style("stroke-width", function(d) { return d.strength; })
//defining a node
var node = container.append("g")
.attr("class", "nodes")
.selectAll("g")
.data(graph.nodes)
.enter().append("g")
.style('transform-origin', '20% 20%')
.on("mouseover", function(d) { mouseover_node(d); })
.on("mouseout", function(d) { mouseout_node(d) })
//defining which functions run if a node is dragged
.call(d3.drag()
.on("start", dragstarted)
.on("drag", dragged)
.on("end", dragended));
//assign the attribute quantity(JSON) to the radius of the node
var circles = node.append("circle")
.attr("r", function(d) { return radius(d.quantity / 2); })
.attr("fill", function(d) { return color(d.cluster); })
.attr("transperancy", "50%");
var labels = node.append("text")
.attr("dy", ".35em")
.attr("text-anchor", "middle")
//define the text that is displayed (word out of the JSON file)
.text(function(d) { return d.word; })
//define the color of the text (cluster out of the JSON file)
.attr("fill", "black");
simulation
.nodes(graph.nodes)
.on("tick", ticked);
simulation.force("link")
.links(graph.links);
//select what is standard zoom and what to do on zoom
svg.call(d3.zoom()
.scaleExtent([1 / 8, 8])
.on("zoom", zoomed));
//Legende
var margin = { top: 10, right: 10, bottom: 10, left: 10 };
var divWidth = document.getElementById("pic").offsetWidth;
var legendHolder = container.append('g')
.attr('transform', "translate(10,30)")
var legend = legendHolder.selectAll(".legend")
.data(color.domain())
.enter().append("g")
.attr("class", "legend")
.attr("transform", function(d, i) { return "translate(0," + i * 20 + ")"; });
legend.append("circle")
.attr("cx", 0)
.attr("cy", 0)
.attr("r", 9)
.style("fill", color);
legend.append("text")
.attr("x", 12)
.attr("y", 0)
.attr("dy", ".35em")
.attr("stroke", "black")
.style("text-anchor", "start")
.text(function(d) {
if (d == "nn") {
return "noun, singular"
} else if (d == "nns") {
return "noun, plural"
} else if (d == "vbg") {
return "verb, gerund"
} else if (d == "vbz") {
return "verb, present tense, third person singular"
} else if (d == "vbn") {
return "verb past participle"
} else if (d == "vbp") {
return "verb, present tense, not third person singular"
} else if (d == "jjr") {
return "adjective, comparative"
} else if (d == "md") {
return "modal"
} else if (d == "prp") {
return "personal pronoun"
} else if (d == "rbr") {
return "adverb, comparative"
} else if (d == "rb") {
return "adverb"
} else if (d == "pdt") {
return "predeterminer"
} else if (d == "jj") {
return "adjective"
} else if (d == "vbd") {
return "verb, past tense"
} else if (d == "fw") {
return "foreign word"
} else if (d == "vb") {
return "verb"
} else if (d == "jjs") {
return "adjectiv, superlative"
} else if (d == "cc") {
return "coordinating conjunction"
} else if (d == "dt") {
return "determiner"
} else if (d == "rp") {
return "particle"
} else if (d == "in") {
return "preposition/subordinating conjunction"
} else if (d == "cd") {
return "cardinal digit"
} else return d
});
function
|
() {
var g = d3.selectAll(".container");
g.attr("transform", d3.event.transform);
}
function ticked() {
link.attr("d", function(d) {
var dx = d.target.x - d.source.x,
dy = d.target.y - d.source.y,
dr = Math.sqrt(dx * dx + dy * dy);
return "M" +
d.source.x + "," +
d.source.y + "A" +
dr + "," + dr + " 0 0,1 " +
d.target.x + "," +
d.target.y;
})
.attr("x1", function(d) { return d.source.x; })
.attr("y1", function(d) { return d.source.y; })
.attr("x2", function(d) { return d.target.x; })
.attr("y2", function(d) { return d.target.y; });
node
.attr("transform", function(d) { return "translate(" + d.x + ", " + d.y + ")"; });
edgepaths.attr('d', function(d) {
return 'M ' + d.source.x + ' ' + d.source.y + ' L ' + d.target.x + ' ' + d.target.y;
});
}
function dragstarted(d) {
if (!d3.event.active) simulation.alphaTarget(0.3).restart()
d.fx = d.x;
d.fy = d.y;
}
function dragged(d) {
d.fx = d3.event.x;
d.fy = d3.event.y;
}
function dragended(d) {
if (!d3.event.active) simulation.alphaTarget(0);
d.fx = null;
d.fy = null;
}
var mouseover_node = function(z) {
var neighbors = {};
neighbors[z.index] = true;
link.filter(function(d) {
if (d.source == z) {
neighbors[d.target.index]
|
zoomed
|
identifier_name
|
forminput.js
|
setInterval(function() {
$.ajax({
type: "GET",
url: "http://localhost:3000/",
success: function(data) {
//select what should be shown
if (data != false) {
if (show == true) { graphShow(data) } else { chartShow(data) }
//stop checking if preprocessing script is finished
clearInterval(check);
return;
}
}
});
}, 2000);
}
}
//show graph with d3
var graphShow = function(jsonString) {
$("#pic").html("");
$("#pic").append(
"<svg style='width:100%; height:100%;'>" +
"</svg>"
);
var zoom = d3.zoom();
//defining display window
var svg = d3.select("svg"),
width = document.getElementById("pic").clientWidth,
height = document.getElementById("pic").clientHeight
//make svg zoomable
transform = d3.zoomIdentity;
//select svg to container for better zooming functionality
var container = svg.append("g")
.attr("class", "container");
//function for generating different colors depending on the word cluster
var color = d3.scaleOrdinal(d3.schemeCategory20c);
//defining the standard radius of the nodes
var radius = d3.scaleSqrt()
.range([0, 6]);
//simulation of the nodes and links: what kinds of forces exist between them, e.g. attraction or collision
var simulation = d3.forceSimulation()
.force("link",
d3.forceLink().id(function(d) { return d.word; })
.distance(function(d) { return radius(d.source.quantity / 2) + radius(d.target.quantity / 2); })
.strength(function(d) { return 0.2; })
)
.force("charge", d3.forceManyBody().strength(-500))
.force("center", d3.forceCenter(width / 3 * 2, height / 3 * 2))
.force("collide", d3.forceCollide(function(d) { return d.quantity * 2 }));
//reading the JSON file that includes the nodes and links
graph = JSON.parse(jsonString);
//defining a link
var link = container.append("g")
.attr("class", "links")
.selectAll("path")
.data(graph.links)
.enter().append("svg:path")
//defining the style of a link
link.style('fill', 'none')
.style('stroke', 'gray')
.style("stroke-width", function(d) { return d.strength; })
//defining a node
var node = container.append("g")
.attr("class", "nodes")
.selectAll("g")
.data(graph.nodes)
.enter().append("g")
.style('transform-origin', '20% 20%')
.on("mouseover", function(d) { mouseover_node(d); })
.on("mouseout", function(d) { mouseout_node(d) })
//defining which functions run if a node is dragged
.call(d3.drag()
.on("start", dragstarted)
.on("drag", dragged)
.on("end", dragended));
//assign the attribute quantity(JSON) to the radius of the node
var circles = node.append("circle")
.attr("r", function(d) { return radius(d.quantity / 2); })
.attr("fill", function(d) { return color(d.cluster); })
.attr("transperancy", "50%");
var labels = node.append("text")
.attr("dy", ".35em")
.attr("text-anchor", "middle")
//define the text that is displayed (word out of the JSON file)
.text(function(d) { return d.word; })
//define the color of the text (cluster out of the JSON file)
.attr("fill", "black");
simulation
.nodes(graph.nodes)
.on("tick", ticked);
simulation.force("link")
.links(graph.links);
//select what is standard zoom and what to do on zoom
svg.call(d3.zoom()
.scaleExtent([1 / 8, 8])
.on("zoom", zoomed));
//Legende
var margin = { top: 10, right: 10, bottom: 10, left: 10 };
var divWidth = document.getElementById("pic").offsetWidth;
var legendHolder = container.append('g')
.attr('transform', "translate(10,30)")
var legend = legendHolder.selectAll(".legend")
.data(color.domain())
.enter().append("g")
.attr("class", "legend")
.attr("transform", function(d, i) { return "translate(0," + i * 20 + ")"; });
legend.append("circle")
.attr("cx", 0)
.attr("cy", 0)
.attr("r", 9)
.style("fill", color);
legend.append("text")
.attr("x", 12)
.attr("y", 0)
.attr("dy", ".35em")
.attr("stroke", "black")
.style("text-anchor", "start")
.text(function(d) {
if (d == "nn") {
return "noun, singular"
} else if (d == "nns") {
return "noun, plural"
} else if (d == "vbg") {
return "verb, gerund"
} else if (d == "vbz") {
return "verb, present tense, third person singular"
} else if (d == "vbn") {
return "verb past participle"
} else if (d == "vbp") {
return "verb, present tense, not third person singular"
} else if (d == "jjr") {
return "adjective, comparative"
} else if (d == "md") {
return "modal"
} else if (d == "prp") {
return "personal pronoun"
} else if (d == "rbr") {
return "adverb, comparative"
} else if (d == "rb") {
return "adverb"
} else if (d == "pdt") {
return "predeterminer"
} else if (d == "jj") {
return "adjective"
} else if (d == "vbd") {
return "verb, past tense"
} else if (d == "fw") {
return "foreign word"
} else if (d == "vb") {
return "verb"
} else if (d == "jjs") {
return "adjectiv, superlative"
} else if (d == "cc") {
return "coordinating conjunction"
} else if (d == "dt") {
return "determiner"
} else if (d == "rp") {
return "particle"
} else if (d == "in")
|
else if (d == "cd") {
return "cardinal digit"
} else return d
});
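//illustrative alternative (not used here): the tag-to-description mapping above could live in a
//plain lookup object instead of an if/else chain, e.g.
//  var posTagNames = { nn: "noun, singular", nns: "noun, plural", vb: "verb" /* ... */ };
//  legend.append("text").text(function(d) { return posTagNames[d] || d; });
//the existing chain is kept as-is; this sketch only shows the alternative shape.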
function zoomed() {
var g = d3.selectAll(".container");
g.attr("transform", d3.event.transform);
}
function ticked() {
link.attr("d", function(d) {
var dx = d.target.x - d.source.x,
dy = d.target.y - d.source.y,
dr = Math.sqrt(dx * dx + dy * dy);
return "M" +
d.source.x + "," +
d.source.y + "A" +
dr + "," + dr + " 0 0,1 " +
d.target.x + "," +
d.target.y;
})
.attr("x1", function(d) { return d.source.x; })
.attr("y1", function(d) { return d.source.y; })
.attr("x2", function(d) { return d.target.x; })
.attr("y2", function(d) { return d.target.y; });
node
.attr("transform", function(d) { return "translate(" + d.x + ", " + d.y + ")"; });
//'edgepaths' is never defined in this file; the straight-line path update below is commented out to avoid a ReferenceError on every tick
//edgepaths.attr('d', function(d) {
//    return 'M ' + d.source.x + ' ' + d.source.y + ' L ' + d.target.x + ' ' + d.target.y;
//});
}
function dragstarted(d) {
if (!d3.event.active) simulation.alphaTarget(0.3).restart()
d.fx = d.x;
d.fy = d.y;
}
function dragged(d) {
d.fx = d3.event.x;
d.fy = d3.event.y;
}
function dragended(d) {
if (!d3.event.active) simulation.alphaTarget(0);
d.fx = null;
d.fy = null;
}
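//the drag handlers above follow the usual d3-force pattern: setting fx/fy pins the node to the
//pointer while dragging, alphaTarget(0.3) reheats the simulation, and clearing fx/fy on release
//lets the force layout move the node again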
var mouseover_node = function(z) {
var neighbors = {};
neighbors[z.index] = true;
link.filter(function(d) {
if (d.source == z) {
neighbors[d.target.index]
|
{
return "preposition/subordinating conjunction"
}
|
conditional_block
|
forminput.js
|
("y", 0)
.attr("dy", ".35em")
.attr("stroke", "black")
.style("text-anchor", "start")
.text(function(d) {
if (d == "nn") {
return "noun, singular"
} else if (d == "nns") {
return "noun, plural"
} else if (d == "vbg") {
return "verb, gerund"
} else if (d == "vbz") {
return "verb, present tense, third person singular"
} else if (d == "vbn") {
return "verb past participle"
} else if (d == "vbp") {
return "verb, present tense, not third person singular"
} else if (d == "jjr") {
return "adjective, comparative"
} else if (d == "md") {
return "modal"
} else if (d == "prp") {
return "personal pronoun"
} else if (d == "rbr") {
return "adverb, comparative"
} else if (d == "rb") {
return "adverb"
} else if (d == "pdt") {
return "predeterminer"
} else if (d == "jj") {
return "adjective"
} else if (d == "vbd") {
return "verb, past tense"
} else if (d == "fw") {
return "foreign word"
} else if (d == "vb") {
return "verb"
} else if (d == "jjs") {
return "adjectiv, superlative"
} else if (d == "cc") {
return "coordinating conjunction"
} else if (d == "dt") {
return "determiner"
} else if (d == "rp") {
return "particle"
} else if (d == "in") {
return "preposition/subordinating conjunction"
} else if (d == "cd") {
return "cardinal digit"
} else return d
});
function zoomed() {
var g = d3.selectAll(".container");
g.attr("transform", d3.event.transform);
}
function ticked() {
link.attr("d", function(d) {
var dx = d.target.x - d.source.x,
dy = d.target.y - d.source.y,
dr = Math.sqrt(dx * dx + dy * dy);
return "M" +
d.source.x + "," +
d.source.y + "A" +
dr + "," + dr + " 0 0,1 " +
d.target.x + "," +
d.target.y;
})
.attr("x1", function(d) { return d.source.x; })
.attr("y1", function(d) { return d.source.y; })
.attr("x2", function(d) { return d.target.x; })
.attr("y2", function(d) { return d.target.y; });
node
.attr("transform", function(d) { return "translate(" + d.x + ", " + d.y + ")"; });
//'edgepaths' is never defined in this file; the straight-line path update below is commented out to avoid a ReferenceError on every tick
//edgepaths.attr('d', function(d) {
//    return 'M ' + d.source.x + ' ' + d.source.y + ' L ' + d.target.x + ' ' + d.target.y;
//});
}
function dragstarted(d) {
if (!d3.event.active) simulation.alphaTarget(0.3).restart()
d.fx = d.x;
d.fy = d.y;
}
function dragged(d) {
d.fx = d3.event.x;
d.fy = d3.event.y;
}
function dragended(d) {
if (!d3.event.active) simulation.alphaTarget(0);
d.fx = null;
d.fy = null;
}
var mouseover_node = function(z) {
var neighbors = {};
neighbors[z.index] = true;
link.filter(function(d) {
if (d.source == z) {
neighbors[d.target.index] = true
return true
} else if (d.target == z) {
neighbors[d.source.index] = true
return true
} else {
return false
}
})
.style("stroke-opacity", 1);
node.filter(function(d) { return neighbors[d.index] })
.style("stroke-width", 3);
labels.filter(function(d) { return !neighbors[d.index] })
.style("fill-opacity", 0.2);
labels.filter(function(d) { return neighbors[d.index] })
.attr("font-size", 16)
};
var mouseout_node = function(z) {
link
.style("stroke-opacity", 0.2);
node
.style("stroke-width", 1)
label
.attr("font-size", 10)
.style("fill-opacity", 1)
};
window.scrollTo(($(document).width() - $(window).width()) / 2, 0);
}
var chartShow = function(jsonString) {
//load the data
jsonData = JSON.parse(jsonString);
var data = jsonData.nodes;
var length = Object.keys(data).length;
var margin = { top: 50, right: 100, bottom: 100, left: 200 },
width = document.getElementById("pic").clientWidth - margin.left - margin.right,
height = document.getElementById("pic").clientHeight * (length / 18) - margin.top - margin.bottom;
$("#pic").html("");
//design x-Axis
var x = d3.scaleLinear()
.range([0, width]);
//design y-Axis
var y = d3.scaleBand()
.rangeRound([0, height])
.padding(.1)
.paddingOuter(.1)
//set distance in percent between y axis and first bar --maybe do it not in percent but in px or something in the future?
.align(0.1);
var xAxis = d3
.axisTop(x)
var yAxis = d3
.axisLeft(y)
//select div in which svg should be created
d3.select("#pic").attr("style", "overflow-y: scroll; margin-top:15px;");
//design svg
var svg = d3.select("#pic").append("svg")
.attr("width", width + margin.left + margin.right)
.attr("height", height + margin.top + margin.bottom)
.append("g")
.attr("transform", "translate(" + margin.left + "," + margin.top + ")");
//map data
//coerce quantity to a number on each node (data is an array of node objects)
data.forEach(function(d) {
d.quantity = +d.quantity;
});
x.domain([0, d3.max(data, function(d) { return d.quantity; })]);
y.domain(data.map(function(d) { return d.word; }));
svg.append("g")
.attr("class", "x axis")
.attr("transform", "translate(0,0)")
.call(xAxis)
.append("text")
.style("text-anchor", "end")
.attr("dx", "-.8em")
.attr("dy", "-.55em")
.attr("transform", "rotate(-180)");
svg.append("g")
.attr("class", "y axis")
.call(yAxis)
.append("text")
.attr("transform", "rotate(-90)")
.style("text-anchor", "end")
.text("quantity");
svg.append('g')
.attr('class', 'grid')
.attr('transform', 'translate(0,' + height + ')')
.call(d3.axisBottom()
.scale(x)
.tickSize(height, 0, 0)
.tickFormat(''))
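//note: with axisBottom, tickSize(height) draws the ticks downward from this translated axis; to
//overlay vertical gridlines on the bars a negative tick size (e.g. -height) is the more common choice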
const barGroups = svg.selectAll()
.data(data)
.enter()
.append('g')
barGroups
.append('rect')
.attr('class', 'bar')
.attr('y', function(d) { return y(d.word); })
.attr('x', 0)
.attr('height', y.bandwidth())
.attr('width', function(d) { return x(d.quantity); })
.on('mouseenter', function(actual, i) {
d3.selectAll('.quantity')
.attr('opacity', 0)
d3.select(this)
.transition()
.duration(300)
.attr('opacity', 0.6)
.attr('y', (d) => y(d.word) - 2)
.attr('height', y.bandwidth() + 4)
})
.on('mouseleave', function() {
d3.selectAll('.quantity')
.attr('opacity', 1)
d3.select(this)
.transition()
.duration(300)
.attr('opacity', 1)
.attr('y', (d) => y(d.word))
.attr('height', y.bandwidth())
svg.selectAll('#limit').remove()
})
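//note: the '.quantity' and '#limit' selections in these handlers refer to elements that are not
//created in this snippet; the calls are harmless no-ops unless such elements exist elsewhere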
barGroups
.append('text')
.attr('class', 'value')
.attr('y', (d) => y(d.word) + y.bandwidth() / 2)
|
random_line_split
|
||
supervisor_processor.go
|
runnable has died.
type processorRequestDied struct {
dn string
err error
}
type processorRequestWaitSettled struct {
waiter chan struct{}
}
// processor is the main processing loop.
func (s *supervisor) processor(ctx context.Context) {
s.ilogger.Info("supervisor processor started")
// Waiters waiting for the GC to be settled.
var waiters []chan struct{}
// The GC will run every millisecond if needed. Any time the processor requests a change in the supervision tree
// (ie a death or a new runnable) it will mark the state as dirty and run the GC on the next millisecond cycle.
gc := time.NewTicker(1 * time.Millisecond)
defer gc.Stop()
clean := true
// How long the GC has been clean, counted in GC ticks. This is used to notify 'settled' waiters.
cleanCycles := 0
markDirty := func() {
clean = false
cleanCycles = 0
}
for {
select {
case <-ctx.Done():
s.ilogger.Infof("supervisor processor exiting: %v", ctx.Err())
s.processKill()
s.ilogger.Info("supervisor exited")
return
case <-gc.C:
if !clean {
s.processGC()
}
clean = true
cleanCycles += 1
// This threshold is somewhat arbitrary. It's a balance between test speed and test reliability.
if cleanCycles > 50 {
for _, w := range waiters {
close(w)
}
waiters = nil
}
case r := <-s.pReq:
switch {
case r.schedule != nil:
s.processSchedule(r.schedule)
markDirty()
case r.died != nil:
s.processDied(r.died)
markDirty()
case r.waitSettled != nil:
waiters = append(waiters, r.waitSettled.waiter)
default:
panic(fmt.Errorf("unhandled request %+v", r))
}
}
}
}
// processKill cancels all nodes in the supervision tree. This is only called right before exiting the processor, so
// they do not get automatically restarted.
func (s *supervisor) processKill() {
s.mu.Lock()
defer s.mu.Unlock()
// Gather all context cancel functions.
var cancels []func()
queue := []*node{s.root}
for {
if len(queue) == 0 {
break
}
cur := queue[0]
queue = queue[1:]
cancels = append(cancels, cur.ctxC)
for _, c := range cur.children {
queue = append(queue, c)
}
}
// Call all context cancels.
for _, c := range cancels {
c()
}
}
// processSchedule starts a node's runnable in a goroutine and records its output once it's done.
func (s *supervisor) processSchedule(r *processorRequestSchedule) {
s.mu.Lock()
defer s.mu.Unlock()
n := s.nodeByDN(r.dn)
go func() {
if !s.propagatePanic {
defer func() {
if rec := recover(); rec != nil {
s.pReq <- &processorRequest{
died: &processorRequestDied{
dn: r.dn,
err: fmt.Errorf("panic: %v, stacktrace: %s", rec, string(debug.Stack())),
},
}
}
}()
}
res := n.runnable(n.ctx)
s.pReq <- &processorRequest{
died: &processorRequestDied{
dn: r.dn,
err: res,
},
}
}()
}
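// Note: a recovered panic above is converted into a processorRequestDied carrying the panic value and
// stack trace, so the supervision tree treats a panicking runnable the same way as one that returned
// an error.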
// processDied records the result from a runnable goroutine, and updates its node state accordingly. If the result
// is a death and not an expected exit, related nodes (ie. children and group siblings) are canceled accordingly.
func (s *supervisor) processDied(r *processorRequestDied) {
s.mu.Lock()
defer s.mu.Unlock()
// Okay, so a Runnable has quit. What now?
n := s.nodeByDN(r.dn)
ctx := n.ctx
// Simple case: it was marked as Done and quit with no error.
if n.state == nodeStateDone && r.err == nil {
// Do nothing. This was supposed to happen. Keep the process as DONE.
return
}
// Find innermost error to check if it's a context canceled error.
perr := r.err
for
|
// Simple case: the context was canceled and the returned error is the context error.
if err := ctx.Err(); err != nil && perr == err {
// Mark the node as canceled successfully.
n.state = nodeStateCanceled
return
}
// Otherwise, the Runnable should not have died or quit. Handle accordingly.
err := r.err
// A lack of returned error is also an error.
if err == nil {
err = fmt.Errorf("returned when %s", n.state)
} else {
err = fmt.Errorf("returned error when %s: %w", n.state, err)
}
s.ilogger.Errorf("Runnable %s died: %v", n.dn(), err)
// Mark as dead.
n.state = nodeStateDead
// Cancel that node's context, just in case something still depends on it.
n.ctxC()
// Cancel all siblings.
if n.parent != nil {
for name, _ := range n.parent.groupSiblings(n.name) {
if name == n.name {
continue
}
sibling := n.parent.children[name]
// TODO(q3k): does this need to run in a goroutine, ie. can a context cancel block?
sibling.ctxC()
}
}
}
// processGC runs the GC process. It's not really Garbage Collection, as in, it doesn't remove unnecessary tree nodes -
// but it does find nodes that need to be restarted, find the subset that can and then schedules them for running.
// As such, it's less of a Garbage Collector and more of a Necromancer. However, GC is a friendlier name.
func (s *supervisor) processGC() {
s.mu.Lock()
defer s.mu.Unlock()
// The 'GC' serves as the main business logic of the supervision tree. It traverses a locked tree and tries to
// find subtrees that must be restarted (because of a DEAD/CANCELED runnable). It then finds which of these
// subtrees that should be restarted can be restarted, ie. which ones are fully recursively DEAD/CANCELED. It
// also finds the smallest set of largest subtrees that can be restarted, ie. if there's multiple DEAD runnables
// that can be restarted at once, it will do so.
// Phase one: Find all leaves.
// This is a simple DFS that finds all the leaves of the tree, ie all nodes that do not have children nodes.
leaves := make(map[string]bool)
queue := []*node{s.root}
for {
if len(queue) == 0 {
break
}
cur := queue[0]
queue = queue[1:]
for _, c := range cur.children {
queue = append([]*node{c}, queue...)
}
if len(cur.children) == 0 {
leaves[cur.dn()] = true
}
}
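// Illustrative example: for a tree root -> {a -> {b, c}, d}, this phase yields
// leaves = {"root.a.b": true, "root.a.c": true, "root.d": true}; the inner nodes root and root.a are
// only considered in phase two below.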
// Phase two: traverse tree from node to root and make note of all subtrees that can be restarted.
// A subtree is restartable/ready iff every node in that subtree is either CANCELED, DEAD or DONE.
// Such a 'ready' subtree can be restarted by the supervisor if needed.
// DNs that we already visited.
visited := make(map[string]bool)
// DNs whose subtrees are ready to be restarted.
// These are all subtrees recursively - ie., root.a.a and root.a will both be marked here.
ready := make(map[string]bool)
// We build a queue of nodes to visit, starting from the leaves.
queue = []*node{}
for l, _ := range leaves {
queue = append(queue, s.nodeByDN(l))
}
for {
if len(queue) == 0 {
break
}
cur := queue[0]
curDn := cur.dn()
queue = queue[1:]
// Do we have a decision about our children?
allVisited := true
for _, c := range cur.children {
if !visited[c.dn()] {
allVisited = false
break
}
}
// If no decision about children is available, it means we ended up in this subtree through some shorter path
// of a shorter/lower-order leaf. There is a path to a leaf that's longer than the one that caused this node
// to be enqueued. Easy solution: just push back the current element and retry later.
if !allVisited {
// Push back to queue and wait for a decision later.
queue = append(queue, cur)
continue
}
// All children have been visited and we have an idea about whether they're ready/restartable. All of the node
|
{
if inner := errors.Unwrap(perr); inner != nil {
perr = inner
continue
}
break
}
|
conditional_block
|
supervisor_processor.go
|
runnable has died.
type processorRequestDied struct {
dn string
err error
}
type processorRequestWaitSettled struct {
waiter chan struct{}
}
// processor is the main processing loop.
func (s *supervisor) processor(ctx context.Context) {
s.ilogger.Info("supervisor processor started")
// Waiters waiting for the GC to be settled.
var waiters []chan struct{}
// The GC will run every millisecond if needed. Any time the processor requests a change in the supervision tree
// (ie a death or a new runnable) it will mark the state as dirty and run the GC on the next millisecond cycle.
gc := time.NewTicker(1 * time.Millisecond)
defer gc.Stop()
clean := true
// How long the GC has been clean, counted in GC ticks. This is used to notify 'settled' waiters.
cleanCycles := 0
markDirty := func() {
clean = false
cleanCycles = 0
}
for {
select {
case <-ctx.Done():
s.ilogger.Infof("supervisor processor exiting: %v", ctx.Err())
s.processKill()
s.ilogger.Info("supervisor exited")
return
case <-gc.C:
if !clean {
s.processGC()
}
clean = true
cleanCycles += 1
// This threshold is somewhat arbitrary. It's a balance between test speed and test reliability.
if cleanCycles > 50 {
for _, w := range waiters {
close(w)
}
waiters = nil
}
case r := <-s.pReq:
switch {
case r.schedule != nil:
s.processSchedule(r.schedule)
markDirty()
case r.died != nil:
s.processDied(r.died)
markDirty()
case r.waitSettled != nil:
waiters = append(waiters, r.waitSettled.waiter)
default:
panic(fmt.Errorf("unhandled request %+v", r))
}
}
}
}
// processKill cancels all nodes in the supervision tree. This is only called right before exiting the processor, so
// they do not get automatically restarted.
func (s *supervisor)
|
() {
s.mu.Lock()
defer s.mu.Unlock()
// Gather all context cancel functions.
var cancels []func()
queue := []*node{s.root}
for {
if len(queue) == 0 {
break
}
cur := queue[0]
queue = queue[1:]
cancels = append(cancels, cur.ctxC)
for _, c := range cur.children {
queue = append(queue, c)
}
}
// Call all context cancels.
for _, c := range cancels {
c()
}
}
// processSchedule starts a node's runnable in a goroutine and records its output once it's done.
func (s *supervisor) processSchedule(r *processorRequestSchedule) {
s.mu.Lock()
defer s.mu.Unlock()
n := s.nodeByDN(r.dn)
go func() {
if !s.propagatePanic {
defer func() {
if rec := recover(); rec != nil {
s.pReq <- &processorRequest{
died: &processorRequestDied{
dn: r.dn,
err: fmt.Errorf("panic: %v, stacktrace: %s", rec, string(debug.Stack())),
},
}
}
}()
}
res := n.runnable(n.ctx)
s.pReq <- &processorRequest{
died: &processorRequestDied{
dn: r.dn,
err: res,
},
}
}()
}
// processDied records the result from a runnable goroutine, and updates its node state accordingly. If the result
// is a death and not an expected exit, related nodes (ie. children and group siblings) are canceled accordingly.
func (s *supervisor) processDied(r *processorRequestDied) {
s.mu.Lock()
defer s.mu.Unlock()
// Okay, so a Runnable has quit. What now?
n := s.nodeByDN(r.dn)
ctx := n.ctx
// Simple case: it was marked as Done and quit with no error.
if n.state == nodeStateDone && r.err == nil {
// Do nothing. This was supposed to happen. Keep the process as DONE.
return
}
// Find innermost error to check if it's a context canceled error.
perr := r.err
for {
if inner := errors.Unwrap(perr); inner != nil {
perr = inner
continue
}
break
}
// Simple case: the context was canceled and the returned error is the context error.
if err := ctx.Err(); err != nil && perr == err {
// Mark the node as canceled successfully.
n.state = nodeStateCanceled
return
}
// Otherwise, the Runnable should not have died or quit. Handle accordingly.
err := r.err
// A lack of returned error is also an error.
if err == nil {
err = fmt.Errorf("returned when %s", n.state)
} else {
err = fmt.Errorf("returned error when %s: %w", n.state, err)
}
s.ilogger.Errorf("Runnable %s died: %v", n.dn(), err)
// Mark as dead.
n.state = nodeStateDead
// Cancel that node's context, just in case something still depends on it.
n.ctxC()
// Cancel all siblings.
if n.parent != nil {
for name, _ := range n.parent.groupSiblings(n.name) {
if name == n.name {
continue
}
sibling := n.parent.children[name]
// TODO(q3k): does this need to run in a goroutine, ie. can a context cancel block?
sibling.ctxC()
}
}
}
// processGC runs the GC process. It's not really Garbage Collection, as in, it doesn't remove unnecessary tree nodes -
// but it does find nodes that need to be restarted, find the subset that can and then schedules them for running.
// As such, it's less of a Garbage Collector and more of a Necromancer. However, GC is a friendlier name.
func (s *supervisor) processGC() {
s.mu.Lock()
defer s.mu.Unlock()
// The 'GC' serves as the main business logic of the supervision tree. It traverses a locked tree and tries to
// find subtrees that must be restarted (because of a DEAD/CANCELED runnable). It then finds which of these
// subtrees that should be restarted can be restarted, ie. which ones are fully recursively DEAD/CANCELED. It
// also finds the smallest set of largest subtrees that can be restarted, ie. if there's multiple DEAD runnables
// that can be restarted at once, it will do so.
// Phase one: Find all leaves.
// This is a simple DFS that finds all the leaves of the tree, ie all nodes that do not have children nodes.
leaves := make(map[string]bool)
queue := []*node{s.root}
for {
if len(queue) == 0 {
break
}
cur := queue[0]
queue = queue[1:]
for _, c := range cur.children {
queue = append([]*node{c}, queue...)
}
if len(cur.children) == 0 {
leaves[cur.dn()] = true
}
}
// Phase two: traverse tree from node to root and make note of all subtrees that can be restarted.
// A subtree is restartable/ready iff every node in that subtree is either CANCELED, DEAD or DONE.
// Such a 'ready' subtree can be restarted by the supervisor if needed.
// DNs that we already visited.
visited := make(map[string]bool)
// DNs whose subtrees are ready to be restarted.
// These are all subtrees recursively - ie., root.a.a and root.a will both be marked here.
ready := make(map[string]bool)
// We build a queue of nodes to visit, starting from the leaves.
queue = []*node{}
for l, _ := range leaves {
queue = append(queue, s.nodeByDN(l))
}
for {
if len(queue) == 0 {
break
}
cur := queue[0]
curDn := cur.dn()
queue = queue[1:]
// Do we have a decision about our children?
allVisited := true
for _, c := range cur.children {
if !visited[c.dn()] {
allVisited = false
break
}
}
// If no decision about children is available, it means we ended up in this subtree through some shorter path
// of a shorter/lower-order leaf. There is a path to a leaf that's longer than the one that caused this node
// to be enqueued. Easy solution: just push back the current element and retry later.
if !allVisited {
// Push back to queue and wait for a decision later.
queue = append(queue, cur)
continue
}
// All children have been visited and we have an idea about whether they're ready/restartable. All of the node's
|
processKill
|
identifier_name
|
supervisor_processor.go
|
runnable has died.
type processorRequestDied struct {
dn string
err error
}
type processorRequestWaitSettled struct {
waiter chan struct{}
}
// processor is the main processing loop.
func (s *supervisor) processor(ctx context.Context) {
s.ilogger.Info("supervisor processor started")
// Waiters waiting for the GC to be settled.
var waiters []chan struct{}
// The GC will run every millisecond if needed. Any time the processor requests a change in the supervision tree
// (ie a death or a new runnable) it will mark the state as dirty and run the GC on the next millisecond cycle.
gc := time.NewTicker(1 * time.Millisecond)
defer gc.Stop()
clean := true
// How long the GC has been clean, counted in GC ticks. This is used to notify 'settled' waiters.
cleanCycles := 0
markDirty := func() {
clean = false
cleanCycles = 0
}
for {
select {
case <-ctx.Done():
s.ilogger.Infof("supervisor processor exiting: %v", ctx.Err())
s.processKill()
s.ilogger.Info("supervisor exited")
return
case <-gc.C:
if !clean {
s.processGC()
}
clean = true
cleanCycles += 1
// This threshold is somewhat arbitrary. It's a balance between test speed and test reliability.
if cleanCycles > 50 {
for _, w := range waiters {
close(w)
}
waiters = nil
}
case r := <-s.pReq:
switch {
case r.schedule != nil:
s.processSchedule(r.schedule)
markDirty()
case r.died != nil:
s.processDied(r.died)
markDirty()
case r.waitSettled != nil:
waiters = append(waiters, r.waitSettled.waiter)
default:
panic(fmt.Errorf("unhandled request %+v", r))
}
}
}
}
// processKill cancels all nodes in the supervision tree. This is only called right before exiting the processor, so
// they do not get automatically restarted.
func (s *supervisor) processKill() {
s.mu.Lock()
defer s.mu.Unlock()
// Gather all context cancel functions.
var cancels []func()
queue := []*node{s.root}
for {
if len(queue) == 0 {
break
}
cur := queue[0]
queue = queue[1:]
cancels = append(cancels, cur.ctxC)
for _, c := range cur.children {
queue = append(queue, c)
}
}
// Call all context cancels.
for _, c := range cancels {
c()
}
}
// processSchedule starts a node's runnable in a goroutine and records its output once it's done.
func (s *supervisor) processSchedule(r *processorRequestSchedule) {
s.mu.Lock()
defer s.mu.Unlock()
n := s.nodeByDN(r.dn)
go func() {
if !s.propagatePanic {
defer func() {
if rec := recover(); rec != nil {
s.pReq <- &processorRequest{
died: &processorRequestDied{
dn: r.dn,
err: fmt.Errorf("panic: %v, stacktrace: %s", rec, string(debug.Stack())),
},
}
}
}()
}
res := n.runnable(n.ctx)
s.pReq <- &processorRequest{
died: &processorRequestDied{
dn: r.dn,
err: res,
},
}
}()
}
// processDied records the result from a runnable goroutine, and updates its node state accordingly. If the result
// is a death and not an expected exit, related nodes (ie. children and group siblings) are canceled accordingly.
func (s *supervisor) processDied(r *processorRequestDied) {
s.mu.Lock()
defer s.mu.Unlock()
|
n := s.nodeByDN(r.dn)
ctx := n.ctx
// Simple case: it was marked as Done and quit with no error.
if n.state == nodeStateDone && r.err == nil {
// Do nothing. This was supposed to happen. Keep the process as DONE.
return
}
// Find innermost error to check if it's a context canceled error.
perr := r.err
for {
if inner := errors.Unwrap(perr); inner != nil {
perr = inner
continue
}
break
}
// Simple case: the context was canceled and the returned error is the context error.
if err := ctx.Err(); err != nil && perr == err {
// Mark the node as canceled successfully.
n.state = nodeStateCanceled
return
}
// Otherwise, the Runnable should not have died or quit. Handle accordingly.
err := r.err
// A lack of returned error is also an error.
if err == nil {
err = fmt.Errorf("returned when %s", n.state)
} else {
err = fmt.Errorf("returned error when %s: %w", n.state, err)
}
s.ilogger.Errorf("Runnable %s died: %v", n.dn(), err)
// Mark as dead.
n.state = nodeStateDead
// Cancel that node's context, just in case something still depends on it.
n.ctxC()
// Cancel all siblings.
if n.parent != nil {
for name, _ := range n.parent.groupSiblings(n.name) {
if name == n.name {
continue
}
sibling := n.parent.children[name]
// TODO(q3k): does this need to run in a goroutine, ie. can a context cancel block?
sibling.ctxC()
}
}
}
// processGC runs the GC process. It's not really Garbage Collection, as in, it doesn't remove unnecessary tree nodes -
// but it does find nodes that need to be restarted, find the subset that can and then schedules them for running.
// As such, it's less of a Garbage Collector and more of a Necromancer. However, GC is a friendlier name.
func (s *supervisor) processGC() {
s.mu.Lock()
defer s.mu.Unlock()
// The 'GC' serves as the main business logic of the supervision tree. It traverses a locked tree and tries to
// find subtrees that must be restarted (because of a DEAD/CANCELED runnable). It then finds which of these
// subtrees that should be restarted can be restarted, ie. which ones are fully recursively DEAD/CANCELED. It
// also finds the smallest set of largest subtrees that can be restarted, ie. if there's multiple DEAD runnables
// that can be restarted at once, it will do so.
// Phase one: Find all leaves.
// This is a simple DFS that finds all the leaves of the tree, ie all nodes that do not have children nodes.
leaves := make(map[string]bool)
queue := []*node{s.root}
for {
if len(queue) == 0 {
break
}
cur := queue[0]
queue = queue[1:]
for _, c := range cur.children {
queue = append([]*node{c}, queue...)
}
if len(cur.children) == 0 {
leaves[cur.dn()] = true
}
}
// Phase two: traverse tree from node to root and make note of all subtrees that can be restarted.
// A subtree is restartable/ready iff every node in that subtree is either CANCELED, DEAD or DONE.
// Such a 'ready' subtree can be restarted by the supervisor if needed.
// DNs that we already visited.
visited := make(map[string]bool)
// DNs whose subtrees are ready to be restarted.
// These are all subtrees recursively - ie., root.a.a and root.a will both be marked here.
ready := make(map[string]bool)
// We build a queue of nodes to visit, starting from the leaves.
queue = []*node{}
for l, _ := range leaves {
queue = append(queue, s.nodeByDN(l))
}
for {
if len(queue) == 0 {
break
}
cur := queue[0]
curDn := cur.dn()
queue = queue[1:]
// Do we have a decision about our children?
allVisited := true
for _, c := range cur.children {
if !visited[c.dn()] {
allVisited = false
break
}
}
// If no decision about children is available, it means we ended up in this subtree through some shorter path
// of a shorter/lower-order leaf. There is a path to a leaf that's longer than the one that caused this node
// to be enqueued. Easy solution: just push back the current element and retry later.
if !allVisited {
// Push back to queue and wait for a decision later.
queue = append(queue, cur)
continue
}
// All children have been visited and we have an idea about whether they're ready/restartable. All of the node's
|
// Okay, so a Runnable has quit. What now?
|
random_line_split
|
supervisor_processor.go
|
runnable has died.
type processorRequestDied struct {
dn string
err error
}
type processorRequestWaitSettled struct {
waiter chan struct{}
}
// processor is the main processing loop.
func (s *supervisor) processor(ctx context.Context)
|
for {
select {
case <-ctx.Done():
s.ilogger.Infof("supervisor processor exiting: %v", ctx.Err())
s.processKill()
s.ilogger.Info("supervisor exited")
return
case <-gc.C:
if !clean {
s.processGC()
}
clean = true
cleanCycles += 1
// This threshold is somewhat arbitrary. It's a balance between test speed and test reliability.
if cleanCycles > 50 {
for _, w := range waiters {
close(w)
}
waiters = nil
}
case r := <-s.pReq:
switch {
case r.schedule != nil:
s.processSchedule(r.schedule)
markDirty()
case r.died != nil:
s.processDied(r.died)
markDirty()
case r.waitSettled != nil:
waiters = append(waiters, r.waitSettled.waiter)
default:
panic(fmt.Errorf("unhandled request %+v", r))
}
}
}
}
// processKill cancels all nodes in the supervision tree. This is only called right before exiting the processor, so
// they do not get automatically restarted.
func (s *supervisor) processKill() {
s.mu.Lock()
defer s.mu.Unlock()
// Gather all context cancel functions.
var cancels []func()
queue := []*node{s.root}
for {
if len(queue) == 0 {
break
}
cur := queue[0]
queue = queue[1:]
cancels = append(cancels, cur.ctxC)
for _, c := range cur.children {
queue = append(queue, c)
}
}
// Call all context cancels.
for _, c := range cancels {
c()
}
}
// processSchedule starts a node's runnable in a goroutine and records its output once it's done.
func (s *supervisor) processSchedule(r *processorRequestSchedule) {
s.mu.Lock()
defer s.mu.Unlock()
n := s.nodeByDN(r.dn)
go func() {
if !s.propagatePanic {
defer func() {
if rec := recover(); rec != nil {
s.pReq <- &processorRequest{
died: &processorRequestDied{
dn: r.dn,
err: fmt.Errorf("panic: %v, stacktrace: %s", rec, string(debug.Stack())),
},
}
}
}()
}
res := n.runnable(n.ctx)
s.pReq <- &processorRequest{
died: &processorRequestDied{
dn: r.dn,
err: res,
},
}
}()
}
// processDied records the result from a runnable goroutine, and updates its node state accordingly. If the result
// is a death and not an expected exit, related nodes (ie. children and group siblings) are canceled accordingly.
func (s *supervisor) processDied(r *processorRequestDied) {
s.mu.Lock()
defer s.mu.Unlock()
// Okay, so a Runnable has quit. What now?
n := s.nodeByDN(r.dn)
ctx := n.ctx
// Simple case: it was marked as Done and quit with no error.
if n.state == nodeStateDone && r.err == nil {
// Do nothing. This was supposed to happen. Keep the process as DONE.
return
}
// Find innermost error to check if it's a context canceled error.
perr := r.err
for {
if inner := errors.Unwrap(perr); inner != nil {
perr = inner
continue
}
break
}
// Simple case: the context was canceled and the returned error is the context error.
if err := ctx.Err(); err != nil && perr == err {
// Mark the node as canceled successfully.
n.state = nodeStateCanceled
return
}
// Otherwise, the Runnable should not have died or quit. Handle accordingly.
err := r.err
// A lack of returned error is also an error.
if err == nil {
err = fmt.Errorf("returned when %s", n.state)
} else {
err = fmt.Errorf("returned error when %s: %w", n.state, err)
}
s.ilogger.Errorf("Runnable %s died: %v", n.dn(), err)
// Mark as dead.
n.state = nodeStateDead
// Cancel that node's context, just in case something still depends on it.
n.ctxC()
// Cancel all siblings.
if n.parent != nil {
for name, _ := range n.parent.groupSiblings(n.name) {
if name == n.name {
continue
}
sibling := n.parent.children[name]
// TODO(q3k): does this need to run in a goroutine, ie. can a context cancel block?
sibling.ctxC()
}
}
}
// processGC runs the GC process. It's not really Garbage Collection, as in, it doesn't remove unnecessary tree nodes -
// but it does find nodes that need to be restarted, find the subset that can and then schedules them for running.
// As such, it's less of a Garbage Collector and more of a Necromancer. However, GC is a friendlier name.
func (s *supervisor) processGC() {
s.mu.Lock()
defer s.mu.Unlock()
// The 'GC' serves as the main business logic of the supervision tree. It traverses a locked tree and tries to
// find subtrees that must be restarted (because of a DEAD/CANCELED runnable). It then finds which of these
// subtrees that should be restarted can be restarted, ie. which ones are fully recursively DEAD/CANCELED. It
// also finds the smallest set of largest subtrees that can be restarted, ie. if there's multiple DEAD runnables
// that can be restarted at once, it will do so.
// Phase one: Find all leaves.
// This is a simple DFS that finds all the leaves of the tree, ie all nodes that do not have children nodes.
leaves := make(map[string]bool)
queue := []*node{s.root}
for {
if len(queue) == 0 {
break
}
cur := queue[0]
queue = queue[1:]
for _, c := range cur.children {
queue = append([]*node{c}, queue...)
}
if len(cur.children) == 0 {
leaves[cur.dn()] = true
}
}
// Phase two: traverse tree from node to root and make note of all subtrees that can be restarted.
// A subtree is restartable/ready iff every node in that subtree is either CANCELED, DEAD or DONE.
// Such a 'ready' subtree can be restarted by the supervisor if needed.
// DNs that we already visited.
visited := make(map[string]bool)
// DNs whose subtrees are ready to be restarted.
// These are all subtrees recursively - ie., root.a.a and root.a will both be marked here.
ready := make(map[string]bool)
// We build a queue of nodes to visit, starting from the leaves.
queue = []*node{}
for l, _ := range leaves {
queue = append(queue, s.nodeByDN(l))
}
for {
if len(queue) == 0 {
break
}
cur := queue[0]
curDn := cur.dn()
queue = queue[1:]
// Do we have a decision about our children?
allVisited := true
for _, c := range cur.children {
if !visited[c.dn()] {
allVisited = false
break
}
}
// If no decision about children is available, it means we ended up in this subtree through some shorter path
// of a shorter/lower-order leaf. There is a path to a leaf that's longer than the one that caused this node
// to be enqueued. Easy solution: just push back the current element and retry later.
if !allVisited {
// Push back to queue and wait for a decision later.
queue = append(queue, cur)
continue
}
// All children have been visited and we have an idea about whether they're ready/restartable. All of the node's
|
{
s.ilogger.Info("supervisor processor started")
// Waiters waiting for the GC to be settled.
var waiters []chan struct{}
// The GC will run every millisecond if needed. Any time the processor requests a change in the supervision tree
// (ie a death or a new runnable) it will mark the state as dirty and run the GC on the next millisecond cycle.
gc := time.NewTicker(1 * time.Millisecond)
defer gc.Stop()
clean := true
// How long the GC has been clean, counted in GC ticks. This is used to notify 'settled' waiters.
cleanCycles := 0
markDirty := func() {
clean = false
cleanCycles = 0
}
|
identifier_body
|
ipc.rs
|
EventLoopHandle::spawn(move |handle| Self::with_event_loop(&path, &handle).map_err(Into::into))
}
/// Create new IPC transport within existing Event Loop.
///
/// IPC is only available on Unix. On other systems, this always returns an error.
#[cfg(unix)]
pub fn with_event_loop<P>(path: P, handle: &reactor::Handle) -> Result<Self>
where
P: AsRef<Path>,
{
log::trace!("Connecting to: {:?}", path.as_ref());
let stream = UnixStream::connect(path, handle)?;
Self::with_stream(stream, handle)
}
/// Creates new IPC transport from existing `UnixStream` and `Handle`
#[cfg(unix)]
fn with_stream(stream: UnixStream, handle: &reactor::Handle) -> Result<Self> {
let (read, write) = stream.split();
let (write_sender, write_receiver) = mpsc::unbounded();
let pending: Arc<Mutex<BTreeMap<RequestId, Pending>>> = Default::default();
let subscriptions: Arc<Mutex<BTreeMap<SubscriptionId, Subscription>>> = Default::default();
let r = ReadStream {
read,
pending: pending.clone(),
subscriptions: subscriptions.clone(),
buffer: vec![],
current_pos: 0,
};
let w = WriteStream {
write,
incoming: write_receiver,
state: WriteState::WaitingForRequest,
};
handle.spawn(r);
handle.spawn(w);
Ok(Ipc {
id: Arc::new(atomic::AtomicUsize::new(1)),
write_sender,
pending,
subscriptions,
})
}
#[cfg(not(unix))]
pub fn with_event_loop<P>(_path: P, _handle: &reactor::Handle) -> Result<Self> {
return Err(Error::Transport("IPC transport is only supported on Unix".into()).into());
}
fn send_request<F, O>(&self, id: RequestId, request: rpc::Request, extract: F) -> IpcTask<F>
where
F: Fn(Vec<Result<rpc::Value>>) -> O,
{
let request = helpers::to_string(&request);
log::debug!("[{}] Calling: {}", id, request);
let (tx, rx) = futures::oneshot();
self.pending.lock().insert(id, tx);
let result = self
.write_sender
.unbounded_send(request.into_bytes())
.map_err(|_| Error::Io(io::ErrorKind::BrokenPipe.into()));
Response::new(id, result, rx, extract)
}
}
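// Request lifecycle (summary): `prepare` hands out ids from the atomic counter, `send_request`
// registers a oneshot sender for that id in `pending` and pushes the serialized request to the writer
// task; when `ReadStream` later sees a response with a matching numeric id it completes the oneshot,
// which resolves the task returned to the caller.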
impl Transport for Ipc {
type Out = IpcTask<fn(Vec<Result<rpc::Value>>) -> Result<rpc::Value>>;
fn prepare(&self, method: &str, params: Vec<rpc::Value>) -> (RequestId, rpc::Call) {
let id = self.id.fetch_add(1, atomic::Ordering::AcqRel);
let request = helpers::build_request(id, method, params);
(id, request)
}
fn send(&self, id: RequestId, request: rpc::Call) -> Self::Out {
self.send_request(id, rpc::Request::Single(request), single_response)
}
}
fn single_response(response: Vec<Result<rpc::Value>>) -> Result<rpc::Value> {
match response.into_iter().next() {
Some(res) => res,
None => Err(Error::InvalidResponse("Expected single, got batch.".into())),
}
}
impl BatchTransport for Ipc {
type Batch = IpcTask<fn(Vec<Result<rpc::Value>>) -> Result<Vec<Result<rpc::Value>>>>;
fn
|
<T>(&self, requests: T) -> Self::Batch
where
T: IntoIterator<Item = (RequestId, rpc::Call)>,
{
let mut it = requests.into_iter();
let (id, first) = it.next().map(|x| (x.0, Some(x.1))).unwrap_or_else(|| (0, None));
let requests = first.into_iter().chain(it.map(|x| x.1)).collect();
self.send_request(id, rpc::Request::Batch(requests), Ok)
}
}
impl DuplexTransport for Ipc {
type NotificationStream = Box<dyn Stream<Item = rpc::Value, Error = Error> + Send + 'static>;
fn subscribe(&self, id: &SubscriptionId) -> Self::NotificationStream {
let (tx, rx) = mpsc::unbounded();
if self.subscriptions.lock().insert(id.clone(), tx).is_some() {
log::warn!("Replacing already-registered subscription with id {:?}", id)
}
Box::new(rx.map_err(|()| Error::Transport("No data available".into())))
}
fn unsubscribe(&self, id: &SubscriptionId) {
self.subscriptions.lock().remove(id);
}
}
enum WriteState {
WaitingForRequest,
Writing { buffer: Vec<u8>, current_pos: usize },
}
/// Writing part of the IPC transport
/// Awaits new requests using `mpsc::UnboundedReceiver` and writes them to the socket.
#[cfg(unix)]
struct WriteStream {
write: WriteHalf<UnixStream>,
incoming: mpsc::UnboundedReceiver<Vec<u8>>,
state: WriteState,
}
#[cfg(unix)]
impl Future for WriteStream {
type Item = ();
type Error = ();
fn poll(&mut self) -> futures::Poll<Self::Item, Self::Error> {
loop {
self.state = match self.state {
WriteState::WaitingForRequest => {
// Ask for more to write
let to_send = try_ready!(self.incoming.poll());
if let Some(to_send) = to_send {
log::trace!("Got new message to write: {:?}", String::from_utf8_lossy(&to_send));
WriteState::Writing {
buffer: to_send,
current_pos: 0,
}
} else {
return Ok(futures::Async::NotReady);
}
}
WriteState::Writing {
ref buffer,
ref mut current_pos,
} => {
// Write everything in the buffer
while *current_pos < buffer.len() {
let n = try_nb!(self.write.write(&buffer[*current_pos..]));
*current_pos += n;
if n == 0 {
log::warn!("IO Error: Zero write.");
return Err(()); // zero write?
}
}
WriteState::WaitingForRequest
}
};
}
}
}
/// Reading part of the IPC transport.
/// Reads data on the socket and tries to dispatch it to awaiting requests.
#[cfg(unix)]
struct ReadStream {
read: ReadHalf<UnixStream>,
pending: Arc<Mutex<BTreeMap<RequestId, Pending>>>,
subscriptions: Arc<Mutex<BTreeMap<SubscriptionId, Subscription>>>,
buffer: Vec<u8>,
current_pos: usize,
}
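// `buffer` accumulates raw bytes read from the socket and `current_pos` marks how many of them are
// currently valid. The poll loop below grows the buffer geometrically (in chunks capped at
// DEFAULT_BUF_SIZE), extracts complete responses via `extract_response`, and shifts any trailing
// partial message back to the start of the buffer.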
#[cfg(unix)]
impl Future for ReadStream {
type Item = ();
type Error = ();
fn poll(&mut self) -> futures::Poll<Self::Item, Self::Error> {
const DEFAULT_BUF_SIZE: usize = 4096;
let mut new_write_size = 128;
loop {
if self.current_pos == self.buffer.len() {
if new_write_size < DEFAULT_BUF_SIZE {
new_write_size *= 2;
}
self.buffer.resize(self.current_pos + new_write_size, 0);
}
let read = try_nb!(self.read.read(&mut self.buffer[self.current_pos..]));
if read == 0 {
return Ok(futures::Async::NotReady);
}
let mut min = self.current_pos;
self.current_pos += read;
while let Some((response, len)) = Self::extract_response(&self.buffer[0..self.current_pos], min) {
// Respond
self.respond(response);
// copy rest of buffer to the beginning
for i in len..self.current_pos {
self.buffer.swap(i, i - len);
}
// truncate the buffer
let new_len = self.current_pos - len;
self.buffer.truncate(new_len + new_write_size);
// Set new positions
self.current_pos = new_len;
min = 0;
}
}
}
}
enum Message {
Rpc(Vec<rpc::Output>),
Notification(rpc::Notification),
}
#[cfg(unix)]
impl ReadStream {
fn respond(&self, response: Message) {
match response {
Message::Rpc(outputs) => {
let id = match outputs.get(0) {
Some(&rpc::Output::Success(ref success)) => success.id.clone(),
Some(&rpc::Output::Failure(ref failure)) => failure.id.clone(),
None => rpc::Id::Num(0),
};
if let rpc::Id::Num(num) = id {
if let Some(request) = self.pending.lock().remove(&(num as usize)) {
log::trace!("Responding to (id: {:?}) with {:?}", num, outputs);
if let Err(err) = request.send(helpers::to_results_from_outputs(outputs)) {
log::warn!("Sending a response to deallocated channel: {:?}", err);
}
} else {
log::warn!("Got response for unknown request (id: {:?})", num);
}
} else {
log::warn!("Got unsupported response (id: {:?})", id);
}
}
Message::
|
send_batch
|
identifier_name
|
ipc.rs
|
EventLoopHandle::spawn(move |handle| Self::with_event_loop(&path, &handle).map_err(Into::into))
}
/// Create new IPC transport within existing Event Loop.
///
/// IPC is only available on Unix. On other systems, this always returns an error.
#[cfg(unix)]
pub fn with_event_loop<P>(path: P, handle: &reactor::Handle) -> Result<Self>
where
P: AsRef<Path>,
{
log::trace!("Connecting to: {:?}", path.as_ref());
let stream = UnixStream::connect(path, handle)?;
Self::with_stream(stream, handle)
}
/// Creates new IPC transport from existing `UnixStream` and `Handle`
#[cfg(unix)]
fn with_stream(stream: UnixStream, handle: &reactor::Handle) -> Result<Self> {
let (read, write) = stream.split();
let (write_sender, write_receiver) = mpsc::unbounded();
let pending: Arc<Mutex<BTreeMap<RequestId, Pending>>> = Default::default();
let subscriptions: Arc<Mutex<BTreeMap<SubscriptionId, Subscription>>> = Default::default();
let r = ReadStream {
read,
pending: pending.clone(),
subscriptions: subscriptions.clone(),
buffer: vec![],
current_pos: 0,
};
let w = WriteStream {
write,
incoming: write_receiver,
state: WriteState::WaitingForRequest,
};
handle.spawn(r);
handle.spawn(w);
Ok(Ipc {
id: Arc::new(atomic::AtomicUsize::new(1)),
write_sender,
pending,
subscriptions,
})
}
#[cfg(not(unix))]
pub fn with_event_loop<P>(_path: P, _handle: &reactor::Handle) -> Result<Self> {
return Err(Error::Transport("IPC transport is only supported on Unix".into()).into());
}
fn send_request<F, O>(&self, id: RequestId, request: rpc::Request, extract: F) -> IpcTask<F>
where
F: Fn(Vec<Result<rpc::Value>>) -> O,
{
let request = helpers::to_string(&request);
log::debug!("[{}] Calling: {}", id, request);
let (tx, rx) = futures::oneshot();
self.pending.lock().insert(id, tx);
let result = self
.write_sender
.unbounded_send(request.into_bytes())
.map_err(|_| Error::Io(io::ErrorKind::BrokenPipe.into()));
Response::new(id, result, rx, extract)
}
}
impl Transport for Ipc {
type Out = IpcTask<fn(Vec<Result<rpc::Value>>) -> Result<rpc::Value>>;
fn prepare(&self, method: &str, params: Vec<rpc::Value>) -> (RequestId, rpc::Call) {
let id = self.id.fetch_add(1, atomic::Ordering::AcqRel);
let request = helpers::build_request(id, method, params);
(id, request)
}
fn send(&self, id: RequestId, request: rpc::Call) -> Self::Out {
self.send_request(id, rpc::Request::Single(request), single_response)
}
}
fn single_response(response: Vec<Result<rpc::Value>>) -> Result<rpc::Value> {
match response.into_iter().next() {
Some(res) => res,
None => Err(Error::InvalidResponse("Expected single, got batch.".into())),
}
}
impl BatchTransport for Ipc {
type Batch = IpcTask<fn(Vec<Result<rpc::Value>>) -> Result<Vec<Result<rpc::Value>>>>;
fn send_batch<T>(&self, requests: T) -> Self::Batch
where
T: IntoIterator<Item = (RequestId, rpc::Call)>,
{
let mut it = requests.into_iter();
let (id, first) = it.next().map(|x| (x.0, Some(x.1))).unwrap_or_else(|| (0, None));
let requests = first.into_iter().chain(it.map(|x| x.1)).collect();
self.send_request(id, rpc::Request::Batch(requests), Ok)
}
}
impl DuplexTransport for Ipc {
type NotificationStream = Box<dyn Stream<Item = rpc::Value, Error = Error> + Send + 'static>;
fn subscribe(&self, id: &SubscriptionId) -> Self::NotificationStream {
let (tx, rx) = mpsc::unbounded();
if self.subscriptions.lock().insert(id.clone(), tx).is_some() {
log::warn!("Replacing already-registered subscription with id {:?}", id)
}
Box::new(rx.map_err(|()| Error::Transport("No data available".into())))
}
fn unsubscribe(&self, id: &SubscriptionId) {
self.subscriptions.lock().remove(id);
}
}
enum WriteState {
WaitingForRequest,
Writing { buffer: Vec<u8>, current_pos: usize },
}
/// Writing part of the IPC transport
/// Awaits new requests using `mpsc::UnboundedReceiver` and writes them to the socket.
#[cfg(unix)]
struct WriteStream {
write: WriteHalf<UnixStream>,
incoming: mpsc::UnboundedReceiver<Vec<u8>>,
state: WriteState,
}
#[cfg(unix)]
impl Future for WriteStream {
type Item = ();
type Error = ();
fn poll(&mut self) -> futures::Poll<Self::Item, Self::Error> {
loop {
self.state = match self.state {
WriteState::WaitingForRequest => {
// Ask for more to write
let to_send = try_ready!(self.incoming.poll());
if let Some(to_send) = to_send {
log::trace!("Got new message to write: {:?}", String::from_utf8_lossy(&to_send));
WriteState::Writing {
buffer: to_send,
current_pos: 0,
}
} else {
return Ok(futures::Async::NotReady);
}
}
WriteState::Writing {
ref buffer,
ref mut current_pos,
} => {
// Write everything in the buffer
while *current_pos < buffer.len() {
let n = try_nb!(self.write.write(&buffer[*current_pos..]));
*current_pos += n;
if n == 0 {
log::warn!("IO Error: Zero write.");
return Err(()); // zero write?
}
}
WriteState::WaitingForRequest
}
};
}
}
}
/// Reading part of the IPC transport.
/// Reads data on the socket and tries to dispatch it to awaiting requests.
#[cfg(unix)]
struct ReadStream {
read: ReadHalf<UnixStream>,
pending: Arc<Mutex<BTreeMap<RequestId, Pending>>>,
subscriptions: Arc<Mutex<BTreeMap<SubscriptionId, Subscription>>>,
buffer: Vec<u8>,
current_pos: usize,
}
#[cfg(unix)]
impl Future for ReadStream {
type Item = ();
type Error = ();
fn poll(&mut self) -> futures::Poll<Self::Item, Self::Error> {
const DEFAULT_BUF_SIZE: usize = 4096;
let mut new_write_size = 128;
loop {
if self.current_pos == self.buffer.len() {
if new_write_size < DEFAULT_BUF_SIZE {
new_write_size *= 2;
}
self.buffer.resize(self.current_pos + new_write_size, 0);
}
let read = try_nb!(self.read.read(&mut self.buffer[self.current_pos..]));
if read == 0 {
return Ok(futures::Async::NotReady);
}
let mut min = self.current_pos;
self.current_pos += read;
while let Some((response, len)) = Self::extract_response(&self.buffer[0..self.current_pos], min) {
// Respond
self.respond(response);
// copy rest of buffer to the beginning
for i in len..self.current_pos {
self.buffer.swap(i, i - len);
}
// truncate the buffer
let new_len = self.current_pos - len;
self.buffer.truncate(new_len + new_write_size);
// Set new positions
self.current_pos = new_len;
min = 0;
}
}
}
}
enum Message {
Rpc(Vec<rpc::Output>),
Notification(rpc::Notification),
}
#[cfg(unix)]
impl ReadStream {
fn respond(&self, response: Message) {
match response {
Message::Rpc(outputs) => {
let id = match outputs.get(0) {
Some(&rpc::Output::Success(ref success)) => success.id.clone(),
Some(&rpc::Output::Failure(ref failure)) => failure.id.clone(),
None => rpc::Id::Num(0),
};
if let rpc::Id::Num(num) = id
|
else {
log::warn!("Got unsupported response (id: {:?})", id);
}
}
Message::
|
{
if let Some(request) = self.pending.lock().remove(&(num as usize)) {
log::trace!("Responding to (id: {:?}) with {:?}", num, outputs);
if let Err(err) = request.send(helpers::to_results_from_outputs(outputs)) {
log::warn!("Sending a response to deallocated channel: {:?}", err);
}
} else {
log::warn!("Got response for unknown request (id: {:?})", num);
}
}
|
conditional_block
|
ipc.rs
|
EventLoopHandle::spawn(move |handle| Self::with_event_loop(&path, &handle).map_err(Into::into))
}
/// Create new IPC transport within existing Event Loop.
///
/// IPC is only available on Unix. On other systems, this always returns an error.
#[cfg(unix)]
pub fn with_event_loop<P>(path: P, handle: &reactor::Handle) -> Result<Self>
where
P: AsRef<Path>,
{
log::trace!("Connecting to: {:?}", path.as_ref());
let stream = UnixStream::connect(path, handle)?;
Self::with_stream(stream, handle)
}
/// Creates new IPC transport from existing `UnixStream` and `Handle`
#[cfg(unix)]
fn with_stream(stream: UnixStream, handle: &reactor::Handle) -> Result<Self> {
let (read, write) = stream.split();
let (write_sender, write_receiver) = mpsc::unbounded();
let pending: Arc<Mutex<BTreeMap<RequestId, Pending>>> = Default::default();
let subscriptions: Arc<Mutex<BTreeMap<SubscriptionId, Subscription>>> = Default::default();
let r = ReadStream {
read,
pending: pending.clone(),
subscriptions: subscriptions.clone(),
buffer: vec![],
current_pos: 0,
};
let w = WriteStream {
write,
incoming: write_receiver,
state: WriteState::WaitingForRequest,
};
handle.spawn(r);
handle.spawn(w);
Ok(Ipc {
id: Arc::new(atomic::AtomicUsize::new(1)),
write_sender,
pending,
subscriptions,
})
}
#[cfg(not(unix))]
pub fn with_event_loop<P>(_path: P, _handle: &reactor::Handle) -> Result<Self> {
return Err(Error::Transport("IPC transport is only supported on Unix".into()).into());
}
fn send_request<F, O>(&self, id: RequestId, request: rpc::Request, extract: F) -> IpcTask<F>
where
F: Fn(Vec<Result<rpc::Value>>) -> O,
{
let request = helpers::to_string(&request);
log::debug!("[{}] Calling: {}", id, request);
let (tx, rx) = futures::oneshot();
self.pending.lock().insert(id, tx);
let result = self
.write_sender
.unbounded_send(request.into_bytes())
.map_err(|_| Error::Io(io::ErrorKind::BrokenPipe.into()));
Response::new(id, result, rx, extract)
}
}
impl Transport for Ipc {
type Out = IpcTask<fn(Vec<Result<rpc::Value>>) -> Result<rpc::Value>>;
fn prepare(&self, method: &str, params: Vec<rpc::Value>) -> (RequestId, rpc::Call) {
let id = self.id.fetch_add(1, atomic::Ordering::AcqRel);
let request = helpers::build_request(id, method, params);
(id, request)
}
fn send(&self, id: RequestId, request: rpc::Call) -> Self::Out {
self.send_request(id, rpc::Request::Single(request), single_response)
}
}
fn single_response(response: Vec<Result<rpc::Value>>) -> Result<rpc::Value> {
match response.into_iter().next() {
Some(res) => res,
None => Err(Error::InvalidResponse("Expected single, got batch.".into())),
}
}
impl BatchTransport for Ipc {
type Batch = IpcTask<fn(Vec<Result<rpc::Value>>) -> Result<Vec<Result<rpc::Value>>>>;
fn send_batch<T>(&self, requests: T) -> Self::Batch
where
T: IntoIterator<Item = (RequestId, rpc::Call)>,
{
let mut it = requests.into_iter();
let (id, first) = it.next().map(|x| (x.0, Some(x.1))).unwrap_or_else(|| (0, None));
let requests = first.into_iter().chain(it.map(|x| x.1)).collect();
self.send_request(id, rpc::Request::Batch(requests), Ok)
}
}
impl DuplexTransport for Ipc {
type NotificationStream = Box<dyn Stream<Item = rpc::Value, Error = Error> + Send + 'static>;
fn subscribe(&self, id: &SubscriptionId) -> Self::NotificationStream
|
fn unsubscribe(&self, id: &SubscriptionId) {
self.subscriptions.lock().remove(id);
}
}
enum WriteState {
WaitingForRequest,
Writing { buffer: Vec<u8>, current_pos: usize },
}
/// Writing part of the IPC transport
/// Awaits new requests using `mpsc::UnboundedReceiver` and writes them to the socket.
#[cfg(unix)]
struct WriteStream {
write: WriteHalf<UnixStream>,
incoming: mpsc::UnboundedReceiver<Vec<u8>>,
state: WriteState,
}
#[cfg(unix)]
impl Future for WriteStream {
type Item = ();
type Error = ();
fn poll(&mut self) -> futures::Poll<Self::Item, Self::Error> {
loop {
self.state = match self.state {
WriteState::WaitingForRequest => {
// Ask for more to write
let to_send = try_ready!(self.incoming.poll());
if let Some(to_send) = to_send {
log::trace!("Got new message to write: {:?}", String::from_utf8_lossy(&to_send));
WriteState::Writing {
buffer: to_send,
current_pos: 0,
}
} else {
return Ok(futures::Async::NotReady);
}
}
WriteState::Writing {
ref buffer,
ref mut current_pos,
} => {
// Write everything in the buffer
while *current_pos < buffer.len() {
let n = try_nb!(self.write.write(&buffer[*current_pos..]));
*current_pos += n;
if n == 0 {
log::warn!("IO Error: Zero write.");
return Err(()); // zero write?
}
}
WriteState::WaitingForRequest
}
};
}
}
}
/// Reading part of the IPC transport.
/// Reads data on the socket and tries to dispatch it to awaiting requests.
#[cfg(unix)]
struct ReadStream {
read: ReadHalf<UnixStream>,
pending: Arc<Mutex<BTreeMap<RequestId, Pending>>>,
subscriptions: Arc<Mutex<BTreeMap<SubscriptionId, Subscription>>>,
buffer: Vec<u8>,
current_pos: usize,
}
#[cfg(unix)]
impl Future for ReadStream {
type Item = ();
type Error = ();
fn poll(&mut self) -> futures::Poll<Self::Item, Self::Error> {
const DEFAULT_BUF_SIZE: usize = 4096;
let mut new_write_size = 128;
loop {
if self.current_pos == self.buffer.len() {
if new_write_size < DEFAULT_BUF_SIZE {
new_write_size *= 2;
}
self.buffer.resize(self.current_pos + new_write_size, 0);
}
let read = try_nb!(self.read.read(&mut self.buffer[self.current_pos..]));
if read == 0 {
return Ok(futures::Async::NotReady);
}
let mut min = self.current_pos;
self.current_pos += read;
while let Some((response, len)) = Self::extract_response(&self.buffer[0..self.current_pos], min) {
// Respond
self.respond(response);
// copy rest of buffer to the beginning
for i in len..self.current_pos {
self.buffer.swap(i, i - len);
}
// truncate the buffer
let new_len = self.current_pos - len;
self.buffer.truncate(new_len + new_write_size);
// Set new positions
self.current_pos = new_len;
min = 0;
}
}
}
}
enum Message {
Rpc(Vec<rpc::Output>),
Notification(rpc::Notification),
}
#[cfg(unix)]
impl ReadStream {
fn respond(&self, response: Message) {
match response {
Message::Rpc(outputs) => {
let id = match outputs.get(0) {
Some(&rpc::Output::Success(ref success)) => success.id.clone(),
Some(&rpc::Output::Failure(ref failure)) => failure.id.clone(),
None => rpc::Id::Num(0),
};
if let rpc::Id::Num(num) = id {
if let Some(request) = self.pending.lock().remove(&(num as usize)) {
log::trace!("Responding to (id: {:?}) with {:?}", num, outputs);
if let Err(err) = request.send(helpers::to_results_from_outputs(outputs)) {
log::warn!("Sending a response to deallocated channel: {:?}", err);
}
} else {
log::warn!("Got response for unknown request (id: {:?})", num);
}
} else {
log::warn!("Got unsupported response (id: {:?})", id);
}
}
Message
|
{
let (tx, rx) = mpsc::unbounded();
if self.subscriptions.lock().insert(id.clone(), tx).is_some() {
log::warn!("Replacing already-registered subscription with id {:?}", id)
}
Box::new(rx.map_err(|()| Error::Transport("No data available".into())))
}
|
identifier_body
|
ipc.rs
|
EventLoopHandle::spawn(move |handle| Self::with_event_loop(&path, &handle).map_err(Into::into))
}
/// Create new IPC transport within existing Event Loop.
///
/// IPC is only available on Unix. On other systems, this always returns an error.
#[cfg(unix)]
pub fn with_event_loop<P>(path: P, handle: &reactor::Handle) -> Result<Self>
where
P: AsRef<Path>,
{
log::trace!("Connecting to: {:?}", path.as_ref());
let stream = UnixStream::connect(path, handle)?;
Self::with_stream(stream, handle)
}
/// Creates new IPC transport from existing `UnixStream` and `Handle`
#[cfg(unix)]
fn with_stream(stream: UnixStream, handle: &reactor::Handle) -> Result<Self> {
let (read, write) = stream.split();
let (write_sender, write_receiver) = mpsc::unbounded();
let pending: Arc<Mutex<BTreeMap<RequestId, Pending>>> = Default::default();
let subscriptions: Arc<Mutex<BTreeMap<SubscriptionId, Subscription>>> = Default::default();
let r = ReadStream {
read,
pending: pending.clone(),
subscriptions: subscriptions.clone(),
buffer: vec![],
current_pos: 0,
};
let w = WriteStream {
write,
incoming: write_receiver,
state: WriteState::WaitingForRequest,
};
handle.spawn(r);
handle.spawn(w);
Ok(Ipc {
id: Arc::new(atomic::AtomicUsize::new(1)),
write_sender,
pending,
subscriptions,
})
}
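`with_stream` splits the socket into read and write halves, wires the write half to an unbounded channel, and spawns both halves as independent tasks on the reactor. The following is a rough asyncio analogue of that wiring, a hedged sketch rather than the crate's API; `open_unix_connection`, the queue, and the `pending` dict are stand-ins for `UnixStream::split`, the mpsc channel, and the shared pending map, and the newline-delimited framing is an assumption made only for this example.

```python
# Rough asyncio analogue of with_stream's wiring (illustrative, not the crate):
# a writer task drains a queue of serialized requests, a reader task parses
# replies and completes the matching future from a shared `pending` map.
import asyncio
import json

async def writer_task(writer, outgoing: asyncio.Queue):
    while True:
        payload = await outgoing.get()        # like the mpsc::UnboundedReceiver
        writer.write(payload)
        await writer.drain()

async def reader_task(reader, pending: dict):
    while True:
        line = await reader.readline()        # assumes newline-delimited JSON here
        if not line:
            break
        msg = json.loads(line)
        fut = pending.pop(msg.get("id"), None)
        if fut is not None and not fut.done():
            fut.set_result(msg)

async def connect(path: str):
    reader, writer = await asyncio.open_unix_connection(path)
    outgoing: asyncio.Queue = asyncio.Queue()
    pending: dict = {}
    asyncio.create_task(writer_task(writer, outgoing))   # like handle.spawn(w)
    asyncio.create_task(reader_task(reader, pending))    # like handle.spawn(r)
    return outgoing, pending
```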
#[cfg(not(unix))]
pub fn with_event_loop<P>(_path: P, _handle: &reactor::Handle) -> Result<Self> {
return Err(Error::Transport("IPC transport is only supported on Unix".into()).into());
}
fn send_request<F, O>(&self, id: RequestId, request: rpc::Request, extract: F) -> IpcTask<F>
where
F: Fn(Vec<Result<rpc::Value>>) -> O,
{
let request = helpers::to_string(&request);
log::debug!("[{}] Calling: {}", id, request);
let (tx, rx) = futures::oneshot();
self.pending.lock().insert(id, tx);
let result = self
.write_sender
.unbounded_send(request.into_bytes())
.map_err(|_| Error::Io(io::ErrorKind::BrokenPipe.into()));
Response::new(id, result, rx, extract)
}
}
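`send_request` registers a oneshot sender under the request id before the bytes are handed to the writer; `ReadStream::respond` later removes that entry and completes it exactly once, logging a warning for unknown ids. A compact Python sketch of that correlation pattern follows; the names and the queue-based "oneshot" are illustrative assumptions, not the crate's types.

```python
# Illustrative sketch of the pending-request correlation used by send_request
# and ReadStream::respond: a shared map from request id to a one-shot waiter.
import queue

pending = {}  # request id -> one-shot waiter

def send_request(request_id, payload, write):
    waiter = queue.Queue(maxsize=1)   # stands in for the futures oneshot channel
    pending[request_id] = waiter
    write(payload)                    # hand the serialized request to the writer
    return waiter                     # the caller later blocks on waiter.get()

def respond(request_id, response):
    waiter = pending.pop(request_id, None)
    if waiter is None:
        print("Got response for unknown request (id: %r)" % request_id)
    else:
        waiter.put(response)          # completes the waiter exactly once

w = send_request(1, '{"jsonrpc":"2.0","id":1,"method":"net_version"}', write=lambda _: None)
respond(1, '{"jsonrpc":"2.0","id":1,"result":"1"}')
print(w.get())                        # -> the matching response
```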
impl Transport for Ipc {
type Out = IpcTask<fn(Vec<Result<rpc::Value>>) -> Result<rpc::Value>>;
fn prepare(&self, method: &str, params: Vec<rpc::Value>) -> (RequestId, rpc::Call) {
let id = self.id.fetch_add(1, atomic::Ordering::AcqRel);
let request = helpers::build_request(id, method, params);
(id, request)
}
fn send(&self, id: RequestId, request: rpc::Call) -> Self::Out {
self.send_request(id, rpc::Request::Single(request), single_response)
}
}
fn single_response(response: Vec<Result<rpc::Value>>) -> Result<rpc::Value> {
match response.into_iter().next() {
Some(res) => res,
None => Err(Error::InvalidResponse("Expected single, got batch.".into())),
}
}
impl BatchTransport for Ipc {
type Batch = IpcTask<fn(Vec<Result<rpc::Value>>) -> Result<Vec<Result<rpc::Value>>>>;
fn send_batch<T>(&self, requests: T) -> Self::Batch
|
let requests = first.into_iter().chain(it.map(|x| x.1)).collect();
self.send_request(id, rpc::Request::Batch(requests), Ok)
}
}
impl DuplexTransport for Ipc {
type NotificationStream = Box<dyn Stream<Item = rpc::Value, Error = Error> + Send + 'static>;
fn subscribe(&self, id: &SubscriptionId) -> Self::NotificationStream {
let (tx, rx) = mpsc::unbounded();
if self.subscriptions.lock().insert(id.clone(), tx).is_some() {
log::warn!("Replacing already-registered subscription with id {:?}", id)
}
Box::new(rx.map_err(|()| Error::Transport("No data available".into())))
}
fn unsubscribe(&self, id: &SubscriptionId) {
self.subscriptions.lock().remove(id);
}
}
enum WriteState {
WaitingForRequest,
Writing { buffer: Vec<u8>, current_pos: usize },
}
/// Writing part of the IPC transport
/// Awaits new requests using `mpsc::UnboundedReceiver` and writes them to the socket.
#[cfg(unix)]
struct WriteStream {
write: WriteHalf<UnixStream>,
incoming: mpsc::UnboundedReceiver<Vec<u8>>,
state: WriteState,
}
#[cfg(unix)]
impl Future for WriteStream {
type Item = ();
type Error = ();
fn poll(&mut self) -> futures::Poll<Self::Item, Self::Error> {
loop {
self.state = match self.state {
WriteState::WaitingForRequest => {
// Ask for more to write
let to_send = try_ready!(self.incoming.poll());
if let Some(to_send) = to_send {
log::trace!("Got new message to write: {:?}", String::from_utf8_lossy(&to_send));
WriteState::Writing {
buffer: to_send,
current_pos: 0,
}
} else {
return Ok(futures::Async::NotReady);
}
}
WriteState::Writing {
ref buffer,
ref mut current_pos,
} => {
// Write everything in the buffer
while *current_pos < buffer.len() {
let n = try_nb!(self.write.write(&buffer[*current_pos..]));
*current_pos += n;
if n == 0 {
log::warn!("IO Error: Zero write.");
return Err(()); // a zero-length write means the socket accepted nothing; treat it as a broken connection
}
}
WriteState::WaitingForRequest
}
};
}
}
}
/// Reading part of the IPC transport.
/// Reads data on the socket and tries to dispatch it to awaiting requests.
#[cfg(unix)]
struct ReadStream {
read: ReadHalf<UnixStream>,
pending: Arc<Mutex<BTreeMap<RequestId, Pending>>>,
subscriptions: Arc<Mutex<BTreeMap<SubscriptionId, Subscription>>>,
buffer: Vec<u8>,
current_pos: usize,
}
#[cfg(unix)]
impl Future for ReadStream {
type Item = ();
type Error = ();
fn poll(&mut self) -> futures::Poll<Self::Item, Self::Error> {
const DEFAULT_BUF_SIZE: usize = 4096;
let mut new_write_size = 128;
loop {
if self.current_pos == self.buffer.len() {
if new_write_size < DEFAULT_BUF_SIZE {
new_write_size *= 2;
}
self.buffer.resize(self.current_pos + new_write_size, 0);
}
let read = try_nb!(self.read.read(&mut self.buffer[self.current_pos..]));
if read == 0 {
return Ok(futures::Async::NotReady);
}
let mut min = self.current_pos;
self.current_pos += read;
while let Some((response, len)) = Self::extract_response(&self.buffer[0..self.current_pos], min) {
// Respond
self.respond(response);
// copy rest of buffer to the beginning
for i in len..self.current_pos {
self.buffer.swap(i, i - len);
}
// truncate the buffer
let new_len = self.current_pos - len;
self.buffer.truncate(new_len + new_write_size);
// Set new positions
self.current_pos = new_len;
min = 0;
}
}
}
}
enum Message {
Rpc(Vec<rpc::Output>),
Notification(rpc::Notification),
}
#[cfg(unix)]
impl ReadStream {
fn respond(&self, response: Message) {
match response {
Message::Rpc(outputs) => {
let id = match outputs.get(0) {
Some(&rpc::Output::Success(ref success)) => success.id.clone(),
Some(&rpc::Output::Failure(ref failure)) => failure.id.clone(),
None => rpc::Id::Num(0),
};
if let rpc::Id::Num(num) = id {
if let Some(request) = self.pending.lock().remove(&(num as usize)) {
log::trace!("Responding to (id: {:?}) with {:?}", num, outputs);
if let Err(err) = request.send(helpers::to_results_from_outputs(outputs)) {
log::warn!("Sending a response to deallocated channel: {:?}", err);
}
} else {
log::warn!("Got response for unknown request (id: {:?})", num);
}
} else {
log::warn!("Got unsupported response (id: {:?})", id);
}
}
Message::Notification
|
where
T: IntoIterator<Item = (RequestId, rpc::Call)>,
{
let mut it = requests.into_iter();
let (id, first) = it.next().map(|x| (x.0, Some(x.1))).unwrap_or_else(|| (0, None));
|
random_line_split
|
map.js
|
ATELLITE_MAP:
case BMAP_HYBRID_MAP:
cL = ['<span style="' + cN + '">© 2013 Baidu - Data © '];
cL.push('<a target="_blank" href="http://www.navinfo.com/" style="' + cN + '">NavInfo</a> & ');
for (var cK in cR) {
if (cR[cK] == cP) {
cO = true;
break
}
}
if (cO) {
cL.push('<a target="_blank" href="http://www.yootu.com/" style="' + cN + '">yootu</a>')
} else {
cL.push('<a target="_blank" href="http://www.cennavi.com.cn/" style="' + cN + '">CenNavi</a>')
}
cL.push(' & <a target="_blank" href="http://www.365ditu.com/" style="' + cN + '">\u9053\u9053\u901a</a>');
cL.push(" , Image © DigitalGlobe & </span>");
cL.push('<a href="http://www.chinasiwei.com" target="_blank" style="' + cN + '">chinasiwei</a>');
break;
case BMAP_PERSPECTIVE_MAP:
cL = ['<span style="' + cN + '">© 2013 Baidu - Data © </span>', '<a href="http://o.cn" target="_blank" style="color:#fff;font-size:' + cQ + ';text-shadow:0 1px 3px black">\u90fd\u5e02\u5708</a>'];
break;
default:
cL = ['<span style="font-size:' + cQ + '">© 2013 Baidu - Data © '];
cL.push('<a target="_blank" href="http://www.navinfo.com/">NavInfo</a> & ');
for (var cK in cR) {
if (cR[cK] == cP) {
cO = true;
break
}
}
if (cO) {
cL.push('<a target="_blank" href="http://www.yootu.com/">yootu</a>')
} else {
cL.push('<a target="_blank" href="http://www.cennavi.com.cn/">CenNavi</a>')
}
cL.push(' & <a target="_blank" href="http://www.365ditu.com/">\u9053\u9053\u901a</a>');
cL.push("</span>");
break
}
cL = cL.join("");
T.cpyCtrl.addCopyright({
id: 1,
content: cL
})
}
function b0(T) {
this.defaultAnchor = BMAP_ANCHOR_BOTTOM_LEFT;
this.defaultOffset = new aG(1, 0);
this.IMG_URL = cb.imgPath + (az() ? "copyright_logo_s.png" : "copyright_logo.png")
}
b0.prototype = new co();
b0.prototype.initialize = function (cK) {
this._map = cK;
var cL = Z("div");
cL.style.height = "32px";
var T = Z("a", {
title: "\u5230\u767e\u5ea6\u5730\u56fe\u67e5\u770b\u6b64\u533a\u57df",
target: "_blank",
href: "http://map.baidu.com/?sr=1"
});
T.style.outline = "none";
if (a7.browser.ie == 6) {
T.innerHTML = "<div style='cursor:pointer;width:77px;height:32px;filter:progid:DXImageTransform.Microsoft.AlphaImageLoader(src=" + this.IMG_URL + ")'></div>"
} else {
T.innerHTML = "<img style='border:none;width:77px;height:32px' src='" + this.IMG_URL + "' />"
}
if (az()) {
if (this._map.highResolutionEnabled()) {
cL.style.height = "50px";
T.href = "#";
this.IMG_URL = cb.imgPath + "copyright_logo_hd.png";
T.innerHTML = "<img style='border:none;width:136px;height:50px' src='" + this.IMG_URL + "' />"
} else {
cL.style.height = "25px";
T.href = "#";
T.innerHTML = "<img style='border:none;width:68px;height:25px' src='" + this.IMG_URL + "' />"
}
}
cL.appendChild(T);
cK.getContainer().appendChild(cL);
return cL
};
a7.extend(by.prototype, {
_draw: function () {
this._bind()
},
_bind: function () {
var T = this;
T._watchSize = function () {
var cL = T.getSize();
if (T.width != cL.width || T.height != cL.height) {
var cN = new aG(T.width, T.height);
var cP = new bf("onbeforeresize");
cP.size = cN;
T.dispatchEvent(cP);
T._updateCenterPoint((cL.width - T.width) / 2, (cL.height - T.height) / 2);
T.maskLayer.style.width = (T.width = cL.width) + "px";
T.maskLayer.style.height = (T.height = cL.height) + "px";
var cM = new bf("onresize");
cM.size = cL;
T.dispatchEvent(cM);
var cK = parseInt(T.platform.style.left) || 0;
var cO = parseInt(T.platform.style.top) || 0;
if (T.currentOperation != 0 && (T.offsetX != cK || T.offsetY != cO)) {
T._setPlatformPosition(cK, cO)
}
}
};
a7.on(T.maskLayer, "mouseover", function (cK) {
T.dispatchEvent(new bf("onmouseover"))
});
a7.on(T.maskLayer, "mouseout", function (cK) {
T.dispatchEvent(new bf("onmouseout"))
})
},
_setPlatformPosition: function (T, cM, cK, cL) {
if (isNaN(T) || isNaN(cM)) {
return
}
if (this.offsetX == T && this.offsetY == cM) {
return
}
this._updateCenterPoint(this.offsetX - T, this.offsetY - cM, cK);
T = Math.round(T);
cM = Math.round(cM);
this.offsetX = T;
this.offsetY = cM;
this.platform.style.left = T + "px";
this.platform.style.top = cM + "px";
this.maskLayer.style.left = -T + "px";
this.maskLayer.style.top = -cM + "px";
if (cL != false) {
this.dispatchEvent(new bf("onmoving"))
}
},
panTo: function (cK, cM) {
if (!(cK instanceof cc)) {
return
}
var cL = this.pointToPixel(cK);
var T = Math.round(this.width / 2);
var cN = Math.round(this.height / 2);
cM = cM || {};
if (Math.abs(T - cL.x) > this.width || Math.abs(cN - cL.y) > this.height || cM.noAnimation) {
this._panTo(T - cL.x, cN - cL.y, cK)
} else {
this._panBy(T - cL.x, cN - cL.y, {
duration: cM.duration
})
}
},
_panTo: function (cK, T, cM) {
var cL = this.temp;
if (cL.operating == true) {
return
}
if (cL.dragAni) {
cL.dragAni.stop()
}
this.dispatchEvent(new bf("onmovestart"));
this._setPlatformPosition(this.offsetX + cK, this.offsetY + T, cM);
this.dispatchEvent(new bf("onmoveend"))
},
panBy: function (cK, T, cL) {
cK = Math.round(cK) || 0;
T = Math.round(T) || 0;
cL = cL || {};
if (Math.abs(cK) <= this.width && Math.abs(T) <= this.height && (!cL.noAnimation)) {
this._panBy(cK, T)
} else {
this._panTo(cK, T)
}
},
_panBy: function (cK, T, cN) {
if (this.temp.operating == true)
|
{
return
}
|
conditional_block
|
|
map.js
|
K)
|
aY(cK);
cK.addEventListener("maptypechange", function () {
aY(cK)
});
cK.addControl(cL);
var T = new b0();
T._opts = {
printable: true
};
cK.addControl(T);
cK.addEventListener("resize", function () {
if (this.getSize().width >= 220 && cK.getSize().height >= 100) {
T.show();
cL.setOffset(cM)
} else {
T.hide();
cL.setOffset(new aG(4, 2))
}
});
if (cK.getSize().width >= 220 && cK.getSize().height >= 100) {
T.show()
} else {
T.hide();
cL.setOffset(new aG(4, 2))
}
if (cK.highResolutionEnabled()) {
T.setOffset(new aG(3, 2))
}
}
function aY(T) {
var cQ = "11px",
cP = T.cityName || "\u4e2d\u56fd",
cM = T.getMapType(),
cR = ["\u5e38\u5dde\u5e02", "\u6210\u90fd\u5e02", "\u5927\u8fde\u5e02", "\u91cd\u5e86\u5e02", "\u5357\u4eac\u5e02", "\u5357\u660c\u5e02", "\u6b66\u6c49\u5e02"],
cL = [],
cO, cN = "color:#fff;font-size:" + cQ + ";text-shadow:0 1px 3px black";
switch (cM) {
case BMAP_SATELLITE_MAP:
case BMAP_HYBRID_MAP:
cL = ['<span style="' + cN + '">© 2013 Baidu - Data © '];
cL.push('<a target="_blank" href="http://www.navinfo.com/" style="' + cN + '">NavInfo</a> & ');
for (var cK in cR) {
if (cR[cK] == cP) {
cO = true;
break
}
}
if (cO) {
cL.push('<a target="_blank" href="http://www.yootu.com/" style="' + cN + '">yootu</a>')
} else {
cL.push('<a target="_blank" href="http://www.cennavi.com.cn/" style="' + cN + '">CenNavi</a>')
}
cL.push(' & <a target="_blank" href="http://www.365ditu.com/" style="' + cN + '">\u9053\u9053\u901a</a>');
cL.push(" , Image © DigitalGlobe & </span>");
cL.push('<a href="http://www.chinasiwei.com" target="_blank" style="' + cN + '">chinasiwei</a>');
break;
case BMAP_PERSPECTIVE_MAP:
cL = ['<span style="' + cN + '">© 2013 Baidu - Data © </span>', '<a href="http://o.cn" target="_blank" style="color:#fff;font-size:' + cQ + ';text-shadow:0 1px 3px black">\u90fd\u5e02\u5708</a>'];
break;
default:
cL = ['<span style="font-size:' + cQ + '">© 2013 Baidu - Data © '];
cL.push('<a target="_blank" href="http://www.navinfo.com/">NavInfo</a> & ');
for (var cK in cR) {
if (cR[cK] == cP) {
cO = true;
break
}
}
if (cO) {
cL.push('<a target="_blank" href="http://www.yootu.com/">yootu</a>')
} else {
cL.push('<a target="_blank" href="http://www.cennavi.com.cn/">CenNavi</a>')
}
cL.push(' & <a target="_blank" href="http://www.365ditu.com/">\u9053\u9053\u901a</a>');
cL.push("</span>");
break
}
cL = cL.join("");
T.cpyCtrl.addCopyright({
id: 1,
content: cL
})
}
function b0(T) {
this.defaultAnchor = BMAP_ANCHOR_BOTTOM_LEFT;
this.defaultOffset = new aG(1, 0);
this.IMG_URL = cb.imgPath + (az() ? "copyright_logo_s.png" : "copyright_logo.png")
}
b0.prototype = new co();
b0.prototype.initialize = function (cK) {
this._map = cK;
var cL = Z("div");
cL.style.height = "32px";
var T = Z("a", {
title: "\u5230\u767e\u5ea6\u5730\u56fe\u67e5\u770b\u6b64\u533a\u57df",
target: "_blank",
href: "http://map.baidu.com/?sr=1"
});
T.style.outline = "none";
if (a7.browser.ie == 6) {
T.innerHTML = "<div style='cursor:pointer;width:77px;height:32px;filter:progid:DXImageTransform.Microsoft.AlphaImageLoader(src=" + this.IMG_URL + ")'></div>"
} else {
T.innerHTML = "<img style='border:none;width:77px;height:32px' src='" + this.IMG_URL + "' />"
}
if (az()) {
if (this._map.highResolutionEnabled()) {
cL.style.height = "50px";
T.href = "#";
this.IMG_URL = cb.imgPath + "copyright_logo_hd.png";
T.innerHTML = "<img style='border:none;width:136px;height:50px' src='" + this.IMG_URL + "' />"
} else {
cL.style.height = "25px";
T.href = "#";
T.innerHTML = "<img style='border:none;width:68px;height:25px' src='" + this.IMG_URL + "' />"
}
}
cL.appendChild(T);
cK.getContainer().appendChild(cL);
return cL
};
a7.extend(by.prototype, {
_draw: function () {
this._bind()
},
_bind: function () {
var T = this;
T._watchSize = function () {
var cL = T.getSize();
if (T.width != cL.width || T.height != cL.height) {
var cN = new aG(T.width, T.height);
var cP = new bf("onbeforeresize");
cP.size = cN;
T.dispatchEvent(cP);
T._updateCenterPoint((cL.width - T.width) / 2, (cL.height - T.height) / 2);
T.maskLayer.style.width = (T.width = cL.width) + "px";
T.maskLayer.style.height = (T.height = cL.height) + "px";
var cM = new bf("onresize");
cM.size = cL;
T.dispatchEvent(cM);
var cK = parseInt(T.platform.style.left) || 0;
var cO = parseInt(T.platform.style.top) || 0;
if (T.currentOperation != 0 && (T.offsetX != cK || T.offsetY != cO)) {
T._setPlatformPosition(cK, cO)
}
}
};
a7.on(T.maskLayer, "mouseover", function (cK) {
T.dispatchEvent(new bf("onmouseover"))
});
a7.on(T.maskLayer, "mouseout", function (cK) {
T.dispatchEvent(new bf("onmouseout"))
})
},
_setPlatformPosition: function (T, cM, cK, cL) {
if (isNaN(T) || isNaN
|
{
if (cK.temp.copyadded) {
return
}
cK.temp.copyadded = true;
var cM = new aG(81, 2);
if (az()) {
if (cK.highResolutionEnabled()) {
cM.width = 148;
fontSize = "21px"
} else {
cM.width = 72;
cM.height = 0
}
}
var cL = new al({
offset: cM,
printable: true
});
cK.cpyCtrl = cL;
|
identifier_body
|
map.js
|
K) {
if (cK.temp.copyadded) {
return
}
cK.temp.copyadded = true;
var cM = new aG(81, 2);
if (az()) {
if (cK.highResolutionEnabled()) {
cM.width = 148;
fontSize = "21px"
} else {
cM.width = 72;
cM.height = 0
}
}
var cL = new al({
offset: cM,
printable: true
});
cK.cpyCtrl = cL;
aY(cK);
cK.addEventListener("maptypechange", function () {
aY(cK)
});
cK.addControl(cL);
var T = new b0();
T._opts = {
printable: true
};
cK.addControl(T);
cK.addEventListener("resize", function () {
if (this.getSize().width >= 220 && cK.getSize().height >= 100) {
T.show();
cL.setOffset(cM)
} else {
T.hide();
cL.setOffset(new aG(4, 2))
}
});
if (cK.getSize().width >= 220 && cK.getSize().height >= 100) {
T.show()
} else {
T.hide();
cL.setOffset(new aG(4, 2))
}
if (cK.highResolutionEnabled()) {
T.setOffset(new aG(3, 2))
}
}
function aY(T) {
var cQ = "11px",
cP = T.cityName || "\u4e2d\u56fd",
cM = T.getMapType(),
cR = ["\u5e38\u5dde\u5e02", "\u6210\u90fd\u5e02", "\u5927\u8fde\u5e02", "\u91cd\u5e86\u5e02", "\u5357\u4eac\u5e02", "\u5357\u660c\u5e02", "\u6b66\u6c49\u5e02"],
cL = [],
cO, cN = "color:#fff;font-size:" + cQ + ";text-shadow:0 1px 3px black";
switch (cM) {
case BMAP_SATELLITE_MAP:
case BMAP_HYBRID_MAP:
cL = ['<span style="' + cN + '">© 2013 Baidu - Data © '];
cL.push('<a target="_blank" href="http://www.navinfo.com/" style="' + cN + '">NavInfo</a> & ');
for (var cK in cR) {
if (cR[cK] == cP) {
cO = true;
break
}
}
if (cO) {
cL.push('<a target="_blank" href="http://www.yootu.com/" style="' + cN + '">yootu</a>')
} else {
cL.push('<a target="_blank" href="http://www.cennavi.com.cn/" style="' + cN + '">CenNavi</a>')
}
cL.push(' & <a target="_blank" href="http://www.365ditu.com/" style="' + cN + '">\u9053\u9053\u901a</a>');
cL.push(" , Image © DigitalGlobe & </span>");
cL.push('<a href="http://www.chinasiwei.com" target="_blank" style="' + cN + '">chinasiwei</a>');
break;
case BMAP_PERSPECTIVE_MAP:
cL = ['<span style="' + cN + '">© 2013 Baidu - Data © </span>', '<a href="http://o.cn" target="_blank" style="color:#fff;font-size:' + cQ + ';text-shadow:0 1px 3px black">\u90fd\u5e02\u5708</a>'];
break;
default:
cL = ['<span style="font-size:' + cQ + '">© 2013 Baidu - Data © '];
cL.push('<a target="_blank" href="http://www.navinfo.com/">NavInfo</a> & ');
for (var cK in cR) {
if (cR[cK] == cP) {
cO = true;
break
}
}
if (cO) {
cL.push('<a target="_blank" href="http://www.yootu.com/">yootu</a>')
} else {
cL.push('<a target="_blank" href="http://www.cennavi.com.cn/">CenNavi</a>')
}
cL.push(' & <a target="_blank" href="http://www.365ditu.com/">\u9053\u9053\u901a</a>');
cL.push("</span>");
break
}
cL = cL.join("");
T.cpyCtrl.addCopyright({
id: 1,
content: cL
})
}
function
|
(T) {
this.defaultAnchor = BMAP_ANCHOR_BOTTOM_LEFT;
this.defaultOffset = new aG(1, 0);
this.IMG_URL = cb.imgPath + (az() ? "copyright_logo_s.png" : "copyright_logo.png")
}
b0.prototype = new co();
b0.prototype.initialize = function (cK) {
this._map = cK;
var cL = Z("div");
cL.style.height = "32px";
var T = Z("a", {
title: "\u5230\u767e\u5ea6\u5730\u56fe\u67e5\u770b\u6b64\u533a\u57df",
target: "_blank",
href: "http://map.baidu.com/?sr=1"
});
T.style.outline = "none";
if (a7.browser.ie == 6) {
T.innerHTML = "<div style='cursor:pointer;width:77px;height:32px;filter:progid:DXImageTransform.Microsoft.AlphaImageLoader(src=" + this.IMG_URL + ")'></div>"
} else {
T.innerHTML = "<img style='border:none;width:77px;height:32px' src='" + this.IMG_URL + "' />"
}
if (az()) {
if (this._map.highResolutionEnabled()) {
cL.style.height = "50px";
T.href = "#";
this.IMG_URL = cb.imgPath + "copyright_logo_hd.png";
T.innerHTML = "<img style='border:none;width:136px;height:50px' src='" + this.IMG_URL + "' />"
} else {
cL.style.height = "25px";
T.href = "#";
T.innerHTML = "<img style='border:none;width:68px;height:25px' src='" + this.IMG_URL + "' />"
}
}
cL.appendChild(T);
cK.getContainer().appendChild(cL);
return cL
};
a7.extend(by.prototype, {
_draw: function () {
this._bind()
},
_bind: function () {
var T = this;
T._watchSize = function () {
var cL = T.getSize();
if (T.width != cL.width || T.height != cL.height) {
var cN = new aG(T.width, T.height);
var cP = new bf("onbeforeresize");
cP.size = cN;
T.dispatchEvent(cP);
T._updateCenterPoint((cL.width - T.width) / 2, (cL.height - T.height) / 2);
T.maskLayer.style.width = (T.width = cL.width) + "px";
T.maskLayer.style.height = (T.height = cL.height) + "px";
var cM = new bf("onresize");
cM.size = cL;
T.dispatchEvent(cM);
var cK = parseInt(T.platform.style.left) || 0;
var cO = parseInt(T.platform.style.top) || 0;
if (T.currentOperation != 0 && (T.offsetX != cK || T.offsetY != cO)) {
T._setPlatformPosition(cK, cO)
}
}
};
a7.on(T.maskLayer, "mouseover", function (cK) {
T.dispatchEvent(new bf("onmouseover"))
});
a7.on(T.maskLayer, "mouseout", function (cK) {
T.dispatchEvent(new bf("onmouseout"))
})
},
_setPlatformPosition: function (T, cM, cK, cL) {
if (isNaN(T) || isNaN(cM
|
b0
|
identifier_name
|
map.js
|
(this.getSize().width >= 220 && cK.getSize().height >= 100) {
T.show();
cL.setOffset(cM)
} else {
T.hide();
cL.setOffset(new aG(4, 2))
}
});
if (cK.getSize().width >= 220 && cK.getSize().height >= 100) {
T.show()
} else {
T.hide();
cL.setOffset(new aG(4, 2))
}
if (cK.highResolutionEnabled()) {
T.setOffset(new aG(3, 2))
}
}
function aY(T) {
var cQ = "11px",
cP = T.cityName || "\u4e2d\u56fd",
cM = T.getMapType(),
cR = ["\u5e38\u5dde\u5e02", "\u6210\u90fd\u5e02", "\u5927\u8fde\u5e02", "\u91cd\u5e86\u5e02", "\u5357\u4eac\u5e02", "\u5357\u660c\u5e02", "\u6b66\u6c49\u5e02"],
cL = [],
cO, cN = "color:#fff;font-size:" + cQ + ";text-shadow:0 1px 3px black";
switch (cM) {
case BMAP_SATELLITE_MAP:
case BMAP_HYBRID_MAP:
cL = ['<span style="' + cN + '">© 2013 Baidu - Data © '];
cL.push('<a target="_blank" href="http://www.navinfo.com/" style="' + cN + '">NavInfo</a> & ');
for (var cK in cR) {
if (cR[cK] == cP) {
cO = true;
break
}
}
if (cO) {
cL.push('<a target="_blank" href="http://www.yootu.com/" style="' + cN + '">yootu</a>')
} else {
cL.push('<a target="_blank" href="http://www.cennavi.com.cn/" style="' + cN + '">CenNavi</a>')
}
cL.push(' & <a target="_blank" href="http://www.365ditu.com/" style="' + cN + '">\u9053\u9053\u901a</a>');
cL.push(" , Image © DigitalGlobe & </span>");
cL.push('<a href="http://www.chinasiwei.com" target="_blank" style="' + cN + '">chinasiwei</a>');
break;
case BMAP_PERSPECTIVE_MAP:
cL = ['<span style="' + cN + '">© 2013 Baidu - Data © </span>', '<a href="http://o.cn" target="_blank" style="color:#fff;font-size:' + cQ + ';text-shadow:0 1px 3px black">\u90fd\u5e02\u5708</a>'];
break;
default:
cL = ['<span style="font-size:' + cQ + '">© 2013 Baidu - Data © '];
cL.push('<a target="_blank" href="http://www.navinfo.com/">NavInfo</a> & ');
for (var cK in cR) {
if (cR[cK] == cP) {
cO = true;
break
}
}
if (cO) {
cL.push('<a target="_blank" href="http://www.yootu.com/">yootu</a>')
} else {
cL.push('<a target="_blank" href="http://www.cennavi.com.cn/">CenNavi</a>')
}
cL.push(' & <a target="_blank" href="http://www.365ditu.com/">\u9053\u9053\u901a</a>');
cL.push("</span>");
break
}
cL = cL.join("");
T.cpyCtrl.addCopyright({
id: 1,
content: cL
})
}
function b0(T) {
this.defaultAnchor = BMAP_ANCHOR_BOTTOM_LEFT;
this.defaultOffset = new aG(1, 0);
this.IMG_URL = cb.imgPath + (az() ? "copyright_logo_s.png" : "copyright_logo.png")
}
b0.prototype = new co();
b0.prototype.initialize = function (cK) {
this._map = cK;
var cL = Z("div");
cL.style.height = "32px";
var T = Z("a", {
title: "\u5230\u767e\u5ea6\u5730\u56fe\u67e5\u770b\u6b64\u533a\u57df",
target: "_blank",
href: "http://map.baidu.com/?sr=1"
});
T.style.outline = "none";
if (a7.browser.ie == 6) {
T.innerHTML = "<div style='cursor:pointer;width:77px;height:32px;filter:progid:DXImageTransform.Microsoft.AlphaImageLoader(src=" + this.IMG_URL + ")'></div>"
} else {
T.innerHTML = "<img style='border:none;width:77px;height:32px' src='" + this.IMG_URL + "' />"
}
if (az()) {
if (this._map.highResolutionEnabled()) {
cL.style.height = "50px";
T.href = "#";
this.IMG_URL = cb.imgPath + "copyright_logo_hd.png";
T.innerHTML = "<img style='border:none;width:136px;height:50px' src='" + this.IMG_URL + "' />"
} else {
cL.style.height = "25px";
T.href = "#";
T.innerHTML = "<img style='border:none;width:68px;height:25px' src='" + this.IMG_URL + "' />"
}
}
cL.appendChild(T);
cK.getContainer().appendChild(cL);
return cL
};
a7.extend(by.prototype, {
_draw: function () {
this._bind()
},
_bind: function () {
var T = this;
T._watchSize = function () {
var cL = T.getSize();
if (T.width != cL.width || T.height != cL.height) {
var cN = new aG(T.width, T.height);
var cP = new bf("onbeforeresize");
cP.size = cN;
T.dispatchEvent(cP);
T._updateCenterPoint((cL.width - T.width) / 2, (cL.height - T.height) / 2);
T.maskLayer.style.width = (T.width = cL.width) + "px";
T.maskLayer.style.height = (T.height = cL.height) + "px";
var cM = new bf("onresize");
cM.size = cL;
T.dispatchEvent(cM);
var cK = parseInt(T.platform.style.left) || 0;
var cO = parseInt(T.platform.style.top) || 0;
if (T.currentOperation != 0 && (T.offsetX != cK || T.offsetY != cO)) {
T._setPlatformPosition(cK, cO)
}
}
};
a7.on(T.maskLayer, "mouseover", function (cK) {
T.dispatchEvent(new bf("onmouseover"))
});
a7.on(T.maskLayer, "mouseout", function (cK) {
T.dispatchEvent(new bf("onmouseout"))
})
},
_setPlatformPosition: function (T, cM, cK, cL) {
if (isNaN(T) || isNaN(cM)) {
return
}
if (this.offsetX == T && this.offsetY == cM) {
return
}
this._updateCenterPoint(this.offsetX - T, this.offsetY - cM, cK);
T = Math.round(T);
cM = Math.round(cM);
this.offsetX = T;
this.offsetY = cM;
this.platform.style.left = T + "px";
this.platform.style.top = cM + "px";
this.maskLayer.style.left = -T + "px";
this.maskLayer.style.top = -cM + "px";
if (cL != false) {
this.dispatchEvent(new bf("onmoving"))
}
},
panTo: function (cK, cM) {
if (!(cK instanceof cc)) {
return
}
var cL = this.pointToPixel(cK);
|
var T = Math.round(this.width / 2);
|
random_line_split
|
|
GroupINN.py
|
classmethod
def update_parser_argument(cls, parser: argparse.ArgumentParser):
args, _ = parser.parse_known_args()
parser.set_defaults(selected_model="gcn_classification_net")
print("===> Selected model: GroupINN")
group = parser.add_argument_group(title="GroupINN arguments")
group.add_argument("--dropout_rate", default=0, type=float, help="(default: %(default)s)")
group.add_argument("--c", default=0.85, type=float, help="(default: %(default)s)")
group.add_argument("--feature_reduction", default=5, type=int, help="(default: %(default)s)")
group.add_argument("--learning_rate", default=0.001, help="(default: %(default)s)")
arguments.add_loss_weights_argument(parser, cls.loss_weights, cls.__name__)
return parser
def __init__(self):
self.feature_notify = 0
def runtime_init(self, features, labels, mode):
self.losses = []
self.is_training = (mode==tf.estimator.ModeKeys.TRAIN)
def model_fn(self, features, labels,
mode:tf.estimator.ModeKeys, params):
"""
features: batch_features from input_fn
labels: batch_labels from input_fn
mode: An instance of tf.estimator.ModeKeys
params: Additional configuration
"""
self.runtime_init(features, labels, mode)
# Load parameters
self.num_features = params["args"].feature_reduction
self.c = params["args"].c
self.dropout_rate = params["args"].dropout_rate
self.selected_timeseries = params["args"].selected_timeseries
self.learning_rate = params["args"].learning_rate
self.tf_summary = (not params["args"].no_tensorboard)
# Construct network
s_feature = features[self.selected_timeseries]
s_feature_p = s_feature[0]
s_feature_n = s_feature[1]
num_columns = int(s_feature_p.shape[-1])
self.initializer = tf.initializers.random_uniform(0, 0.5/self.num_features)
p_reduce = self.dim_reduction(s_feature_p, self.num_features, "reduction_p",
self.loss_weights.ortho_penalty_p, self.loss_weights.variance_penalty_p, self.loss_weights.neg_penalty_reduce)
p_conv1 = self.gnn_conv(None, p_reduce, "conv1_p", self.loss_weights.neg_penalty_gnn)
p_conv2 = self.gnn_conv(p_conv1, p_reduce, "conv2_p", self.loss_weights.neg_penalty_gnn)
p_conv3 = self.gnn_conv(p_conv2, p_reduce, "conv3_p", self.loss_weights.neg_penalty_gnn)
n_reduce = self.dim_reduction(s_feature_n, self.num_features, "reduction_n",
self.loss_weights.ortho_penalty_n, self.loss_weights.variance_penalty_n, self.loss_weights.neg_penalty_reduce)
n_conv1 = self.gnn_conv(None, n_reduce, "conv1_n", self.loss_weights.neg_penalty_gnn)
n_conv2 = self.gnn_conv(n_conv1, n_reduce, "conv2_n", self.loss_weights.neg_penalty_gnn)
n_conv3 = self.gnn_conv(n_conv2, n_reduce, "conv3_n", self.loss_weights.neg_penalty_gnn)
conv_concat = tf.reshape(tf.concat([p_conv3,n_conv3], -1), [-1, 2*self.num_features**2])
dense_output = self.dense_layers(conv_concat, self.loss_weights.l2_penalty)
output = self.generate_output(dense_output, labels, mode)
if self.is_training:
if self.feature_notify % 10 == 0:
print("Selected feature: {}".format(self.selected_timeseries))
self.loss_weights._print_current_weights() #pylint: disable=E1101
self.count_params()
self.feature_notify += 1
return output
def dim_reduction(self, adj_matrix, num_reduce, name_scope,
ortho_penalty, variance_penalty, neg_penalty):
column_dim = int(adj_matrix.shape[-1])
with tf.variable_scope(name_scope):
kernel = tf.get_variable("dim_reduction_kernel", shape=[column_dim, num_reduce],
trainable=True, initializer=self.initializer,
regularizer=tf.contrib.layers.l1_regularizer(scale=0.05)
)
kernel_p = tf.nn.relu(kernel)
AF = tf.tensordot(adj_matrix, kernel_p, axes=[[-1],[0]])
reduced_adj_matrix = tf.transpose(
tf.tensordot(kernel_p, AF, axes=[[0],[1]]), #num_reduce*batch*num_reduce
perm=[1,0,2], name="reduced_adj")
if self.tf_summary:
tf.summary.image("dim_reduction_kernel", tf.expand_dims(
tf.expand_dims(kernel, axis=0),
axis=-1))
tf.summary.image("dim_reduction_kernel_p", tf.expand_dims(
tf.expand_dims(kernel_p, axis=0),
axis=-1))
gram_matrix = tf.matmul(kernel_p, kernel_p, transpose_a=True)
diag_elements = tf.diag_part(gram_matrix)
zero = tf.constant(0, dtype=tf.float32)
mask = tf.not_equal(diag_elements, zero)
if ortho_penalty!=0:
ortho_loss_matrix = tf.square(gram_matrix - tf.diag(diag_elements))
ortho_loss = tf.multiply(ortho_penalty, tf.reduce_sum(ortho_loss_matrix), name="ortho_penalty")
self.losses.append(ortho_loss)
if variance_penalty!=0:
_ , variance = tf.nn.moments(tf.boolean_mask(diag_elements,mask), axes=[0])
variance_loss = tf.multiply(variance_penalty, variance, name="variance_penalty")
self.losses.append(variance_loss)
if neg_penalty!=0:
neg_loss = tf.multiply(neg_penalty, tf.reduce_sum(tf.nn.relu(tf.constant(1e-6)-kernel)), name="negative_penalty")
self.losses.append(neg_loss)
return reduced_adj_matrix
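In `dim_reduction` the learned kernel is rectified and applied on both sides of the adjacency matrix, so each of the k node groups aggregates the original nodes it selects. In the notation below (which is mine, not the paper's): W is `dim_reduction_kernel`, W₊ its ReLU, A the input adjacency, G the Gram matrix used by the penalties, and the λ's are the corresponding loss weights.

```latex
W_{+} = \mathrm{ReLU}(W), \qquad
A' = W_{+}^{\top} A\, W_{+} \in \mathbb{R}^{k \times k}, \qquad
G = W_{+}^{\top} W_{+},
\]
\[
L_{\mathrm{ortho}} = \lambda_{\mathrm{ortho}} \sum_{i \neq j} G_{ij}^{2}, \qquad
L_{\mathrm{var}} = \lambda_{\mathrm{var}} \, \mathrm{Var}\bigl(\{\, G_{ii} : G_{ii} \neq 0 \,\}\bigr), \qquad
L_{\mathrm{neg}} = \lambda_{\mathrm{neg}} \sum_{i,j} \mathrm{ReLU}\bigl(10^{-6} - W_{ij}\bigr).
```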
def gnn_conv(self, prev_output, adj_matrix, name_scope, neg_penalty): #I+c*A*X*W,X0=I
feature_dim = int(adj_matrix.shape[-1])
eye = tf.eye(feature_dim)
with tf.variable_scope(name_scope):
kernel = tf.get_variable("gnn_kernel",
shape=[feature_dim,feature_dim], trainable=True, initializer=self.initializer)
if prev_output is None:
AXW = tf.tensordot(adj_matrix, kernel, [[-1],[0]])
else:
XW = tf.tensordot(prev_output, kernel, [[-1],[0]]) #batch*feature_dim*feature_dim
AXW = tf.matmul(adj_matrix, XW)
I_cAXW = eye+self.c*AXW
y_relu = tf.nn.relu(I_cAXW)
col_mean = tf.tile(tf.reduce_mean(y_relu, axis=-2, keepdims=True)+1e-6,[1,feature_dim,1])
y_norm = tf.divide(y_relu, col_mean)
output = tf.nn.softplus(y_norm, name="gnn_output")
if self.tf_summary:
tf.summary.image("gnn_kernel", tf.expand_dims(
tf.expand_dims(kernel, axis=0),
axis=-1))
if neg_penalty!=0:
neg_loss = tf.multiply(neg_penalty, tf.reduce_sum(tf.nn.relu(tf.constant(1e-6)-kernel)), name="negative_penalty")
self.losses.append(neg_loss)
return output
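`gnn_conv` implements the rule hinted at in the `#I+c*A*X*W, X0=I` comment: starting from X₀ = I, each layer forms I + c·A'·X·W, rectifies it, divides each column by its mean, and applies softplus. In the assumed notation below, A' is the reduced adjacency, W_k the layer kernel, and the bar denotes the column-wise mean broadcast over rows (the division is elementwise).

```latex
X_{0} = I, \qquad
Z_{k} = \mathrm{ReLU}\!\bigl(I + c\, A'\, X_{k} W_{k}\bigr), \qquad
X_{k+1} = \mathrm{softplus}\!\left( \frac{Z_{k}}{\overline{Z}_{k} + 10^{-6}} \right).
```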
def dense_layers(self, input_flat, l2_penalty, name_scope="dense_layers"):
with tf.variable_scope(name_scope):
output_layer = tf.layers.Dense(2, name="output_layer")
logits = output_layer(input_flat)
kernel_var = output_layer.trainable_variables[0]
if l2_penalty != 0:
dense_kernel = output_layer.trainable_variables[0].read_value()
l2_loss = tf.multiply(l2_penalty, tf.nn.l2_loss(dense_kernel), name="l2_penalty")
self.losses.append(l2_loss)
return logits
def generate_output(self, logits, labels, mode:tf.estimator.ModeKeys):
predictions = {
"classes": tf.argmax(input=logits, axis=1),
"probabilities": tf.nn.softmax(logits)
}
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
# Define loss function
onehot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=2)
self.losses.append(
tf.multiply(self.loss_weights.cross_entropy,
tf.losses.softmax_cross_entropy(onehot_labels=onehot_labels, logits=logits),
name="cross_entropy_loss")
)
# Sum all partial loss terms into the total training loss
loss = tf.reduce_sum(self.losses, name="total_loss")
for loss_scalar in self.losses:
|
# Define accuracy metric
eval_metric_ops = {
"metrics/accuracy": tf.metrics.accuracy(
labels=labels, predictions=predictions["classes"]),
"confusion_matrix/TP": tf.metrics.true_positives(
labels=labels, predictions=predictions["classes"]),
"confusion_matrix/TN": tf.metrics.true_negatives(
labels=labels, predictions=predictions["classes"]),
"confusion_matrix/FP": tf.metrics.false_positives(
labels=labels, predictions=predict
|
tf.summary.scalar(loss_scalar.name, loss_scalar, family="loss")
|
conditional_block
|
GroupINN.py
|
:
cross_entropy = 1.0
neg_penalty_reduce = 0.1
neg_penalty_gnn = 0.2
ortho_penalty_p = 0.2
ortho_penalty_n = 0.2
variance_penalty_p = 0.3
variance_penalty_n = 0.5
l2_penalty = 2e-3
@classmethod
def update_parser_argument(cls, parser: argparse.ArgumentParser):
args, _ = parser.parse_known_args()
parser.set_defaults(selected_model="gcn_classification_net")
print("===> Selected model: GroupINN")
group = parser.add_argument_group(title="GroupINN arguments")
group.add_argument("--dropout_rate", default=0, type=float, help="(default: %(default)s)")
group.add_argument("--c", default=0.85, type=float, help="(default: %(default)s)")
group.add_argument("--feature_reduction", default=5, type=int, help="(default: %(default)s)")
group.add_argument("--learning_rate", default=0.001, help="(default: %(default)s)")
arguments.add_loss_weights_argument(parser, cls.loss_weights, cls.__name__)
return parser
def __init__(self):
self.feature_notify = 0
def runtime_init(self, features, labels, mode):
self.losses = []
self.is_training = (mode==tf.estimator.ModeKeys.TRAIN)
def model_fn(self, features, labels,
mode:tf.estimator.ModeKeys, params):
"""
features: batch_features from input_fn
labels: batch_labels from input_fn
mode: An instance of tf.estimator.ModeKeys
params: Additional configuration
"""
self.runtime_init(features, labels, mode)
# Load parameters
self.num_features = params["args"].feature_reduction
self.c = params["args"].c
self.dropout_rate = params["args"].dropout_rate
self.selected_timeseries = params["args"].selected_timeseries
self.learning_rate = params["args"].learning_rate
self.tf_summary = (not params["args"].no_tensorboard)
# Construct network
s_feature = features[self.selected_timeseries]
s_feature_p = s_feature[0]
s_feature_n = s_feature[1]
num_columns = int(s_feature_p.shape[-1])
self.initializer = tf.initializers.random_uniform(0, 0.5/self.num_features)
p_reduce = self.dim_reduction(s_feature_p, self.num_features, "reduction_p",
self.loss_weights.ortho_penalty_p, self.loss_weights.variance_penalty_p, self.loss_weights.neg_penalty_reduce)
p_conv1 = self.gnn_conv(None, p_reduce, "conv1_p", self.loss_weights.neg_penalty_gnn)
p_conv2 = self.gnn_conv(p_conv1, p_reduce, "conv2_p", self.loss_weights.neg_penalty_gnn)
p_conv3 = self.gnn_conv(p_conv2, p_reduce, "conv3_p", self.loss_weights.neg_penalty_gnn)
n_reduce = self.dim_reduction(s_feature_n, self.num_features, "reduction_n",
self.loss_weights.ortho_penalty_n, self.loss_weights.variance_penalty_n, self.loss_weights.neg_penalty_reduce)
n_conv1 = self.gnn_conv(None, n_reduce, "conv1_n", self.loss_weights.neg_penalty_gnn)
n_conv2 = self.gnn_conv(n_conv1, n_reduce, "conv2_n", self.loss_weights.neg_penalty_gnn)
n_conv3 = self.gnn_conv(n_conv2, n_reduce, "conv3_n", self.loss_weights.neg_penalty_gnn)
conv_concat = tf.reshape(tf.concat([p_conv3,n_conv3], -1), [-1, 2*self.num_features**2])
dense_output = self.dense_layers(conv_concat, self.loss_weights.l2_penalty)
output = self.generate_output(dense_output, labels, mode)
if self.is_training:
if self.feature_notify % 10 == 0:
print("Selected feature: {}".format(self.selected_timeseries))
self.loss_weights._print_current_weights() #pylint: disable=E1101
self.count_params()
self.feature_notify += 1
return output
def dim_reduction(self, adj_matrix, num_reduce, name_scope,
ortho_penalty, variance_penalty, neg_penalty):
column_dim = int(adj_matrix.shape[-1])
with tf.variable_scope(name_scope):
kernel = tf.get_variable("dim_reduction_kernel", shape=[column_dim, num_reduce],
trainable=True, initializer=self.initializer,
regularizer=tf.contrib.layers.l1_regularizer(scale=0.05)
)
kernel_p = tf.nn.relu(kernel)
AF = tf.tensordot(adj_matrix, kernel_p, axes=[[-1],[0]])
reduced_adj_matrix = tf.transpose(
tf.tensordot(kernel_p, AF, axes=[[0],[1]]), #num_reduce*batch*num_reduce
perm=[1,0,2], name="reduced_adj")
if self.tf_summary:
tf.summary.image("dim_reduction_kernel", tf.expand_dims(
tf.expand_dims(kernel, axis=0),
axis=-1))
tf.summary.image("dim_reduction_kernel_p", tf.expand_dims(
tf.expand_dims(kernel_p, axis=0),
axis=-1))
gram_matrix = tf.matmul(kernel_p, kernel_p, transpose_a=True)
diag_elements = tf.diag_part(gram_matrix)
zero = tf.constant(0, dtype=tf.float32)
mask = tf.not_equal(diag_elements, zero)
if ortho_penalty!=0:
ortho_loss_matrix = tf.square(gram_matrix - tf.diag(diag_elements))
ortho_loss = tf.multiply(ortho_penalty, tf.reduce_sum(ortho_loss_matrix), name="ortho_penalty")
self.losses.append(ortho_loss)
if variance_penalty!=0:
_ , variance = tf.nn.moments(tf.boolean_mask(diag_elements,mask), axes=[0])
variance_loss = tf.multiply(variance_penalty, variance, name="variance_penalty")
self.losses.append(variance_loss)
if neg_penalty!=0:
neg_loss = tf.multiply(neg_penalty, tf.reduce_sum(tf.nn.relu(tf.constant(1e-6)-kernel)), name="negative_penalty")
self.losses.append(neg_loss)
return reduced_adj_matrix
def gnn_conv(self, prev_output, adj_matrix, name_scope, neg_penalty): #I+c*A*X*W,X0=I
feature_dim = int(adj_matrix.shape[-1])
eye = tf.eye(feature_dim)
with tf.variable_scope(name_scope):
kernel = tf.get_variable("gnn_kernel",
shape=[feature_dim,feature_dim], trainable=True, initializer=self.initializer)
if prev_output is None:
AXW = tf.tensordot(adj_matrix, kernel, [[-1],[0]])
else:
XW = tf.tensordot(prev_output, kernel, [[-1],[0]]) #batch*feature_dim*feature_dim
AXW = tf.matmul(adj_matrix, XW)
I_cAXW = eye+self.c*AXW
y_relu = tf.nn.relu(I_cAXW)
col_mean = tf.tile(tf.reduce_mean(y_relu, axis=-2, keepdims=True)+1e-6,[1,feature_dim,1])
y_norm = tf.divide(y_relu, col_mean)
output = tf.nn.softplus(y_norm, name="gnn_output")
if self.tf_summary:
tf.summary.image("gnn_kernel", tf.expand_dims(
tf.expand_dims(kernel, axis=0),
axis=-1))
if neg_penalty!=0:
neg_loss = tf.multiply(neg_penalty, tf.reduce_sum(tf.nn.relu(tf.constant(1e-6)-kernel)), name="negative_penalty")
self.losses.append(neg_loss)
return output
def dense_layers(self, input_flat, l2_penalty, name_scope="dense_layers"):
with tf.variable_scope(name_scope):
output_layer = tf.layers.Dense(2, name="output_layer")
logits = output_layer(input_flat)
kernel_var = output_layer.trainable_variables[0]
if l2_penalty != 0:
dense_kernel = output_layer.trainable_variables[0].read_value()
l2_loss = tf.multiply(l2_penalty, tf.nn.l2_loss(dense_kernel), name="l2_penalty")
self.losses.append(l2_loss)
return logits
def generate_output(self, logits, labels, mode:tf.estimator.ModeKeys):
predictions = {
"classes": tf.argmax(input=logits, axis=1),
"probabilities": tf.nn.softmax(logits)
}
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
# Define loss function
onehot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=2)
self.losses.append(
tf.multiply(self.loss_weights.cross_entropy,
tf.losses.softmax_cross_entropy(onehot_labels=onehot_labels, logits=logits),
name="cross_entropy_loss")
)
# Sum all partial loss terms into the total training loss
loss = tf.reduce_sum(self.losses, name="total_loss")
for loss_scalar in self.losses:
tf.summary.scalar(loss_scalar.name, loss_scalar, family="loss")
# Define accuracy metric
eval_metric_ops = {
"metrics/accuracy": tf.metrics.accuracy
|
loss_weights
|
identifier_name
|
|
GroupINN.py
|
classmethod
def update_parser_argument(cls, parser: argparse.ArgumentParser):
args, _ = parser.parse_known_args()
parser.set_defaults(selected_model="gcn_classification_net")
print("===> Selected model: GroupINN")
group = parser.add_argument_group(title="GroupINN arguments")
group.add_argument("--dropout_rate", default=0, type=float, help="(default: %(default)s)")
group.add_argument("--c", default=0.85, type=float, help="(default: %(default)s)")
group.add_argument("--feature_reduction", default=5, type=int, help="(default: %(default)s)")
group.add_argument("--learning_rate", default=0.001, help="(default: %(default)s)")
arguments.add_loss_weights_argument(parser, cls.loss_weights, cls.__name__)
return parser
def __init__(self):
self.feature_notify = 0
def runtime_init(self, features, labels, mode):
|
def model_fn(self, features, labels,
mode:tf.estimator.ModeKeys, params):
"""
features: batch_features from input_fn
labels: batch_labels from input_fn
mode: An instance of tf.estimator.ModeKeys
params: Additional configuration
"""
self.runtime_init(features, labels, mode)
# Load parameters
self.num_features = params["args"].feature_reduction
self.c = params["args"].c
self.dropout_rate = params["args"].dropout_rate
self.selected_timeseries = params["args"].selected_timeseries
self.learning_rate = params["args"].learning_rate
self.tf_summary = (not params["args"].no_tensorboard)
# Construct network
s_feature = features[self.selected_timeseries]
s_feature_p = s_feature[0]
s_feature_n = s_feature[1]
num_columns = int(s_feature_p.shape[-1])
self.initializer = tf.initializers.random_uniform(0, 0.5/self.num_features)
p_reduce = self.dim_reduction(s_feature_p, self.num_features, "reduction_p",
self.loss_weights.ortho_penalty_p, self.loss_weights.variance_penalty_p, self.loss_weights.neg_penalty_reduce)
p_conv1 = self.gnn_conv(None, p_reduce, "conv1_p", self.loss_weights.neg_penalty_gnn)
p_conv2 = self.gnn_conv(p_conv1, p_reduce, "conv2_p", self.loss_weights.neg_penalty_gnn)
p_conv3 = self.gnn_conv(p_conv2, p_reduce, "conv3_p", self.loss_weights.neg_penalty_gnn)
n_reduce = self.dim_reduction(s_feature_n, self.num_features, "reduction_n",
self.loss_weights.ortho_penalty_n, self.loss_weights.variance_penalty_n, self.loss_weights.neg_penalty_reduce)
n_conv1 = self.gnn_conv(None, n_reduce, "conv1_n", self.loss_weights.neg_penalty_gnn)
n_conv2 = self.gnn_conv(n_conv1, n_reduce, "conv2_n", self.loss_weights.neg_penalty_gnn)
n_conv3 = self.gnn_conv(n_conv2, n_reduce, "conv3_n", self.loss_weights.neg_penalty_gnn)
conv_concat = tf.reshape(tf.concat([p_conv3,n_conv3], -1), [-1, 2*self.num_features**2])
dense_output = self.dense_layers(conv_concat, self.loss_weights.l2_penalty)
output = self.generate_output(dense_output, labels, mode)
if self.is_training:
if self.feature_notify % 10 == 0:
print("Selected feature: {}".format(self.selected_timeseries))
self.loss_weights._print_current_weights() #pylint: disable=E1101
self.count_params()
self.feature_notify += 1
return output
def dim_reduction(self, adj_matrix, num_reduce, name_scope,
ortho_penalty, variance_penalty, neg_penalty):
column_dim = int(adj_matrix.shape[-1])
with tf.variable_scope(name_scope):
kernel = tf.get_variable("dim_reduction_kernel", shape=[column_dim, num_reduce],
trainable=True, initializer=self.initializer,
regularizer=tf.contrib.layers.l1_regularizer(scale=0.05)
)
kernel_p = tf.nn.relu(kernel)
AF = tf.tensordot(adj_matrix, kernel_p, axes=[[-1],[0]])
reduced_adj_matrix = tf.transpose(
tf.tensordot(kernel_p, AF, axes=[[0],[1]]), #num_reduce*batch*num_reduce
perm=[1,0,2], name="reduced_adj")
if self.tf_summary:
tf.summary.image("dim_reduction_kernel", tf.expand_dims(
tf.expand_dims(kernel, axis=0),
axis=-1))
tf.summary.image("dim_reduction_kernel_p", tf.expand_dims(
tf.expand_dims(kernel_p, axis=0),
axis=-1))
gram_matrix = tf.matmul(kernel_p, kernel_p, transpose_a=True)
diag_elements = tf.diag_part(gram_matrix)
zero = tf.constant(0, dtype=tf.float32)
mask = tf.not_equal(diag_elements, zero)
if ortho_penalty!=0:
ortho_loss_matrix = tf.square(gram_matrix - tf.diag(diag_elements))
ortho_loss = tf.multiply(ortho_penalty, tf.reduce_sum(ortho_loss_matrix), name="ortho_penalty")
self.losses.append(ortho_loss)
if variance_penalty!=0:
_ , variance = tf.nn.moments(tf.boolean_mask(diag_elements,mask), axes=[0])
variance_loss = tf.multiply(variance_penalty, variance, name="variance_penalty")
self.losses.append(variance_loss)
if neg_penalty!=0:
neg_loss = tf.multiply(neg_penalty, tf.reduce_sum(tf.nn.relu(tf.constant(1e-6)-kernel)), name="negative_penalty")
self.losses.append(neg_loss)
return reduced_adj_matrix
def gnn_conv(self, prev_output, adj_matrix, name_scope, neg_penalty): #I+c*A*X*W,X0=I
feature_dim = int(adj_matrix.shape[-1])
eye = tf.eye(feature_dim)
with tf.variable_scope(name_scope):
kernel = tf.get_variable("gnn_kernel",
shape=[feature_dim,feature_dim], trainable=True, initializer=self.initializer)
if prev_output is None:
AXW = tf.tensordot(adj_matrix, kernel, [[-1],[0]])
else:
XW = tf.tensordot(prev_output, kernel, [[-1],[0]]) #batch*feature_dim*feature_dim
AXW = tf.matmul(adj_matrix, XW)
I_cAXW = eye+self.c*AXW
y_relu = tf.nn.relu(I_cAXW)
col_mean = tf.tile(tf.reduce_mean(y_relu, axis=-2, keepdims=True)+1e-6,[1,feature_dim,1])
y_norm = tf.divide(y_relu, col_mean)
output = tf.nn.softplus(y_norm, name="gnn_output")
if self.tf_summary:
tf.summary.image("gnn_kernel", tf.expand_dims(
tf.expand_dims(kernel, axis=0),
axis=-1))
if neg_penalty!=0:
neg_loss = tf.multiply(neg_penalty, tf.reduce_sum(tf.nn.relu(tf.constant(1e-6)-kernel)), name="negative_penalty")
self.losses.append(neg_loss)
return output
def dense_layers(self, input_flat, l2_penalty, name_scope="dense_layers"):
with tf.variable_scope(name_scope):
output_layer = tf.layers.Dense(2, name="output_layer")
logits = output_layer(input_flat)
kernel_var = output_layer.trainable_variables[0]
if l2_penalty != 0:
dense_kernel = output_layer.trainable_variables[0].read_value()
l2_loss = tf.multiply(l2_penalty, tf.nn.l2_loss(dense_kernel), name="l2_penalty")
self.losses.append(l2_loss)
return logits
def generate_output(self, logits, labels, mode:tf.estimator.ModeKeys):
predictions = {
"classes": tf.argmax(input=logits, axis=1),
"probabilities": tf.nn.softmax(logits)
}
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
# Define loss function
onehot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=2)
self.losses.append(
tf.multiply(self.loss_weights.cross_entropy,
tf.losses.softmax_cross_entropy(onehot_labels=onehot_labels, logits=logits),
name="cross_entropy_loss")
)
# Sum all partial loss terms into the total training loss
loss = tf.reduce_sum(self.losses, name="total_loss")
for loss_scalar in self.losses:
tf.summary.scalar(loss_scalar.name, loss_scalar, family="loss")
# Define accuracy metric
eval_metric_ops = {
"metrics/accuracy": tf.metrics.accuracy(
labels=labels, predictions=predictions["classes"]),
"confusion_matrix/TP": tf.metrics.true_positives(
labels=labels, predictions=predictions["classes"]),
"confusion_matrix/TN": tf.metrics.true_negatives(
labels=labels, predictions=predictions["classes"]),
"confusion_matrix/FP": tf.metrics.false_positives(
labels=labels, predictions=predict
|
self.losses = []
self.is_training = (mode==tf.estimator.ModeKeys.TRAIN)
|
identifier_body
|
GroupINN.py
|
_model="gcn_classification_net")
print("===> Selected model: GroupINN")
group = parser.add_argument_group(title="GroupINN arguments")
group.add_argument("--dropout_rate", default=0, type=float, help="(default: %(default)s)")
group.add_argument("--c", default=0.85, type=float, help="(default: %(default)s)")
group.add_argument("--feature_reduction", default=5, type=int, help="(default: %(default)s)")
group.add_argument("--learning_rate", default=0.001, help="(default: %(default)s)")
arguments.add_loss_weights_argument(parser, cls.loss_weights, cls.__name__)
return parser
def __init__(self):
self.feature_notify = 0
def runtime_init(self, features, labels, mode):
self.losses = []
self.is_training = (mode==tf.estimator.ModeKeys.TRAIN)
def model_fn(self, features, labels,
mode:tf.estimator.ModeKeys, params):
"""
features: batch_features from input_fn
labels: batch_labels from input_fn
mode: An instance of tf.estimator.ModeKeys
params: Additional configuration
"""
self.runtime_init(features, labels, mode)
# Load parameters
self.num_features = params["args"].feature_reduction
self.c = params["args"].c
self.dropout_rate = params["args"].dropout_rate
self.selected_timeseries = params["args"].selected_timeseries
self.learning_rate = params["args"].learning_rate
self.tf_summary = (not params["args"].no_tensorboard)
# Construct network
s_feature = features[self.selected_timeseries]
s_feature_p = s_feature[0]
s_feature_n = s_feature[1]
num_columns = int(s_feature_p.shape[-1])
self.initializer = tf.initializers.random_uniform(0, 0.5/self.num_features)
p_reduce = self.dim_reduction(s_feature_p, self.num_features, "reduction_p",
self.loss_weights.ortho_penalty_p, self.loss_weights.variance_penalty_p, self.loss_weights.neg_penalty_reduce)
p_conv1 = self.gnn_conv(None, p_reduce, "conv1_p", self.loss_weights.neg_penalty_gnn)
p_conv2 = self.gnn_conv(p_conv1, p_reduce, "conv2_p", self.loss_weights.neg_penalty_gnn)
p_conv3 = self.gnn_conv(p_conv2, p_reduce, "conv3_p", self.loss_weights.neg_penalty_gnn)
n_reduce = self.dim_reduction(s_feature_n, self.num_features, "reduction_n",
self.loss_weights.ortho_penalty_n, self.loss_weights.variance_penalty_n, self.loss_weights.neg_penalty_reduce)
n_conv1 = self.gnn_conv(None, n_reduce, "conv1_n", self.loss_weights.neg_penalty_gnn)
n_conv2 = self.gnn_conv(n_conv1, n_reduce, "conv2_n", self.loss_weights.neg_penalty_gnn)
n_conv3 = self.gnn_conv(n_conv2, n_reduce, "conv3_n", self.loss_weights.neg_penalty_gnn)
conv_concat = tf.reshape(tf.concat([p_conv3,n_conv3], -1), [-1, 2*self.num_features**2])
dense_output = self.dense_layers(conv_concat, self.loss_weights.l2_penalty)
output = self.generate_output(dense_output, labels, mode)
if self.is_training:
if self.feature_notify % 10 == 0:
print("Selected feature: {}".format(self.selected_timeseries))
self.loss_weights._print_current_weights() #pylint: disable=E1101
self.count_params()
self.feature_notify += 1
return output
def dim_reduction(self, adj_matrix, num_reduce, name_scope,
ortho_penalty, variance_penalty, neg_penalty):
column_dim = int(adj_matrix.shape[-1])
with tf.variable_scope(name_scope):
kernel = tf.get_variable("dim_reduction_kernel", shape=[column_dim, num_reduce],
trainable=True, initializer=self.initializer,
regularizer=tf.contrib.layers.l1_regularizer(scale=0.05)
)
kernel_p = tf.nn.relu(kernel)
AF = tf.tensordot(adj_matrix, kernel_p, axes=[[-1],[0]])
reduced_adj_matrix = tf.transpose(
tf.tensordot(kernel_p, AF, axes=[[0],[1]]), #num_reduce*batch*num_reduce
perm=[1,0,2], name="reduced_adj")
if self.tf_summary:
tf.summary.image("dim_reduction_kernel", tf.expand_dims(
tf.expand_dims(kernel, axis=0),
axis=-1))
tf.summary.image("dim_reduction_kernel_p", tf.expand_dims(
tf.expand_dims(kernel_p, axis=0),
axis=-1))
gram_matrix = tf.matmul(kernel_p, kernel_p, transpose_a=True)
diag_elements = tf.diag_part(gram_matrix)
zero = tf.constant(0, dtype=tf.float32)
mask = tf.not_equal(diag_elements, zero)
if ortho_penalty!=0:
ortho_loss_matrix = tf.square(gram_matrix - tf.diag(diag_elements))
ortho_loss = tf.multiply(ortho_penalty, tf.reduce_sum(ortho_loss_matrix), name="ortho_penalty")
self.losses.append(ortho_loss)
if variance_penalty!=0:
_ , variance = tf.nn.moments(tf.boolean_mask(diag_elements,mask), axes=[0])
variance_loss = tf.multiply(variance_penalty, variance, name="variance_penalty")
self.losses.append(variance_loss)
if neg_penalty!=0:
neg_loss = tf.multiply(neg_penalty, tf.reduce_sum(tf.nn.relu(tf.constant(1e-6)-kernel)), name="negative_penalty")
self.losses.append(neg_loss)
return reduced_adj_matrix
def gnn_conv(self, prev_output, adj_matrix, name_scope, neg_penalty): #I+c*A*X*W,X0=I
feature_dim = int(adj_matrix.shape[-1])
eye = tf.eye(feature_dim)
with tf.variable_scope(name_scope):
kernel = tf.get_variable("gnn_kernel",
shape=[feature_dim,feature_dim], trainable=True, initializer=self.initializer)
if prev_output is None:
AXW = tf.tensordot(adj_matrix, kernel, [[-1],[0]])
else:
XW = tf.tensordot(prev_output, kernel, [[-1],[0]]) #batch*feature_dim*feature_dim
AXW = tf.matmul(adj_matrix, XW)
I_cAXW = eye+self.c*AXW
y_relu = tf.nn.relu(I_cAXW)
col_mean = tf.tile(tf.reduce_mean(y_relu, axis=-2, keepdims=True)+1e-6,[1,feature_dim,1])
y_norm = tf.divide(y_relu, col_mean)
output = tf.nn.softplus(y_norm, name="gnn_output")
if self.tf_summary:
tf.summary.image("gnn_kernel", tf.expand_dims(
tf.expand_dims(kernel, axis=0),
axis=-1))
if neg_penalty!=0:
neg_loss = tf.multiply(neg_penalty, tf.reduce_sum(tf.nn.relu(tf.constant(1e-6)-kernel)), name="negative_penalty")
self.losses.append(neg_loss)
return output
def dense_layers(self, input_flat, l2_penalty, name_scope="dense_layers"):
with tf.variable_scope(name_scope):
output_layer = tf.layers.Dense(2, name="output_layer")
logits = output_layer(input_flat)
kernel_var = output_layer.trainable_variables[0]
if l2_penalty != 0:
dense_kernel = output_layer.trainable_variables[0].read_value()
l2_loss = tf.multiply(l2_penalty, tf.nn.l2_loss(dense_kernel), name="l2_penalty")
self.losses.append(l2_loss)
return logits
def generate_output(self, logits, labels, mode:tf.estimator.ModeKeys):
predictions = {
"classes": tf.argmax(input=logits, axis=1),
"probabilities": tf.nn.softmax(logits)
}
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
# Define loss function
onehot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=2)
self.losses.append(
tf.multiply(self.loss_weights.cross_entropy,
tf.losses.softmax_cross_entropy(onehot_labels=onehot_labels, logits=logits),
name="cross_entropy_loss")
)
# Sum all partial loss terms into the total training loss
loss = tf.reduce_sum(self.losses, name="total_loss")
for loss_scalar in self.losses:
tf.summary.scalar(loss_scalar.name, loss_scalar, family="loss")
# Define accuracy metric
eval_metric_ops = {
"metrics/accuracy": tf.metrics.accuracy(
labels=labels, predictions=predictions["classes"]),
"confusion_matrix/TP": tf.metrics.true_positives(
labels=labels, predictions=predictions["classes"]),
"confusion_matrix/TN": tf.metrics.true_negatives(
|
labels=labels, predictions=predictions["classes"]),
"confusion_matrix/FP": tf.metrics.false_positives(
labels=labels, predictions=predictions["classes"]),
"confusion_matrix/FN": tf.metrics.false_negatives(
labels=labels, predictions=predictions["classes"]),
|
random_line_split
|
|
plot_TL_results.py
|
estimated / fitted poorly
"""
ret_list = []
for b in B:
D = (A-b).reshape((2,2))
det = np.linalg.det(D)
ret = 1
if det < 0 or D[0,0] < 0:
ret = 0
ret_list.append(ret)
return ret_list
## compare each number of secondary initpts
## to baseline with a one-sided Mann-Whitney U (Wilcoxon rank-sum) test to see
## when TL is faster than the baseline, also collect the lowest,
## highest, median and mean expected improvement and their secondary initpts
def
|
(b_times, r_times, N = None, alpha = 0.1, method = mannwhitneyu):
"""
One-sided test (default method: Mann-Whitney U / Wilcoxon rank-sum) on bootstrap resamples
of b_times and r_times. H1: b_times tend to be smaller than r_times (baseline converges
faster); H0: they do not. Returns False if H0 is rejected, True otherwise.
"""
if N is None:
N = min([len(b_times), len(r_times)])*5
b = np.random.choice(b_times, size = N, replace = True)
r = np.random.choice(r_times, size = N, replace = True)
#diff = b-r
#diff = diff[diff != 0]
# is the median of the differences b-r less than zero
test = method(b,r, alternative = 'less')
if test[1] < alpha:
# reject
return False
else:
return True
def loss_function_table(c_speed, name):
"""
For each number k of secondary initpts, take the convergence results of the
baseline (b_times) and of the experiment with k secondary points (r_times),
compute the mean convergence time relative to the baseline mean (mean_loss),
and run the one-sided Mann-Whitney U test via indicator_loss (True if the
baseline is not significantly faster, False otherwise).
Returns a pandas DataFrame with one row per k.
"""
initpts_list = np.unique(c_speed[:,0]).reshape(-1,1)
initpts_list = initpts_list[initpts_list != 0] # remove baselines
b_times = c_speed[c_speed[:,0] == 0,1]
b_mean = np.mean(b_times)
faster = [] # which number of secondary initpts are faster than the baseline
for initpts in initpts_list:
r_times = c_speed[c_speed[:,0] == initpts, 1]
#median_ixd = np.argsort(r_times)[len(r_times)//2]
# add initpts, mean (loss function), wx test (indicator loss function) if faster than baseline
faster.append([initpts, round(np.mean(r_times)/b_mean, 2), indicator_loss(b_times, r_times)])
faster = np.array(faster).reshape(-1, 3)
ret = pd.DataFrame({'experiment':name,
'secondary_initpts':faster[:,0],
'mean_loss':faster[:,1],
'indicator_loss':faster[:,2]})
# normalize mean acquisition time
# loss function minima ->
# plot loss function minima against number of secondary initpts
return ret
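# A minimal sketch of the input layout expected by loss_function_table (toy
# numbers, not real measurements): c_speed is an (n, 2) array whose rows are
# [secondary_initpts, convergence_time]; rows with secondary_initpts == 0 are
# the baseline runs.
import numpy as np

c_speed_example = np.array([
    [0.0, 12.0], [0.0, 11.0], [0.0, 13.0],   # baseline runs
    [5.0, 10.0], [5.0,  9.5], [5.0, 10.5],   # 5 secondary initpts
    [10.0, 8.0], [10.0, 8.5], [10.0, 7.5],   # 10 secondary initpts
])
example_table = loss_function_table(c_speed_example, name="toy_experiment")
# -> one row per initpts count with mean_loss (relative to the baseline mean)
#    and the indicator_loss flag from the test above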
## plot convergence and collect loss function table
def plot_TL_convergence(filename, experiment_folders, baseline_folders):
"""
Plot for list of TL experiments:
convergence speed to 0.1 kcal/mol in
- BO iterations and CPU time
- mean of both (statistical expected value)
- linear trend
"""
cputime_max = 0
N = len(experiment_folders)
fig, axs = plt.subplots(2,N,
figsize = (5*N,10),
sharey = 'row')
SMALL_SIZE = 15
MEDIUM_SIZE = 20
LARGE_SIZE = 25
tot_loss_table = None
for i in range(N):
experiment = experiment_folders[i].copy()
baseline = baseline_folders[i].copy()
explist = baseline
for exp in experiment:
explist.append(exp)
convergence_iterations = []
convergence_times = []
for exp in explist:
if len(exp['initpts'])>1:
secondary_initpts = int(exp['initpts'][1])
else:
secondary_initpts = 0
# convergence by iteration
convergence_iter = exp['iterations_to_gmp_convergence'][5]
convergence_iterations.append([secondary_initpts,convergence_iter])
# convergence by cpu time
convergence_time = exp['totaltime_to_gmp_convergence'][5]
convergence_times.append([secondary_initpts, convergence_time])
# plot
convergence_iterations = np.array(convergence_iterations, dtype = float)
axs[0, i].scatter(convergence_iterations[:,0],
convergence_iterations[:,1],
color = 'blue', alpha = 0.5, marker = 'x',
label = 'observation')
# linear fit
raw_rows = convergence_iterations
clean_rows = raw_rows[np.logical_not(np.logical_or(np.isnan(raw_rows[:,0]),
np.isnan(raw_rows[:,1]))),:]
x_train = clean_rows[:,0].reshape(-1,1)
y_train = clean_rows[:,1].reshape(-1,1)
reg = LinearRegression().fit(x_train, y_train)
x = np.unique(convergence_iterations[:,0]).reshape(-1,1)
y = reg.predict(x)
axs[0, i].plot(x,y, color = 'red', label = 'trend', linewidth = 3)
# plot means
mean_labelled = False
for initpts in np.unique(x_train):
mean = np.mean(y_train[x_train == initpts])
if mean_labelled:
axs[0,i].scatter([initpts], [mean], color = 'red', marker = 's')
else:
axs[0,i].scatter([initpts], [mean],
color = 'red', marker = 's',
label = 'mean')
mean_labelled = True
axs[0,i].legend(fontsize = SMALL_SIZE)
###
convergence_times = np.array(convergence_times, dtype = float)
axs[1, i].scatter(convergence_times[:,0],
convergence_times[:,1],
color = 'blue', alpha = 0.5, marker = 'x',
label = 'observation')
### linear fit
raw_rows = convergence_times
clean_rows = raw_rows[np.logical_not(np.logical_or(np.isnan(raw_rows[:,0]),
np.isnan(raw_rows[:,1]))),:]
clean_rows = clean_rows.reshape(-1,2)
#outliers = clean_rows[clean_rows[:,1] > cputime_max,:]
# outlier if more than 2.5 standard deviations above the group mean
outlier_idx = []
for row in clean_rows:
initpts = row[0]
val = row[1]
obs = clean_rows[clean_rows[:,0] == initpts,:]
#obs = obs[obs != row]
m = np.mean(obs[:, 1])
sd = np.std(obs[:, 1])
if sd > 0 and (val - m) / sd > 2.5: # z-score over the group's times - assuming a normal
# distribution, well under 1% of points should be this far above the mean
outlier_idx.append(True)
else:
outlier_idx.append(False)
outliers = clean_rows[outlier_idx, :]
#clean_rows = clean_rows[clean_rows[:,1] <= cputime_max, :]
clean_rows = clean_rows[np.logical_not(outlier_idx),:]
if max(clean_rows[:,1]) > cputime_max:
cputime_max = max(clean_rows[:,1])
x_train = clean_rows[:,0].reshape(-1,1)
y_train = clean_rows[:,1].reshape(-1,1)
degree=1
polyreg=make_pipeline(PolynomialFeatures(degree),LinearRegression())
polyreg.fit(x_train,y_train)
x = np.unique(convergence_iterations[:,0]).reshape(-1,1)
axs[0,i].set_xticks(x[::2])
y = polyreg.predict(x)
axs[1, i].plot(x,y, color = 'red', label = 'trend', linewidth = 3)
axs[1,i].set_xticks(x[::2])
outlier_labelled = False
for outlier in outliers:
if outlier_labelled:
axs[1,i].scatter([outlier[0]],[cputime_max*1.1],
marker = 6, color = 'black')
else:
axs[1,i].scatter([outlier[0]],[cputime_max*1.1],
marker = 6, color = 'black',
label = 'outlier')
outlier_labelled = True
axs[1,i].annotate('{:.0f}'.format(outlier[1]),
[outlier[0],cputime_max*1.1], rotation = 270,
fontsize = SMALL_SIZE)
mean_labelled = False
for initpts in np.unique(x_train):
mean = np.mean(y_train[x_train == initpts])
if mean_labelled:
axs[1,i].scatter([initpts], [mean], color = 'red', marker = 's')
else:
axs[1,i].scatter([initpts], [mean],
color = 'red', marker = 's',
label = 'mean')
mean_labelled = True
axs[1,i].legend(fontsize = SMALL_SIZE)
expname = experiment_folders[i][0]['name'].split('_')[0]
|
indicator_loss
|
identifier_name
|
plot_TL_results.py
|
estimated / fitted poorly
"""
ret_list = []
for b in B:
D = (A-b).reshape((2,2))
det = np.linalg.det(D)
ret = 1
if det < 0 or D[0,0] < 0:
ret = 0
ret_list.append(ret)
return ret_list
## compare each number of secondary initpts
## to baseline with a one-sided Mann-Whitney U (Wilcoxon rank-sum) test to see
## when TL is faster than the baseline, also collect the lowest,
## highest, median and mean expected improvement and their secondary initpts
def indicator_loss(b_times, r_times, N = None, alpha = 0.1, method = mannwhitneyu):
"""
One-sided test (default method: Mann-Whitney U / Wilcoxon rank-sum) on bootstrap resamples
of b_times and r_times. H1: b_times tend to be smaller than r_times (baseline converges
faster); H0: they do not. Returns False if H0 is rejected, True otherwise.
"""
if N is None:
N = min([len(b_times), len(r_times)])*5
b = np.random.choice(b_times, size = N, replace = True)
r = np.random.choice(r_times, size = N, replace = True)
#diff = b-r
#diff = diff[diff != 0]
# is the median of the differences b-r less than zero
test = method(b,r, alternative = 'less')
if test[1] < alpha:
# reject
return False
else:
return True
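# A minimal usage sketch of indicator_loss on synthetic data (the numbers below
# are made up for illustration): both samples are bootstrapped and compared with
# the one-sided Mann-Whitney U test defined above.
import numpy as np

rng = np.random.default_rng(0)
baseline_times = rng.normal(100.0, 10.0, size=30)  # assumed baseline convergence times
tl_times = rng.normal(80.0, 10.0, size=30)         # assumed TL convergence times (faster)
print(indicator_loss(baseline_times, tl_times))    # True: baseline not significantly faster
print(indicator_loss(tl_times, baseline_times))    # False: first sample significantly faster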
def loss_function_table(c_speed, name):
"""
For each number k of secondary initpts, take the convergence results of the
baseline (b_times) and of the experiment with k secondary points (r_times),
compute the mean convergence time relative to the baseline mean (mean_loss),
and run the one-sided Mann-Whitney U test via indicator_loss (True if the
baseline is not significantly faster, False otherwise).
Returns a pandas DataFrame with one row per k.
"""
initpts_list = np.unique(c_speed[:,0]).reshape(-1,1)
initpts_list = initpts_list[initpts_list != 0] # remove baselines
b_times = c_speed[c_speed[:,0] == 0,1]
b_mean = np.mean(b_times)
faster = [] # which number of secondary initpts are faster than the baseline
for initpts in initpts_list:
r_times = c_speed[c_speed[:,0] == initpts, 1]
#median_ixd = np.argsort(r_times)[len(r_times)//2]
# add initpts, mean (loss function), wx test (indicator loss function) if faster than baseline
faster.append([initpts, round(np.mean(r_times)/b_mean, 2), indicator_loss(b_times, r_times)])
faster = np.array(faster).reshape(-1, 3)
ret = pd.DataFrame({'experiment':name,
'secondary_initpts':faster[:,0],
'mean_loss':faster[:,1],
'indicator_loss':faster[:,2]})
# normalize mean acquisition time
# loss function minima ->
# plot loss function minima against number of secondary initpts
return ret
## plot convergence and collect loss function table
def plot_TL_convergence(filename, experiment_folders, baseline_folders):
"""
Plot for list of TL experiments:
convergence speed to 0.1 kcal/mol in
- BO iterations and CPU time
- mean of both (statistical expected value)
- linear trend
"""
cputime_max = 0
N = len(experiment_folders)
fig, axs = plt.subplots(2,N,
figsize = (5*N,10),
sharey = 'row')
SMALL_SIZE = 15
MEDIUM_SIZE = 20
LARGE_SIZE = 25
tot_loss_table = None
for i in range(N):
|
convergence_time = exp['totaltime_to_gmp_convergence'][5]
convergence_times.append([secondary_initpts, convergence_time])
# plot
convergence_iterations = np.array(convergence_iterations, dtype = float)
axs[0, i].scatter(convergence_iterations[:,0],
convergence_iterations[:,1],
color = 'blue', alpha = 0.5, marker = 'x',
label = 'observation')
# linear fit
raw_rows = convergence_iterations
clean_rows = raw_rows[np.logical_not(np.logical_or(np.isnan(raw_rows[:,0]),
np.isnan(raw_rows[:,1]))),:]
x_train = clean_rows[:,0].reshape(-1,1)
y_train = clean_rows[:,1].reshape(-1,1)
reg = LinearRegression().fit(x_train, y_train)
x = np.unique(convergence_iterations[:,0]).reshape(-1,1)
y = reg.predict(x)
axs[0, i].plot(x,y, color = 'red', label = 'trend', linewidth = 3)
# plot means
mean_labelled = False
for initpts in np.unique(x_train):
mean = np.mean(y_train[x_train == initpts])
if mean_labelled:
axs[0,i].scatter([initpts], [mean], color = 'red', marker = 's')
else:
axs[0,i].scatter([initpts], [mean],
color = 'red', marker = 's',
label = 'mean')
mean_labelled = True
axs[0,i].legend(fontsize = SMALL_SIZE)
###
convergence_times = np.array(convergence_times, dtype = float)
axs[1, i].scatter(convergence_times[:,0],
convergence_times[:,1],
color = 'blue', alpha = 0.5, marker = 'x',
label = 'observation')
### linear fit
raw_rows = convergence_times
clean_rows = raw_rows[np.logical_not(np.logical_or(np.isnan(raw_rows[:,0]),
np.isnan(raw_rows[:,1]))),:]
clean_rows = clean_rows.reshape(-1,2)
#outliers = clean_rows[clean_rows[:,1] > cputime_max,:]
# outlier if more than 2.5 standard deviations above the group mean
outlier_idx = []
for row in clean_rows:
initpts = row[0]
val = row[1]
obs = clean_rows[clean_rows[:,0] == initpts,:]
#obs = obs[obs != row]
m = np.mean(obs[:, 1])
sd = np.std(obs[:, 1])
if sd > 0 and (val - m) / sd > 2.5: # z-score over the group's times - assuming a normal
# distribution, well under 1% of points should be this far above the mean
outlier_idx.append(True)
else:
outlier_idx.append(False)
outliers = clean_rows[outlier_idx, :]
#clean_rows = clean_rows[clean_rows[:,1] <= cputime_max, :]
clean_rows = clean_rows[np.logical_not(outlier_idx),:]
if max(clean_rows[:,1]) > cputime_max:
cputime_max = max(clean_rows[:,1])
x_train = clean_rows[:,0].reshape(-1,1)
y_train = clean_rows[:,1].reshape(-1,1)
degree=1
polyreg=make_pipeline(PolynomialFeatures(degree),LinearRegression())
polyreg.fit(x_train,y_train)
x = np.unique(convergence_iterations[:,0]).reshape(-1,1)
axs[0,i].set_xticks(x[::2])
y = polyreg.predict(x)
axs[1, i].plot(x,y, color = 'red', label = 'trend', linewidth = 3)
axs[1,i].set_xticks(x[::2])
outlier_labelled = False
for outlier in outliers:
if outlier_labelled:
axs[1,i].scatter([outlier[0]],[cputime_max*1.1],
marker = 6, color = 'black')
else:
axs[1,i].scatter([outlier[0]],[cputime_max*1.1],
marker = 6, color = 'black',
label = 'outlier')
outlier_labelled = True
axs[1,i].annotate('{:.0f}'.format(outlier[1]),
[outlier[0],cputime_max*1.1], rotation = 270,
fontsize = SMALL_SIZE)
mean_labelled = False
for initpts in np.unique(x_train):
mean = np.mean(y_train[x_train == initpts])
if mean_labelled:
axs[1,i].scatter([initpts], [mean], color = 'red', marker = 's')
else:
axs[1,i].scatter([initpts], [mean],
color = 'red', marker = 's',
label = 'mean')
mean_labelled = True
axs[1,i].legend(fontsize = SMALL_SIZE)
expname = experiment_folders[i][0]['name'].split('_')[0]
title
|
experiment = experiment_folders[i].copy()
baseline = baseline_folders[i].copy()
explist = baseline
for exp in experiment:
explist.append(exp)
convergence_iterations = []
convergence_times = []
for exp in explist:
if len(exp['initpts'])>1:
secondary_initpts = int(exp['initpts'][1])
else:
secondary_initpts = 0
# convergence by iteration
convergence_iter = exp['iterations_to_gmp_convergence'][5]
convergence_iterations.append([secondary_initpts,convergence_iter])
# convergence by cpu time
|
conditional_block
|
plot_TL_results.py
|
## compare each number of secondary initpts
## to baseline with a one-sided Mann-Whitney U (Wilcoxon rank-sum) test to see
## when TL is faster than the baseline, also collect the lowest,
## highest, median and mean expected improvement and their secondary initpts
def indicator_loss(b_times, r_times, N = None, alpha = 0.1, method = mannwhitneyu):
"""
One-sided test (default method: Mann-Whitney U / Wilcoxon rank-sum) on bootstrap resamples
of b_times and r_times. H1: b_times tend to be smaller than r_times (baseline converges
faster); H0: they do not. Returns False if H0 is rejected, True otherwise.
"""
if N is None:
N = min([len(b_times), len(r_times)])*5
b = np.random.choice(b_times, size = N, replace = True)
r = np.random.choice(r_times, size = N, replace = True)
#diff = b-r
#diff = diff[diff != 0]
# is the median of the differences b-r less than zero
test = method(b,r, alternative = 'less')
if test[1] < alpha:
# reject
return False
else:
return True
def loss_function_table(c_speed, name):
"""
For each number k of secondary initpts, take the convergence results of the
baseline (b_times) and of the experiment with k secondary points (r_times),
compute the mean convergence time relative to the baseline mean (mean_loss),
and run the one-sided Mann-Whitney U test via indicator_loss (True if the
baseline is not significantly faster, False otherwise).
Returns a pandas DataFrame with one row per k.
"""
initpts_list = np.unique(c_speed[:,0]).reshape(-1,1)
initpts_list = initpts_list[initpts_list != 0] # remove baselines
b_times = c_speed[c_speed[:,0] == 0,1]
b_mean = np.mean(b_times)
faster = [] # which number of secondary initpts are faster than the baseline
for initpts in initpts_list:
r_times = c_speed[c_speed[:,0] == initpts, 1]
#median_ixd = np.argsort(r_times)[len(r_times)//2]
# add initpts, mean (loss function), wx test (indicator loss function) if faster than baseline
faster.append([initpts, round(np.mean(r_times)/b_mean, 2), indicator_loss(b_times, r_times)])
faster = np.array(faster).reshape(-1, 3)
ret = pd.DataFrame({'experiment':name,
'secondary_initpts':faster[:,0],
'mean_loss':faster[:,1],
'indicator_loss':faster[:,2]})
# normalize mean acquisition time
# loss function minima ->
# plot loss function minima against number of secondary initpts
return ret
## plot convergence and collect loss function table
def plot_TL_convergence(filename, experiment_folders, baseline_folders):
"""
Plot for list of TL experiments:
convergence speed to 0.1 kcal/mol in
- BO iterations and CPU time
- mean of both (statistical expected value)
- linear trend
"""
cputime_max = 0
N = len(experiment_folders)
fig, axs = plt.subplots(2,N,
figsize = (5*N,10),
sharey = 'row')
SMALL_SIZE = 15
MEDIUM_SIZE = 20
LARGE_SIZE = 25
tot_loss_table = None
for i in range(N):
experiment = experiment_folders[i].copy()
baseline = baseline_folders[i].copy()
explist = baseline
for exp in experiment:
explist.append(exp)
convergence_iterations = []
convergence_times = []
for exp in explist:
if len(exp['initpts'])>1:
secondary_initpts = int(exp['initpts'][1])
else:
secondary_initpts = 0
# convergence by iteration
convergence_iter = exp['iterations_to_gmp_convergence'][5]
convergence_iterations.append([secondary_initpts,convergence_iter])
# convergence by cpu time
convergence_time = exp['totaltime_to_gmp_convergence'][5]
convergence_times.append([secondary_initpts, convergence_time])
# plot
convergence_iterations = np.array(convergence_iterations, dtype = float)
axs[0, i].scatter(convergence_iterations[:,0],
convergence_iterations[:,1],
color = 'blue', alpha = 0.5, marker = 'x',
label = 'observation')
# linear fit
raw_rows = convergence_iterations
clean_rows = raw_rows[np.logical_not(np.logical_or(np.isnan(raw_rows[:,0]),
np.isnan(raw_rows[:,1]))),:]
x_train = clean_rows[:,0].reshape(-1,1)
y_train = clean_rows[:,1].reshape(-1,1)
reg = LinearRegression().fit(x_train, y_train)
x = np.unique(convergence_iterations[:,0]).reshape(-1,1)
y = reg.predict(x)
axs[0, i].plot(x,y, color = 'red', label = 'trend', linewidth = 3)
# plot means
mean_labelled = False
for initpts in np.unique(x_train):
mean = np.mean(y_train[x_train == initpts])
if mean_labelled:
axs[0,i].scatter([initpts], [mean], color = 'red', marker = 's')
else:
axs[0,i].scatter([initpts], [mean],
color = 'red', marker = 's',
label = 'mean')
mean_labelled = True
axs[0,i].legend(fontsize = SMALL_SIZE)
###
convergence_times = np.array(convergence_times, dtype = float)
axs[1, i].scatter(convergence_times[:,0],
convergence_times[:,1],
color = 'blue', alpha = 0.5, marker = 'x',
label = 'observation')
### linear fit
raw_rows = convergence_times
clean_rows = raw_rows[np.logical_not(np.logical_or(np.isnan(raw_rows[:,0]),
np.isnan(raw_rows[:,1]))),:]
clean_rows = clean_rows.reshape(-1,2)
#outliers = clean_rows[clean_rows[:,1] > cputime_max,:]
# outlier if more than 2.5 standard deviations above the group mean
outlier_idx = []
for row in clean_rows:
initpts = row[0]
val = row[1]
obs = clean_rows[clean_rows[:,0] == initpts,:]
#obs = obs[obs != row]
m = np.mean(obs[:, 1])
sd = np.std(obs[:, 1])
if sd > 0 and (val - m) / sd > 2.5: # z-score over the group's times - assuming a normal
# distribution, well under 1% of points should be this far above the mean
outlier_idx.append(True)
else:
outlier_idx.append(False)
outliers = clean_rows[outlier_idx, :]
#clean_rows = clean_rows[clean_rows[:,1] <= cputime_max, :]
clean_rows = clean_rows[np.logical_not(outlier_idx),:]
if max(clean_rows[:,1]) > cputime_max:
cputime_max = max(clean_rows[:,1])
x_train = clean_rows[:,0].reshape(-1,1)
y_train = clean_rows[:,1].reshape(-1,1)
degree=1
polyreg=make_pipeline(PolynomialFeatures(degree),LinearRegression())
polyreg.fit(x_train,y_train)
x = np.unique(convergence_iterations[:,0]).reshape(-1,1)
axs[0,i].set_xticks(x[::2])
y = polyreg.predict(x)
axs[1, i].plot(x,y, color = 'red', label = 'trend', linewidth = 3)
axs[1,i].set_xticks(x[::2])
outlier_labelled = False
for outlier in outliers:
if outlier_labelled:
axs[1,i].scatter([outlier[0]],[cputime_max*1.1],
marker = 6, color = 'black')
else:
axs[1,i].scatter([outlier[0]],[cputime_max*1.1],
marker = 6, color = 'black',
label = 'outlier')
outlier_labelled = True
axs[1,i].annotate('{:.0f}'.format(outlier[1]),
[outlier[0],cputime_max*1.1], rotation = 270,
fontsize = SMALL_SIZE)
mean_labelled = False
for initpts in np.unique(x_train):
mean = np.mean(y_train[x_train == initpts])
if mean_labelled:
axs[1,i].scatter([initpts], [mean], color = 'red', marker = 's')
else
|
"""
Return 1 for each b in B if A >= b, else 0,
where >= is the Loewner order (matrix comparison);
if A >= b, A spans over b.
Used to detect poor fits of the coregionalization:
if [coregionalization matrix] >= [measured covariance] is violated,
the covariance matrix is overestimated / fitted poorly
"""
ret_list = []
for b in B:
D = (A-b).reshape((2,2))
det = np.linalg.det(D)
ret = 1
if det < 0 or D[0,0] < 0:
ret = 0
ret_list.append(ret)
return ret_list
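# A worked example of the check above on toy, flattened 2x2 matrices: D = A - b
# is accepted (1) only when det(D) >= 0 and D[0, 0] >= 0, i.e. when D passes this
# 2x2 positive-semidefiniteness test ("A spans over b").
import numpy as np

A_example = np.array([2.0, 0.0, 0.0, 2.0])   # assumed coregionalization matrix, flattened
b_example = np.array([1.0, 0.0, 0.0, 1.0])   # one measured covariance matrix, flattened
D_example = (A_example - b_example).reshape((2, 2))
accepted = int(np.linalg.det(D_example) >= 0 and D_example[0, 0] >= 0)  # 1: A - b = I is PSD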
|
identifier_body
|
|
plot_TL_results.py
|
overestimated / fitted poorly
"""
ret_list = []
for b in B:
D = (A-b).reshape((2,2))
det = np.linalg.det(D)
ret = 1
if det < 0 or D[0,0] < 0:
ret = 0
ret_list.append(ret)
return ret_list
## compare each number of secondary initpts
## to baseline with a one-sided Mann-Whitney U (Wilcoxon rank-sum) test to see
## when TL is faster than the baseline, also collect the lowest,
## highest, median and mean expected improvement and their secondary initpts
def indicator_loss(b_times, r_times, N = None, alpha = 0.1, method = mannwhitneyu):
"""
One-sided test (default method: Mann-Whitney U / Wilcoxon rank-sum) on bootstrap resamples
of b_times and r_times. H1: b_times tend to be smaller than r_times (baseline converges
faster); H0: they do not. Returns False if H0 is rejected, True otherwise.
"""
if N is None:
N = min([len(b_times), len(r_times)])*5
b = np.random.choice(b_times, size = N, replace = True)
r = np.random.choice(r_times, size = N, replace = True)
#diff = b-r
#diff = diff[diff != 0]
# is the median of the differences b-r less than zero
test = method(b,r, alternative = 'less')
if test[1] < alpha:
# reject
return False
else:
return True
def loss_function_table(c_speed, name):
"""
For each number k of secondary initpts, take the convergence results of the
baseline (b_times) and of the experiment with k secondary points (r_times),
compute the mean convergence time relative to the baseline mean (mean_loss),
and run the one-sided Mann-Whitney U test via indicator_loss (True if the
baseline is not significantly faster, False otherwise).
Returns a pandas DataFrame with one row per k.
"""
initpts_list = np.unique(c_speed[:,0]).reshape(-1,1)
initpts_list = initpts_list[initpts_list != 0] # remove baselines
b_times = c_speed[c_speed[:,0] == 0,1]
b_mean = np.mean(b_times)
faster = [] # which number of secondary initpts are faster than the baseline
for initpts in initpts_list:
r_times = c_speed[c_speed[:,0] == initpts, 1]
#median_ixd = np.argsort(r_times)[len(r_times)//2]
# add initpts, mean (loss function), wx test (indicator loss function) if faster than baseline
faster.append([initpts, round(np.mean(r_times)/b_mean, 2), indicator_loss(b_times, r_times)])
faster = np.array(faster).reshape(-1, 3)
ret = pd.DataFrame({'experiment':name,
'secondary_initpts':faster[:,0],
'mean_loss':faster[:,1],
'indicator_loss':faster[:,2]})
# normalize mean acquisition time
# loss function minima ->
# plot loss function minima against number of secondary initpts
return ret
## plot convergence and collect loss function table
def plot_TL_convergence(filename, experiment_folders, baseline_folders):
"""
Plot for list of TL experiments:
convergence speed to 0.1 kcal/mol in
- BO iterations and CPU time
- mean of both (statistical expected value)
- linear trend
"""
cputime_max = 0
N = len(experiment_folders)
fig, axs = plt.subplots(2,N,
figsize = (5*N,10),
sharey = 'row')
SMALL_SIZE = 15
MEDIUM_SIZE = 20
LARGE_SIZE = 25
tot_loss_table = None
for i in range(N):
experiment = experiment_folders[i].copy()
baseline = baseline_folders[i].copy()
explist = baseline
for exp in experiment:
explist.append(exp)
convergence_iterations = []
convergence_times = []
for exp in explist:
if len(exp['initpts'])>1:
secondary_initpts = int(exp['initpts'][1])
else:
secondary_initpts = 0
# convergence by iteration
convergence_iter = exp['iterations_to_gmp_convergence'][5]
convergence_iterations.append([secondary_initpts,convergence_iter])
# convergence by cpu time
convergence_time = exp['totaltime_to_gmp_convergence'][5]
convergence_times.append([secondary_initpts, convergence_time])
# plot
convergence_iterations = np.array(convergence_iterations, dtype = float)
axs[0, i].scatter(convergence_iterations[:,0],
convergence_iterations[:,1],
color = 'blue', alpha = 0.5, marker = 'x',
label = 'observation')
# linear fit
raw_rows = convergence_iterations
clean_rows = raw_rows[np.logical_not(np.logical_or(np.isnan(raw_rows[:,0]),
np.isnan(raw_rows[:,1]))),:]
x_train = clean_rows[:,0].reshape(-1,1)
y_train = clean_rows[:,1].reshape(-1,1)
reg = LinearRegression().fit(x_train, y_train)
x = np.unique(convergence_iterations[:,0]).reshape(-1,1)
y = reg.predict(x)
axs[0, i].plot(x,y, color = 'red', label = 'trend', linewidth = 3)
# plot means
mean_labelled = False
for initpts in np.unique(x_train):
mean = np.mean(y_train[x_train == initpts])
if mean_labelled:
axs[0,i].scatter([initpts], [mean], color = 'red', marker = 's')
else:
axs[0,i].scatter([initpts], [mean],
color = 'red', marker = 's',
label = 'mean')
mean_labelled = True
axs[0,i].legend(fontsize = SMALL_SIZE)
###
convergence_times = np.array(convergence_times, dtype = float)
axs[1, i].scatter(convergence_times[:,0],
convergence_times[:,1],
color = 'blue', alpha = 0.5, marker = 'x',
label = 'observation')
### linear fit
raw_rows = convergence_times
clean_rows = raw_rows[np.logical_not(np.logical_or(np.isnan(raw_rows[:,0]),
np.isnan(raw_rows[:,1]))),:]
clean_rows = clean_rows.reshape(-1,2)
#outliers = clean_rows[clean_rows[:,1] > cputime_max,:]
# outlier if more than 2.5 standard deviations above the group mean
outlier_idx = []
for row in clean_rows:
initpts = row[0]
val = row[1]
obs = clean_rows[clean_rows[:,0] == initpts,:]
#obs = obs[obs != row]
m = np.mean(obs[:, 1])
sd = np.std(obs[:, 1])
if sd > 0 and (val - m) / sd > 2.5: # z-score over the group's times - assuming a normal
# distribution, well under 1% of points should be this far above the mean
outlier_idx.append(True)
else:
outlier_idx.append(False)
outliers = clean_rows[outlier_idx, :]
#clean_rows = clean_rows[clean_rows[:,1] <= cputime_max, :]
clean_rows = clean_rows[np.logical_not(outlier_idx),:]
if max(clean_rows[:,1]) > cputime_max:
|
polyreg=make_pipeline(PolynomialFeatures(degree),LinearRegression())
polyreg.fit(x_train,y_train)
x = np.unique(convergence_iterations[:,0]).reshape(-1,1)
axs[0,i].set_xticks(x[::2])
y = polyreg.predict(x)
axs[1, i].plot(x,y, color = 'red', label = 'trend', linewidth = 3)
axs[1,i].set_xticks(x[::2])
outlier_labelled = False
for outlier in outliers:
if outlier_labelled:
axs[1,i].scatter([outlier[0]],[cputime_max*1.1],
marker = 6, color = 'black')
else:
axs[1,i].scatter([outlier[0]],[cputime_max*1.1],
marker = 6, color = 'black',
label = 'outlier')
outlier_labelled = True
axs[1,i].annotate('{:.0f}'.format(outlier[1]),
[outlier[0],cputime_max*1.1], rotation = 270,
fontsize = SMALL_SIZE)
mean_labelled = False
for initpts in np.unique(x_train):
mean = np.mean(y_train[x_train == initpts])
if mean_labelled:
axs[1,i].scatter([initpts], [mean], color = 'red', marker = 's')
else:
axs[1,i].scatter([initpts], [mean],
color = 'red', marker = 's',
label = 'mean')
mean_labelled = True
axs[1,i].legend(fontsize = SMALL_SIZE)
expname = experiment_folders[i][0]['name'].split('_')[0]
|
cputime_max = max(clean_rows[:,1])
x_train = clean_rows[:,0].reshape(-1,1)
y_train = clean_rows[:,1].reshape(-1,1)
degree=1
|
random_line_split
|
mod.rs
|
meta_data_add_string(meta, c_key.as_ptr(), c_value.as_ptr());
}
}
MetaValue::SignedInt(i) => unsafe {
meta_data_add_signed_int(meta, c_key.as_ptr(), *i);
},
MetaValue::UnsignedInt(u) => unsafe {
meta_data_add_unsigned_int(meta, c_key.as_ptr(), *u);
},
MetaValue::Double(d) => unsafe {
meta_data_add_double(meta, c_key.as_ptr(), *d);
},
MetaValue::Boolean(b) => unsafe {
meta_data_add_boolean(meta, c_key.as_ptr(), *b);
},
}
}
Ok(())
}
fn from_meta_data(
plugin: &str,
meta: *mut meta_data_t,
) -> Result<HashMap<String, MetaValue>, ReceiveError> {
if meta.is_null() {
return Ok(HashMap::new());
}
let mut c_toc: *mut *mut c_char = ptr::null_mut();
let count_or_err = unsafe { meta_data_toc(meta, &mut c_toc as *mut *mut *mut c_char) };
if count_or_err < 0 {
return Err(ReceiveError::Metadata {
plugin: plugin.to_string(),
field: "toc".to_string(),
msg: "invalid parameters to meta_data_toc",
});
}
let count = count_or_err as usize;
if count == 0 {
return Ok(HashMap::new());
}
let toc = unsafe { slice::from_raw_parts(c_toc, count) };
let conversion_result = from_meta_data_with_toc(plugin, meta, toc);
for c_key_ptr in toc {
unsafe {
libc::free(*c_key_ptr as *mut c_void);
}
}
unsafe {
libc::free(c_toc as *mut c_void);
}
conversion_result
}
fn from_meta_data_with_toc(
plugin: &str,
meta: *mut meta_data_t,
toc: &[*mut c_char],
) -> Result<HashMap<String, MetaValue>, ReceiveError> {
let mut meta_hm = HashMap::with_capacity(toc.len());
for c_key_ptr in toc {
let (c_key, key, value_type) = unsafe {
let c_key: &CStr = CStr::from_ptr(*c_key_ptr);
let key: String = c_key
.to_str()
.map_err(|e| ReceiveError::Utf8 {
plugin: plugin.to_string(),
field: "metadata key",
err: e,
})?
.to_string();
let value_type: u32 = meta_data_type(meta, c_key.as_ptr()) as u32;
(c_key, key, value_type)
};
match value_type {
MD_TYPE_BOOLEAN => {
let mut c_value = false;
unsafe {
meta_data_get_boolean(meta, c_key.as_ptr(), &mut c_value as *mut bool);
}
meta_hm.insert(key, MetaValue::Boolean(c_value));
}
MD_TYPE_DOUBLE => {
let mut c_value = 0.0;
unsafe {
meta_data_get_double(meta, c_key.as_ptr(), &mut c_value as *mut f64);
}
meta_hm.insert(key, MetaValue::Double(c_value));
}
MD_TYPE_SIGNED_INT => {
let mut c_value = 0i64;
unsafe {
meta_data_get_signed_int(meta, c_key.as_ptr(), &mut c_value as *mut i64);
}
meta_hm.insert(key, MetaValue::SignedInt(c_value));
}
MD_TYPE_STRING => {
let value: String = unsafe {
let mut c_value: *mut c_char = ptr::null_mut();
meta_data_get_string(meta, c_key.as_ptr(), &mut c_value as *mut *mut c_char);
CStr::from_ptr(c_value)
.to_str()
.map_err(|e| ReceiveError::Utf8 {
plugin: plugin.to_string(),
field: "metadata value",
err: e,
})?
.to_string()
};
meta_hm.insert(key, MetaValue::String(value));
}
MD_TYPE_UNSIGNED_INT => {
let mut c_value = 0u64;
unsafe {
meta_data_get_unsigned_int(meta, c_key.as_ptr(), &mut c_value as *mut u64);
}
meta_hm.insert(key, MetaValue::UnsignedInt(c_value));
}
_ => {
return Err(ReceiveError::Metadata {
plugin: plugin.to_string(),
field: key,
msg: "unknown metadata type",
});
}
}
}
Ok(meta_hm)
}
fn submit_array_res(s: &str, name: &'static str) -> Result<[c_char; ARR_LENGTH], SubmitError> {
to_array_res(s).map_err(|e| SubmitError::Field { name, err: e })
}
/// Collectd stores textual data in fixed-size arrays, so this function will convert a string
/// slice into an array compatible with collectd's text fields. Be aware that `ARR_LENGTH` is 64
/// before collectd 5.7
fn to_array_res(s: &str) -> Result<[c_char; ARR_LENGTH], ArrayError> {
// By checking if the length is greater than or *equal* to, we guarantee a trailing null
if s.len() >= ARR_LENGTH {
return Err(ArrayError::TooLong(s.len()));
}
let bytes = s.as_bytes();
// Using memchr to find a null and work around it is 10x faster than
// using a CString to get the bytes_with_nul and cut the time to submit
// values to collectd in half.
if let Some(ind) = memchr(0, bytes) {
return Err(ArrayError::NullPresent(ind, s.to_string()));
}
let mut arr = [0; ARR_LENGTH];
arr[0..bytes.len()].copy_from_slice(bytes);
Ok(unsafe { ::std::mem::transmute(arr) })
}
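# A cross-language sketch (Python, illustration only) of the rule implemented by
# to_array_res above: the text must be strictly shorter than the field so a
# trailing NUL always fits, and interior NULs are rejected because they would
# silently truncate the C string. ARR_LENGTH is assumed to be 128 here (the
# comment above notes it is 64 before collectd 5.7).
ARR_LENGTH = 128

def to_fixed_field(s: str) -> bytes:
    data = s.encode("utf-8")
    if len(data) >= ARR_LENGTH:            # >= keeps room for the trailing NUL
        raise ValueError("string too long for fixed-size field")
    if b"\x00" in data:                    # interior NUL is not representable
        raise ValueError("embedded NUL byte")
    return data.ljust(ARR_LENGTH, b"\x00") # NUL-padded, fixed-size field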
fn receive_array<'a>(
s: &'a [c_char; ARR_LENGTH],
plugin: &str,
field: &'static str,
) -> Result<&'a str, ReceiveError> {
from_array(s).map_err(|e| ReceiveError::Utf8 {
plugin: String::from(plugin),
field,
err: e,
})
}
/// Turns a fixed size character array into string slice, if possible
pub fn from_array(s: &[c_char; ARR_LENGTH]) -> Result<&str, Utf8Error> {
unsafe {
let a = s as *const [c_char; ARR_LENGTH] as *const c_char;
CStr::from_ptr(a).to_str()
}
}
/// Returns if the string is empty or not
pub fn empty_to_none(s: &str) -> Option<&str> {
if s.is_empty() {
None
} else {
Some(s)
}
}
pub fn length(len: u64) -> usize {
len as usize
}
pub fn get_default_interval() -> u64 {
0
}
#[cfg(test)]
mod tests {
use self::cdtime::nanos_to_collectd;
use super::*;
use crate::bindings::data_source_t;
use std::os::raw::c_char;
#[test]
fn test_empty_to_none() {
assert_eq!(None, empty_to_none(""));
let s = "hi";
assert_eq!(Some("hi"), empty_to_none(s));
}
#[test]
fn test_from_array() {
let mut name: [c_char; ARR_LENGTH] = [0; ARR_LENGTH];
name[0] = b'h' as c_char;
name[1] = b'i' as c_char;
assert_eq!(Ok("hi"), from_array(&name));
}
#[test]
fn test_to_array() {
let actual = to_array_res("Hi");
assert!(actual.is_ok());
assert_eq!(&actual.unwrap()[..2], &[b'H' as c_char, b'i' as c_char]);
}
#[test]
fn test_to_array_res_nul() {
let actual = to_array_res("hi\0");
assert!(actual.is_err());
}
#[test]
fn test_to_array_res_too_long() {
let actual = to_array_res(
"Hello check this out, I am a long string and there is no signs of stopping; well, maybe one day I will stop when I get too longggggggggggggggggggggggggggggggggggg",
);
assert!(actual.is_err());
}
#[test]
fn test_submit() {
let values = vec![Value::Gauge(15.0), Value::Gauge(10.0), Value::Gauge(12.0)];
let result = ValueListBuilder::new("my-plugin", "load")
.values(&values)
.submit();
assert_eq!(result.unwrap(), ());
}
#[test]
fn test_recv_value_list_conversion() {
let empty: [c_char; ARR_LENGTH] = [0; ARR_LENGTH];
let mut metric: [c_char; ARR_LENGTH] = [0; ARR_LENGTH];
metric[0] = b'h' as c_char;
metric[1] = b'o' as c_char;
|
random_line_split
|
||
mod.rs
|
user", "system" metrics.
pub plugin_instance: Option<&'a str>,
/// This is the string found in types.db, determines how many values are expected and how they
/// should be interpreted
pub type_: &'a str,
/// The type instance is used to separate values of identical type which nonetheless belong to
/// one another. For instance, even though "free", "used", and "total" all have types of
/// "Memory" they are different type instances.
pub type_instance: Option<&'a str>,
/// The hostname where the values were collected
pub host: &'a str,
/// The timestamp at which the value was collected
pub time: DateTime<Utc>,
/// The interval in which new values are to be expected
pub interval: Duration,
/// Metadata associated to the reported values
pub meta: HashMap<String, MetaValue>,
// Keep the original list and set around for calculating rates on demand
original_list: *const value_list_t,
original_set: *const data_set_t,
}
impl<'a> ValueList<'a> {
/// Collectd does not automatically convert `Derived` values into a rate. This is why many
/// write plugins have a `StoreRates` config option so that these rates are calculated on
/// demand from collectd's internal cache. This function will return a vector that can supersede
/// the `values` field and contains the rate of all non-gauge values. Values that are gauges
/// remain unchanged, so one doesn't need to fall back to the `values` field, as this function
/// will return everything prepped for submission.
pub fn rates(&self) -> Result<Cow<'_, Vec<ValueReport<'a>>>, CacheRateError> {
// As an optimization step, if we know all values are gauges there is no need to call out
// to uc_get_rate as no values will be changed
let all_gauges = self.values.iter().all(|x| match x.value {
Value::Gauge(_) => true,
_ => false,
});
if all_gauges {
return Ok(Cow::Borrowed(&self.values));
}
let ptr = unsafe { uc_get_rate(self.original_set, self.original_list) };
if !ptr.is_null() {
let nv = unsafe { slice::from_raw_parts(ptr, self.values.len()) }
.iter()
.zip(self.values.iter())
.map(|(rate, report)| match report.value {
Value::Gauge(_) => *report,
_ => ValueReport {
value: Value::Gauge(*rate),
..*report
},
})
.collect();
Ok(Cow::Owned(nv))
} else {
Err(CacheRateError)
}
}
pub fn from<'b>(
set: &'b data_set_t,
list: &'b value_list_t,
) -> Result<ValueList<'b>, ReceiveError> {
let plugin = receive_array(&list.plugin, "", "plugin name")?;
let ds_len = length(set.ds_num);
let list_len = length(list.values_len);
let values: Result<Vec<ValueReport<'_>>, ReceiveError> =
unsafe { slice::from_raw_parts(list.values, list_len) }
.iter()
.zip(unsafe { slice::from_raw_parts(set.ds, ds_len) })
.map(|(val, source)| unsafe {
let v = match ::std::mem::transmute(source.type_) {
ValueType::Gauge => Value::Gauge(val.gauge),
ValueType::Counter => Value::Counter(val.counter),
ValueType::Derive => Value::Derive(val.derive),
ValueType::Absolute => Value::Absolute(val.absolute),
};
let name = receive_array(&source.name, plugin, "data source name")?;
Ok(ValueReport {
name,
value: v,
min: source.min,
max: source.max,
})
})
.collect();
assert!(list.time > 0);
assert!(list.interval > 0);
let plugin_instance =
receive_array(&list.plugin_instance, plugin, "plugin_instance").map(empty_to_none)?;
let type_ = receive_array(&list.type_, plugin, "type")?;
let type_instance =
receive_array(&list.type_instance, plugin, "type_instance").map(empty_to_none)?;
let host = receive_array(&list.host, plugin, "host")?;
let meta = from_meta_data(plugin, list.meta)?;
Ok(ValueList {
values: values?,
plugin_instance,
plugin,
type_,
type_instance,
host,
time: CdTime::from(list.time).into(),
interval: CdTime::from(list.interval).into(),
meta,
original_list: list,
original_set: set,
})
}
}
#[derive(Debug, PartialEq, Clone)]
struct SubmitValueList<'a> {
values: &'a [Value],
plugin_instance: Option<&'a str>,
plugin: &'a str,
type_: &'a str,
type_instance: Option<&'a str>,
host: Option<&'a str>,
time: Option<DateTime<Utc>>,
interval: Option<Duration>,
meta: HashMap<&'a str, MetaValue>,
}
/// Creates a value list to report values to collectd.
#[derive(Debug, PartialEq, Clone)]
pub struct ValueListBuilder<'a> {
list: SubmitValueList<'a>,
}
impl<'a> ValueListBuilder<'a> {
/// Primes a value list for submission. `plugin` will most likely be the name from the
/// `PluginManager` and `type_` is the datatype found in types.db
pub fn new<T: Into<&'a str>, U: Into<&'a str>>(plugin: T, type_: U) -> ValueListBuilder<'a> {
ValueListBuilder {
list: SubmitValueList {
values: &[],
plugin_instance: None,
plugin: plugin.into(),
type_: type_.into(),
type_instance: None,
host: None,
time: None,
interval: None,
meta: HashMap::new(),
},
}
}
/// A set of observed values that belong to the same plugin and type instance
pub fn values(mut self, values: &'a [Value]) -> ValueListBuilder<'a> {
self.list.values = values;
self
}
/// Distinguishes entities that yield metrics. Each core would be a different instance of the
/// same plugin, as each core reports "idle", "user", "system" metrics.
pub fn plugin_instance<T: Into<&'a str>>(mut self, plugin_instance: T) -> ValueListBuilder<'a> {
self.list.plugin_instance = Some(plugin_instance.into());
self
}
/// The type instance is used to separate values of identical type which nonetheless belong to
/// one another. For instance, even though "free", "used", and "total" all have types of
/// "Memory" they are different type instances.
pub fn type_instance<T: Into<&'a str>>(mut self, type_instance: T) -> ValueListBuilder<'a> {
self.list.type_instance = Some(type_instance.into());
self
}
/// Override the machine's hostname that the observed values will be attributed to. Best to
/// override when observing values from another machine
pub fn host<T: Into<&'a str>>(mut self, host: T) -> ValueListBuilder<'a> {
self.list.host = Some(host.into());
self
}
/// The timestamp at which the value was collected. Overrides the default time, which is when
/// collectd receives the values from `submit`. Use only if there is a significant delay in
/// metrics gathering or if submitting values from the past.
pub fn time(mut self, dt: DateTime<Utc>) -> ValueListBuilder<'a>
|
/// The interval in which new values are to be expected. This is typically handled at a global
/// or plugin level. Use at your own discretion.
pub fn interval(mut self, interval: Duration) -> ValueListBuilder<'a> {
self.list.interval = Some(interval);
self
}
/// Add a metadata entry.
///
/// Multiple entries can be added by calling this method. If the same key is used, only the last
/// entry is kept.
pub fn metadata(mut self, key: &'a str, value: MetaValue) -> ValueListBuilder<'a> {
self.list.meta.insert(key, value);
self
}
/// Submits the observed values to collectd and returns errors if encountered
pub fn submit(self) -> Result<(), SubmitError> {
let mut v: Vec<value_t> = self.list.values.iter().map(|&x| x.into()).collect();
let plugin_instance = self
.list
.plugin_instance
.map(|x| submit_array_res(x, "plugin_instance"))
.unwrap_or_else(|| Ok([0 as c_char; ARR_LENGTH]))?;
let type_instance = self
.list
.type_instance
.map(|x| submit_array_res(x, "type_instance"))
.unwrap_or_else(|| Ok([0 as c_char; ARR_LENGTH]))?;
let host = self
.list
.host
.map(|x
|
{
self.list.time = Some(dt);
self
}
|
identifier_body
|
mod.rs
|
<'a> {
/// Name of the metric. If values has a length of 1, this is often just "value"
pub name: &'a str,
/// The value reported
pub value: Value,
/// Minimum value seen in an interval
pub min: f64,
/// Maximum value seen in an interval
pub max: f64,
}
/// Contains values and metadata that collectd has collected from plugins
#[derive(Debug, PartialEq, Clone)]
pub struct ValueList<'a> {
pub values: Vec<ValueReport<'a>>,
/// The plugin that submitted this value. This would be your `PluginManager` when submitting
/// values
pub plugin: &'a str,
/// Distinguishes entities that yield metrics. Each core would be a different instance of the
/// same plugin, as each core reports "idle", "user", "system" metrics.
pub plugin_instance: Option<&'a str>,
/// This is the string found in types.db, determines how many values are expected and how they
/// should be interpreted
pub type_: &'a str,
/// The type instance is used to separate values of identical type which nonetheless belong to
/// one another. For instance, even though "free", "used", and "total" all have types of
/// "Memory" they are different type instances.
pub type_instance: Option<&'a str>,
/// The hostname where the values were collected
pub host: &'a str,
/// The timestamp at which the value was collected
pub time: DateTime<Utc>,
/// The interval in which new values are to be expected
pub interval: Duration,
/// Metadata associated to the reported values
pub meta: HashMap<String, MetaValue>,
// Keep the original list and set around for calculating rates on demand
original_list: *const value_list_t,
original_set: *const data_set_t,
}
impl<'a> ValueList<'a> {
/// Collectd does not automatically convert `Derived` values into a rate. This is why many
/// write plugins have a `StoreRates` config option so that these rates are calculated on
/// demand from collectd's internal cache. This function will return a vector that can supersede
/// the `values` field and contains the rate of all non-gauge values. Values that are gauges
/// remain unchanged, so one doesn't need to fall back to the `values` field, as this function
/// will return everything prepped for submission.
pub fn rates(&self) -> Result<Cow<'_, Vec<ValueReport<'a>>>, CacheRateError> {
// As an optimization step, if we know all values are gauges there is no need to call out
// to uc_get_rate as no values will be changed
let all_gauges = self.values.iter().all(|x| match x.value {
Value::Gauge(_) => true,
_ => false,
});
if all_gauges {
return Ok(Cow::Borrowed(&self.values));
}
let ptr = unsafe { uc_get_rate(self.original_set, self.original_list) };
if !ptr.is_null() {
let nv = unsafe { slice::from_raw_parts(ptr, self.values.len()) }
.iter()
.zip(self.values.iter())
.map(|(rate, report)| match report.value {
Value::Gauge(_) => *report,
_ => ValueReport {
value: Value::Gauge(*rate),
..*report
},
})
.collect();
Ok(Cow::Owned(nv))
} else {
Err(CacheRateError)
}
}
pub fn from<'b>(
set: &'b data_set_t,
list: &'b value_list_t,
) -> Result<ValueList<'b>, ReceiveError> {
let plugin = receive_array(&list.plugin, "", "plugin name")?;
let ds_len = length(set.ds_num);
let list_len = length(list.values_len);
let values: Result<Vec<ValueReport<'_>>, ReceiveError> =
unsafe { slice::from_raw_parts(list.values, list_len) }
.iter()
.zip(unsafe { slice::from_raw_parts(set.ds, ds_len) })
.map(|(val, source)| unsafe {
let v = match ::std::mem::transmute(source.type_) {
ValueType::Gauge => Value::Gauge(val.gauge),
ValueType::Counter => Value::Counter(val.counter),
ValueType::Derive => Value::Derive(val.derive),
ValueType::Absolute => Value::Absolute(val.absolute),
};
let name = receive_array(&source.name, plugin, "data source name")?;
Ok(ValueReport {
name,
value: v,
min: source.min,
max: source.max,
})
})
.collect();
assert!(list.time > 0);
assert!(list.interval > 0);
let plugin_instance =
receive_array(&list.plugin_instance, plugin, "plugin_instance").map(empty_to_none)?;
let type_ = receive_array(&list.type_, plugin, "type")?;
let type_instance =
receive_array(&list.type_instance, plugin, "type_instance").map(empty_to_none)?;
let host = receive_array(&list.host, plugin, "host")?;
let meta = from_meta_data(plugin, list.meta)?;
Ok(ValueList {
values: values?,
plugin_instance,
plugin,
type_,
type_instance,
host,
time: CdTime::from(list.time).into(),
interval: CdTime::from(list.interval).into(),
meta,
original_list: list,
original_set: set,
})
}
}
#[derive(Debug, PartialEq, Clone)]
struct SubmitValueList<'a> {
values: &'a [Value],
plugin_instance: Option<&'a str>,
plugin: &'a str,
type_: &'a str,
type_instance: Option<&'a str>,
host: Option<&'a str>,
time: Option<DateTime<Utc>>,
interval: Option<Duration>,
meta: HashMap<&'a str, MetaValue>,
}
/// Creates a value list to report values to collectd.
#[derive(Debug, PartialEq, Clone)]
pub struct ValueListBuilder<'a> {
list: SubmitValueList<'a>,
}
impl<'a> ValueListBuilder<'a> {
/// Primes a value list for submission. `plugin` will most likely be the name from the
/// `PluginManager` and `type_` is the datatype found in types.db
pub fn new<T: Into<&'a str>, U: Into<&'a str>>(plugin: T, type_: U) -> ValueListBuilder<'a> {
ValueListBuilder {
list: SubmitValueList {
values: &[],
plugin_instance: None,
plugin: plugin.into(),
type_: type_.into(),
type_instance: None,
host: None,
time: None,
interval: None,
meta: HashMap::new(),
},
}
}
/// A set of observed values that belong to the same plugin and type instance
pub fn values(mut self, values: &'a [Value]) -> ValueListBuilder<'a> {
self.list.values = values;
self
}
/// Distinguishes entities that yield metrics. Each core would be a different instance of the
/// same plugin, as each core reports "idle", "user", "system" metrics.
pub fn plugin_instance<T: Into<&'a str>>(mut self, plugin_instance: T) -> ValueListBuilder<'a> {
self.list.plugin_instance = Some(plugin_instance.into());
self
}
/// The type instance is used to separate values of identical type which nonetheless belong to
/// one another. For instance, even though "free", "used", and "total" all have types of
/// "Memory" they are different type instances.
pub fn type_instance<T: Into<&'a str>>(mut self, type_instance: T) -> ValueListBuilder<'a> {
self.list.type_instance = Some(type_instance.into());
self
}
/// Override the machine's hostname that the observed values will be attributed to. Best to
/// override when observing values from another machine
pub fn host<T: Into<&'a str>>(mut self, host: T) -> ValueListBuilder<'a> {
self.list.host = Some(host.into());
self
}
/// The timestamp at which the value was collected. Overrides the default time, which is when
/// collectd receives the values from `submit`. Use only if there is a significant delay in
/// metrics gathering or if submitting values from the past.
pub fn time(mut self, dt: DateTime<Utc>) -> ValueListBuilder<'a> {
self.list.time = Some(dt);
self
}
/// The interval in which new values are to be expected. This is typically handled at a global
/// or plugin level. Use at your own discretion.
pub fn interval(mut self, interval: Duration) -> ValueListBuilder<'a> {
self.list.interval = Some(interval);
self
}
/// Add a metadata entry.
///
/// Multiple entries can be added by calling this method. If the same key is used, only the last
/// entry is kept.
pub fn metadata(mut self, key: &'a str, value: MetaValue) -> ValueListBuilder<'
|
ValueReport
|
identifier_name
|
|
input.ts
|
Down(e) },
{ target: window, eventName: "mouseup", action: (e: MouseEvent) => onPotentialDoubleClick(e) },
{ target: window, eventName: "wheel", action: (e: WheelEvent) => onWheelScroll(e), options: { passive: false } },
{ target: window, eventName: "modifyinputfield", action: (e: CustomEvent) => onModifyInputField(e) },
{ target: window, eventName: "focusout", action: () => (canvasFocused = false) },
{ target: window.document, eventName: "contextmenu", action: (e: MouseEvent) => onContextMenu(e) },
{ target: window.document, eventName: "fullscreenchange", action: () => fullscreen.fullscreenModeChanged() },
{ target: window.document.body, eventName: "paste", action: (e: ClipboardEvent) => onPaste(e) },
];
// Event bindings
function bindListeners(): void {
// Add event bindings for the lifetime of the application
listeners.forEach(({ target, eventName, action, options }) => target.addEventListener(eventName, action, options));
}
function unbindListeners(): void {
// Remove event bindings after the lifetime of the application (or on hot-module replacement during development)
listeners.forEach(({ target, eventName, action, options }) => target.removeEventListener(eventName, action, options));
}
// Keyboard events
async function shouldRedirectKeyboardEventToBackend(e: KeyboardEvent): Promise<boolean> {
// Don't redirect when a modal is covering the workspace
if (get(dialog).visible) return false;
const key = await getLocalizedScanCode(e);
// TODO: Switch to a system where everything is sent to the backend, then the input preprocessor makes decisions and kicks some inputs back to the frontend
const accelKey = platformIsMac() ? e.metaKey : e.ctrlKey;
// Don't redirect user input from text entry into HTML elements
if (targetIsTextField(e.target || undefined) && key !== "Escape" && !(accelKey && ["Enter", "NumpadEnter"].includes(key))) return false;
// Don't redirect paste
if (key === "KeyV" && accelKey) return false;
// Don't redirect a fullscreen request
if (key === "F11" && e.type === "keydown" && !e.repeat) {
e.preventDefault();
fullscreen.toggleFullscreen();
return false;
}
// Don't redirect a reload request
if (key === "F5") return false;
if (key === "KeyR" && accelKey) return false;
// Don't redirect debugging tools
if (["F12", "F8"].includes(key)) return false;
if (["KeyC", "KeyI", "KeyJ"].includes(key) && accelKey && e.shiftKey) return false;
// Don't redirect tab or enter if not in canvas (to allow navigating elements)
if (!canvasFocused && !targetIsTextField(e.target || undefined) && ["Tab", "Enter", "NumpadEnter", "Space", "ArrowDown", "ArrowLeft", "ArrowRight", "ArrowUp"].includes(key)) return false;
// Redirect to the backend
return true;
}
async function onKeyDown(e: KeyboardEvent): Promise<void> {
const key = await getLocalizedScanCode(e);
const NO_KEY_REPEAT_MODIFIER_KEYS = ["ControlLeft", "ControlRight", "ShiftLeft", "ShiftRight", "MetaLeft", "MetaRight", "AltLeft", "AltRight", "AltGraph", "CapsLock", "Fn", "FnLock"];
if (e.repeat && NO_KEY_REPEAT_MODIFIER_KEYS.includes(key)) return;
if (await shouldRedirectKeyboardEventToBackend(e)) {
e.preventDefault();
const modifiers = makeKeyboardModifiersBitfield(e);
editor.instance.onKeyDown(key, modifiers, e.repeat);
return;
}
if (get(dialog).visible && key === "Escape") {
dialog.dismissDialog();
}
}
async function onKeyUp(e: KeyboardEvent): Promise<void> {
const key = await getLocalizedScanCode(e);
if (await shouldRedirectKeyboardEventToBackend(e)) {
e.preventDefault();
const modifiers = makeKeyboardModifiersBitfield(e);
editor.instance.onKeyUp(key, modifiers, e.repeat);
}
}
// Pointer events
// While any pointer button is already down, additional button down events are not reported, but they are sent as `pointermove` events and these are handled in the backend
function onPointerMove(e: PointerEvent): void {
if (!e.buttons) viewportPointerInteractionOngoing = false;
// Don't redirect pointer movement to the backend if there's no ongoing interaction and it's over a floating menu, or the graph overlay, on top of the canvas
// TODO: A better approach is to pass along a boolean to the backend's input preprocessor so it can know if it's being occluded by the GUI.
// TODO: This would allow it to properly decide to act on removing hover focus from something that was hovered in the canvas before moving over the GUI.
// TODO: Further explanation: https://github.com/GraphiteEditor/Graphite/pull/623#discussion_r866436197
const inFloatingMenu = e.target instanceof Element && e.target.closest("[data-floating-menu-content]");
const inGraphOverlay = get(document).graphViewOverlayOpen;
if (!viewportPointerInteractionOngoing && (inFloatingMenu || inGraphOverlay)) return;
const { target } = e;
const newInCanvasArea = (target instanceof Element && target.closest("[data-viewport], [data-graph]")) instanceof Element && !targetIsTextField(window.document.activeElement || undefined);
if (newInCanvasArea && !canvasFocused) {
canvasFocused = true;
app?.focus();
}
const modifiers = makeKeyboardModifiersBitfield(e);
editor.instance.onMouseMove(e.clientX, e.clientY, e.buttons, modifiers);
}
function onMouseDown(e: MouseEvent): void {
// Block middle mouse button auto-scroll mode (the circular gizmo that appears and allows quick scrolling by moving the cursor above or below it)
if (e.button === 1) e.preventDefault();
}
function onPointerDown(e: PointerEvent): void {
const { target } = e;
const isTargetingCanvas = target instanceof Element && target.closest("[data-viewport]");
const inDialog = target instanceof Element && target.closest("[data-dialog-modal] [data-floating-menu-content]");
const inTextInput = target === textToolInteractiveInputElement;
if (get(dialog).visible && !inDialog) {
dialog.dismissDialog();
e.preventDefault();
e.stopPropagation();
}
if (!inTextInput) {
if (textToolInteractiveInputElement) editor.instance.onChangeText(textInputCleanup(textToolInteractiveInputElement.innerText));
else viewportPointerInteractionOngoing = isTargetingCanvas instanceof Element;
}
if (viewportPointerInteractionOngoing) {
const modifiers = makeKeyboardModifiersBitfield(e);
editor.instance.onMouseDown(e.clientX, e.clientY, e.buttons, modifiers);
}
}
function onPointerUp(e: PointerEvent): void {
if (!e.buttons) viewportPointerInteractionOngoing = false;
if (textToolInteractiveInputElement) return;
const modifiers = makeKeyboardModifiersBitfield(e);
editor.instance.onMouseUp(e.clientX, e.clientY, e.buttons, modifiers);
}
function onPotentialDoubleClick(e: MouseEvent): void {
if (textToolInteractiveInputElement) return;
// Allow only double-clicks
if (e.detail !== 2) return;
// `e.buttons` is always 0 in the `mouseup` event, so we have to convert from `e.button` instead
let buttons = 1;
if (e.button === 0) buttons = 1; // LMB
if (e.button === 1) buttons = 4; // MMB
if (e.button === 2) buttons = 2; // RMB
const modifiers = makeKeyboardModifiersBitfield(e);
editor.instance.onDoubleClick(e.clientX, e.clientY, buttons, modifiers);
}
// Mouse events
function onWheelScroll(e: WheelEvent): void {
const { target } = e;
const isTargetingCanvas = target instanceof Element && target.closest("[data-viewport]");
// Redirect vertical scroll wheel movement into a horizontal scroll on a horizontally scrollable element
// There seems to be no possible way to properly employ the browser's smooth scrolling interpolation
const horizontalScrollableElement = target instanceof Element && target.closest("[data-scrollable-x]");
if (horizontalScrollableElement && e.deltaY !== 0) {
horizontalScrollableElement.scrollTo(horizontalScrollableElement.scrollLeft + e.deltaY, 0);
return;
}
if (isTargetingCanvas)
|
}
function onContextMenu(e: MouseEvent): void {
if (!targetIsTextField(e.target || undefined) && e.target !== textToolInteractiveInputElement) {
e.preventDefault();
}
}
// Receives a custom event dispatched when the user begins interactively editing with the text tool.
//
|
{
e.preventDefault();
const modifiers = makeKeyboardModifiersBitfield(e);
editor.instance.onWheelScroll(e.clientX, e.clientY, e.buttons, e.deltaX, e.deltaY, e.deltaZ, modifiers);
}
|
conditional_block
|
input.ts
|
Down(e) },
{ target: window, eventName: "mouseup", action: (e: MouseEvent) => onPotentialDoubleClick(e) },
{ target: window, eventName: "wheel", action: (e: WheelEvent) => onWheelScroll(e), options: { passive: false } },
{ target: window, eventName: "modifyinputfield", action: (e: CustomEvent) => onModifyInputField(e) },
{ target: window, eventName: "focusout", action: () => (canvasFocused = false) },
{ target: window.document, eventName: "contextmenu", action: (e: MouseEvent) => onContextMenu(e) },
{ target: window.document, eventName: "fullscreenchange", action: () => fullscreen.fullscreenModeChanged() },
{ target: window.document.body, eventName: "paste", action: (e: ClipboardEvent) => onPaste(e) },
];
// Event bindings
function bindListeners(): void {
// Add event bindings for the lifetime of the application
listeners.forEach(({ target, eventName, action, options }) => target.addEventListener(eventName, action, options));
}
function unbindListeners(): void {
// Remove event bindings after the lifetime of the application (or on hot-module replacement during development)
listeners.forEach(({ target, eventName, action, options }) => target.removeEventListener(eventName, action, options));
}
// Keyboard events
async function shouldRedirectKeyboardEventToBackend(e: KeyboardEvent): Promise<boolean> {
// Don't redirect when a modal is covering the workspace
if (get(dialog).visible) return false;
const key = await getLocalizedScanCode(e);
// TODO: Switch to a system where everything is sent to the backend, then the input preprocessor makes decisions and kicks some inputs back to the frontend
const accelKey = platformIsMac() ? e.metaKey : e.ctrlKey;
// Don't redirect user input from text entry into HTML elements
if (targetIsTextField(e.target || undefined) && key !== "Escape" && !(accelKey && ["Enter", "NumpadEnter"].includes(key))) return false;
// Don't redirect paste
if (key === "KeyV" && accelKey) return false;
// Don't redirect a fullscreen request
if (key === "F11" && e.type === "keydown" && !e.repeat) {
e.preventDefault();
fullscreen.toggleFullscreen();
return false;
}
// Don't redirect a reload request
if (key === "F5") return false;
if (key === "KeyR" && accelKey) return false;
// Don't redirect debugging tools
if (["F12", "F8"].includes(key)) return false;
if (["KeyC", "KeyI", "KeyJ"].includes(key) && accelKey && e.shiftKey) return false;
// Don't redirect tab or enter if not in canvas (to allow navigating elements)
if (!canvasFocused && !targetIsTextField(e.target || undefined) && ["Tab", "Enter", "NumpadEnter", "Space", "ArrowDown", "ArrowLeft", "ArrowRight", "ArrowUp"].includes(key)) return false;
// Redirect to the backend
return true;
}
async function onKeyDown(e: KeyboardEvent): Promise<void> {
const key = await getLocalizedScanCode(e);
const NO_KEY_REPEAT_MODIFIER_KEYS = ["ControlLeft", "ControlRight", "ShiftLeft", "ShiftRight", "MetaLeft", "MetaRight", "AltLeft", "AltRight", "AltGraph", "CapsLock", "Fn", "FnLock"];
if (e.repeat && NO_KEY_REPEAT_MODIFIER_KEYS.includes(key)) return;
if (await shouldRedirectKeyboardEventToBackend(e)) {
e.preventDefault();
const modifiers = makeKeyboardModifiersBitfield(e);
editor.instance.onKeyDown(key, modifiers, e.repeat);
return;
}
if (get(dialog).visible && key === "Escape") {
dialog.dismissDialog();
}
}
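// Sketch (assumption): `makeKeyboardModifiersBitfield` is called by every handler in this file.
// A plausible shape of such a helper packs the modifier keys of a mouse/keyboard/wheel event into
// a small bitfield; the exact bit order expected by the backend is an assumption here.
function exampleModifiersBitfield(e: { shiftKey: boolean; ctrlKey: boolean; altKey: boolean; metaKey: boolean }): number {
	return (Number(e.shiftKey) << 0) | (Number(e.ctrlKey) << 1) | (Number(e.altKey) << 2) | (Number(e.metaKey) << 3);
}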
async function onKeyUp(e: KeyboardEvent): Promise<void>
|
// Pointer events
// While any pointer button is already down, additional button down events are not reported, but they are sent as `pointermove` events and these are handled in the backend
function onPointerMove(e: PointerEvent): void {
if (!e.buttons) viewportPointerInteractionOngoing = false;
// Don't redirect pointer movement to the backend if there's no ongoing interaction and it's over a floating menu, or the graph overlay, on top of the canvas
// TODO: A better approach is to pass along a boolean to the backend's input preprocessor so it can know if it's being occluded by the GUI.
// TODO: This would allow it to properly decide to act on removing hover focus from something that was hovered in the canvas before moving over the GUI.
// TODO: Further explanation: https://github.com/GraphiteEditor/Graphite/pull/623#discussion_r866436197
const inFloatingMenu = e.target instanceof Element && e.target.closest("[data-floating-menu-content]");
const inGraphOverlay = get(document).graphViewOverlayOpen;
if (!viewportPointerInteractionOngoing && (inFloatingMenu || inGraphOverlay)) return;
const { target } = e;
const newInCanvasArea = (target instanceof Element && target.closest("[data-viewport], [data-graph]")) instanceof Element && !targetIsTextField(window.document.activeElement || undefined);
if (newInCanvasArea && !canvasFocused) {
canvasFocused = true;
app?.focus();
}
const modifiers = makeKeyboardModifiersBitfield(e);
editor.instance.onMouseMove(e.clientX, e.clientY, e.buttons, modifiers);
}
function onMouseDown(e: MouseEvent): void {
// Block middle mouse button auto-scroll mode (the circular gizmo that appears and allows quick scrolling by moving the cursor above or below it)
if (e.button === 1) e.preventDefault();
}
function onPointerDown(e: PointerEvent): void {
const { target } = e;
const isTargetingCanvas = target instanceof Element && target.closest("[data-viewport]");
const inDialog = target instanceof Element && target.closest("[data-dialog-modal] [data-floating-menu-content]");
const inTextInput = target === textToolInteractiveInputElement;
if (get(dialog).visible && !inDialog) {
dialog.dismissDialog();
e.preventDefault();
e.stopPropagation();
}
if (!inTextInput) {
if (textToolInteractiveInputElement) editor.instance.onChangeText(textInputCleanup(textToolInteractiveInputElement.innerText));
else viewportPointerInteractionOngoing = isTargetingCanvas instanceof Element;
}
if (viewportPointerInteractionOngoing) {
const modifiers = makeKeyboardModifiersBitfield(e);
editor.instance.onMouseDown(e.clientX, e.clientY, e.buttons, modifiers);
}
}
function onPointerUp(e: PointerEvent): void {
if (!e.buttons) viewportPointerInteractionOngoing = false;
if (textToolInteractiveInputElement) return;
const modifiers = makeKeyboardModifiersBitfield(e);
editor.instance.onMouseUp(e.clientX, e.clientY, e.buttons, modifiers);
}
function onPotentialDoubleClick(e: MouseEvent): void {
if (textToolInteractiveInputElement) return;
// Allow only double-clicks
if (e.detail !== 2) return;
// `e.buttons` is always 0 in the `mouseup` event, so we have to convert from `e.button` instead
let buttons = 1;
if (e.button === 0) buttons = 1; // LMB
if (e.button === 1) buttons = 4; // MMB
if (e.button === 2) buttons = 2; // RMB
const modifiers = makeKeyboardModifiersBitfield(e);
editor.instance.onDoubleClick(e.clientX, e.clientY, buttons, modifiers);
}
// Mouse events
function onWheelScroll(e: WheelEvent): void {
const { target } = e;
const isTargetingCanvas = target instanceof Element && target.closest("[data-viewport]");
// Redirect vertical scroll wheel movement into a horizontal scroll on a horizontally scrollable element
// There seems to be no possible way to properly employ the browser's smooth scrolling interpolation
const horizontalScrollableElement = target instanceof Element && target.closest("[data-scrollable-x]");
if (horizontalScrollableElement && e.deltaY !== 0) {
horizontalScrollableElement.scrollTo(horizontalScrollableElement.scrollLeft + e.deltaY, 0);
return;
}
if (isTargetingCanvas) {
e.preventDefault();
const modifiers = makeKeyboardModifiersBitfield(e);
editor.instance.onWheelScroll(e.clientX, e.clientY, e.buttons, e.deltaX, e.deltaY, e.deltaZ, modifiers);
}
}
function onContextMenu(e: MouseEvent): void {
if (!targetIsTextField(e.target || undefined) && e.target !== textToolInteractiveInputElement) {
e.preventDefault();
}
}
// Receives a custom event dispatched when the user begins interactively editing with the text tool.
//
|
{
const key = await getLocalizedScanCode(e);
if (await shouldRedirectKeyboardEventToBackend(e)) {
e.preventDefault();
const modifiers = makeKeyboardModifiersBitfield(e);
editor.instance.onKeyUp(key, modifiers, e.repeat);
}
}
|
identifier_body
|
input.ts
|
entry into HTML elements
if (targetIsTextField(e.target || undefined) && key !== "Escape" && !(accelKey && ["Enter", "NumpadEnter"].includes(key))) return false;
// Don't redirect paste
if (key === "KeyV" && accelKey) return false;
// Don't redirect a fullscreen request
if (key === "F11" && e.type === "keydown" && !e.repeat) {
e.preventDefault();
fullscreen.toggleFullscreen();
return false;
}
// Don't redirect a reload request
if (key === "F5") return false;
if (key === "KeyR" && accelKey) return false;
// Don't redirect debugging tools
if (["F12", "F8"].includes(key)) return false;
if (["KeyC", "KeyI", "KeyJ"].includes(key) && accelKey && e.shiftKey) return false;
// Don't redirect tab or enter if not in canvas (to allow navigating elements)
if (!canvasFocused && !targetIsTextField(e.target || undefined) && ["Tab", "Enter", "NumpadEnter", "Space", "ArrowDown", "ArrowLeft", "ArrowRight", "ArrowUp"].includes(key)) return false;
// Redirect to the backend
return true;
}
async function onKeyDown(e: KeyboardEvent): Promise<void> {
const key = await getLocalizedScanCode(e);
const NO_KEY_REPEAT_MODIFIER_KEYS = ["ControlLeft", "ControlRight", "ShiftLeft", "ShiftRight", "MetaLeft", "MetaRight", "AltLeft", "AltRight", "AltGraph", "CapsLock", "Fn", "FnLock"];
if (e.repeat && NO_KEY_REPEAT_MODIFIER_KEYS.includes(key)) return;
if (await shouldRedirectKeyboardEventToBackend(e)) {
e.preventDefault();
const modifiers = makeKeyboardModifiersBitfield(e);
editor.instance.onKeyDown(key, modifiers, e.repeat);
return;
}
if (get(dialog).visible && key === "Escape") {
dialog.dismissDialog();
}
}
async function onKeyUp(e: KeyboardEvent): Promise<void> {
const key = await getLocalizedScanCode(e);
if (await shouldRedirectKeyboardEventToBackend(e)) {
e.preventDefault();
const modifiers = makeKeyboardModifiersBitfield(e);
editor.instance.onKeyUp(key, modifiers, e.repeat);
}
}
// Pointer events
// While any pointer button is already down, additional button down events are not reported, but they are sent as `pointermove` events and these are handled in the backend
function onPointerMove(e: PointerEvent): void {
if (!e.buttons) viewportPointerInteractionOngoing = false;
// Don't redirect pointer movement to the backend if there's no ongoing interaction and it's over a floating menu, or the graph overlay, on top of the canvas
// TODO: A better approach is to pass along a boolean to the backend's input preprocessor so it can know if it's being occluded by the GUI.
// TODO: This would allow it to properly decide to act on removing hover focus from something that was hovered in the canvas before moving over the GUI.
// TODO: Further explanation: https://github.com/GraphiteEditor/Graphite/pull/623#discussion_r866436197
const inFloatingMenu = e.target instanceof Element && e.target.closest("[data-floating-menu-content]");
const inGraphOverlay = get(document).graphViewOverlayOpen;
if (!viewportPointerInteractionOngoing && (inFloatingMenu || inGraphOverlay)) return;
const { target } = e;
const newInCanvasArea = (target instanceof Element && target.closest("[data-viewport], [data-graph]")) instanceof Element && !targetIsTextField(window.document.activeElement || undefined);
if (newInCanvasArea && !canvasFocused) {
canvasFocused = true;
app?.focus();
}
const modifiers = makeKeyboardModifiersBitfield(e);
editor.instance.onMouseMove(e.clientX, e.clientY, e.buttons, modifiers);
}
function onMouseDown(e: MouseEvent): void {
// Block middle mouse button auto-scroll mode (the circular gizmo that appears and allows quick scrolling by moving the cursor above or below it)
if (e.button === 1) e.preventDefault();
}
function onPointerDown(e: PointerEvent): void {
const { target } = e;
const isTargetingCanvas = target instanceof Element && target.closest("[data-viewport]");
const inDialog = target instanceof Element && target.closest("[data-dialog-modal] [data-floating-menu-content]");
const inTextInput = target === textToolInteractiveInputElement;
if (get(dialog).visible && !inDialog) {
dialog.dismissDialog();
e.preventDefault();
e.stopPropagation();
}
if (!inTextInput) {
if (textToolInteractiveInputElement) editor.instance.onChangeText(textInputCleanup(textToolInteractiveInputElement.innerText));
else viewportPointerInteractionOngoing = isTargetingCanvas instanceof Element;
}
if (viewportPointerInteractionOngoing) {
const modifiers = makeKeyboardModifiersBitfield(e);
editor.instance.onMouseDown(e.clientX, e.clientY, e.buttons, modifiers);
}
}
function onPointerUp(e: PointerEvent): void {
if (!e.buttons) viewportPointerInteractionOngoing = false;
if (textToolInteractiveInputElement) return;
const modifiers = makeKeyboardModifiersBitfield(e);
editor.instance.onMouseUp(e.clientX, e.clientY, e.buttons, modifiers);
}
function onPotentialDoubleClick(e: MouseEvent): void {
if (textToolInteractiveInputElement) return;
// Allow only double-clicks
if (e.detail !== 2) return;
// `e.buttons` is always 0 in the `mouseup` event, so we have to convert from `e.button` instead
let buttons = 1;
if (e.button === 0) buttons = 1; // LMB
if (e.button === 1) buttons = 4; // MMB
if (e.button === 2) buttons = 2; // RMB
const modifiers = makeKeyboardModifiersBitfield(e);
editor.instance.onDoubleClick(e.clientX, e.clientY, buttons, modifiers);
}
// Mouse events
function onWheelScroll(e: WheelEvent): void {
const { target } = e;
const isTargetingCanvas = target instanceof Element && target.closest("[data-viewport]");
// Redirect vertical scroll wheel movement into a horizontal scroll on a horizontally scrollable element
// There seems to be no possible way to properly employ the browser's smooth scrolling interpolation
const horizontalScrollableElement = target instanceof Element && target.closest("[data-scrollable-x]");
if (horizontalScrollableElement && e.deltaY !== 0) {
horizontalScrollableElement.scrollTo(horizontalScrollableElement.scrollLeft + e.deltaY, 0);
return;
}
if (isTargetingCanvas) {
e.preventDefault();
const modifiers = makeKeyboardModifiersBitfield(e);
editor.instance.onWheelScroll(e.clientX, e.clientY, e.buttons, e.deltaX, e.deltaY, e.deltaZ, modifiers);
}
}
function onContextMenu(e: MouseEvent): void {
if (!targetIsTextField(e.target || undefined) && e.target !== textToolInteractiveInputElement) {
e.preventDefault();
}
}
// Receives a custom event dispatched when the user begins interactively editing with the text tool.
// We keep a copy of the text input element to check against when it's active for text entry.
function onModifyInputField(e: CustomEvent): void {
textToolInteractiveInputElement = e.detail;
}
// Window events
function onWindowResize(container: HTMLElement): void {
const viewports = Array.from(container.querySelectorAll("[data-viewport]"));
const boundsOfViewports = viewports.map((canvas) => {
const bounds = canvas.getBoundingClientRect();
return [bounds.left, bounds.top, bounds.right, bounds.bottom];
});
const flattened = boundsOfViewports.flat();
const data = Float64Array.from(flattened);
if (boundsOfViewports.length > 0) editor.instance.boundsOfViewports(data);
}
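// Sketch (illustrative only): the flattening above produces one Float64Array with four numbers
// per viewport, laid out as [left, top, right, bottom, left, top, right, bottom, ...].
// A standalone version of that flattening step, for clarity:
function flattenViewportBounds(rects: DOMRect[]): Float64Array {
	return Float64Array.from(rects.flatMap((r) => [r.left, r.top, r.right, r.bottom]));
}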
async function onBeforeUnload(e: BeforeUnloadEvent): Promise<void> {
const activeDocument = get(portfolio).documents[get(portfolio).activeDocumentIndex];
if (activeDocument && !activeDocument.isAutoSaved) editor.instance.triggerAutoSave(activeDocument.id);
// Skip the message if the editor crashed, since work is already lost
if (await editor.instance.hasCrashed()) return;
// Skip the message during development, since it's annoying when testing
if (await editor.instance.inDevelopmentMode()) return;
const allDocumentsSaved = get(portfolio).documents.reduce((acc, doc) => acc && doc.isSaved, true);
if (!allDocumentsSaved) {
e.returnValue = "Unsaved work will be lost if the web browser tab is closed. Close anyway?";
e.preventDefault();
}
}
function onPaste(e: ClipboardEvent): void {
const dataTransfer = e.clipboardData;
if (!dataTransfer || targetIsTextField(e.target || undefined)) return;
e.preventDefault();
Array.from(dataTransfer.items).forEach((item) => {
if (item.type === "text/plain") {
|
item.getAsString((text) => {
if (text.startsWith("graphite/layer: ")) {
|
random_line_split
|
|
input.ts
|
Down(e) },
{ target: window, eventName: "mouseup", action: (e: MouseEvent) => onPotentialDoubleClick(e) },
{ target: window, eventName: "wheel", action: (e: WheelEvent) => onWheelScroll(e), options: { passive: false } },
{ target: window, eventName: "modifyinputfield", action: (e: CustomEvent) => onModifyInputField(e) },
{ target: window, eventName: "focusout", action: () => (canvasFocused = false) },
{ target: window.document, eventName: "contextmenu", action: (e: MouseEvent) => onContextMenu(e) },
{ target: window.document, eventName: "fullscreenchange", action: () => fullscreen.fullscreenModeChanged() },
{ target: window.document.body, eventName: "paste", action: (e: ClipboardEvent) => onPaste(e) },
];
// Event bindings
function bindListeners(): void {
// Add event bindings for the lifetime of the application
listeners.forEach(({ target, eventName, action, options }) => target.addEventListener(eventName, action, options));
}
function unbindListeners(): void {
// Remove event bindings after the lifetime of the application (or on hot-module replacement during development)
listeners.forEach(({ target, eventName, action, options }) => target.removeEventListener(eventName, action, options));
}
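// Sketch (assumption, not from the original file): a framework-agnostic way to tie the two
// functions above to an application lifetime, returning a disposer for the caller to invoke on teardown.
function withInputListeners(): () => void {
	bindListeners();
	return () => unbindListeners();
}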
// Keyboard events
async function shouldRedirectKeyboardEventToBackend(e: KeyboardEvent): Promise<boolean> {
// Don't redirect when a modal is covering the workspace
if (get(dialog).visible) return false;
const key = await getLocalizedScanCode(e);
// TODO: Switch to a system where everything is sent to the backend, then the input preprocessor makes decisions and kicks some inputs back to the frontend
const accelKey = platformIsMac() ? e.metaKey : e.ctrlKey;
// Don't redirect user input from text entry into HTML elements
if (targetIsTextField(e.target || undefined) && key !== "Escape" && !(accelKey && ["Enter", "NumpadEnter"].includes(key))) return false;
// Don't redirect paste
if (key === "KeyV" && accelKey) return false;
// Don't redirect a fullscreen request
if (key === "F11" && e.type === "keydown" && !e.repeat) {
e.preventDefault();
fullscreen.toggleFullscreen();
return false;
}
// Don't redirect a reload request
if (key === "F5") return false;
if (key === "KeyR" && accelKey) return false;
// Don't redirect debugging tools
if (["F12", "F8"].includes(key)) return false;
if (["KeyC", "KeyI", "KeyJ"].includes(key) && accelKey && e.shiftKey) return false;
// Don't redirect tab or enter if not in canvas (to allow navigating elements)
if (!canvasFocused && !targetIsTextField(e.target || undefined) && ["Tab", "Enter", "NumpadEnter", "Space", "ArrowDown", "ArrowLeft", "ArrowRight", "ArrowUp"].includes(key)) return false;
// Redirect to the backend
return true;
}
async function onKeyDown(e: KeyboardEvent): Promise<void> {
const key = await getLocalizedScanCode(e);
const NO_KEY_REPEAT_MODIFIER_KEYS = ["ControlLeft", "ControlRight", "ShiftLeft", "ShiftRight", "MetaLeft", "MetaRight", "AltLeft", "AltRight", "AltGraph", "CapsLock", "Fn", "FnLock"];
if (e.repeat && NO_KEY_REPEAT_MODIFIER_KEYS.includes(key)) return;
if (await shouldRedirectKeyboardEventToBackend(e)) {
e.preventDefault();
const modifiers = makeKeyboardModifiersBitfield(e);
editor.instance.onKeyDown(key, modifiers, e.repeat);
return;
}
if (get(dialog).visible && key === "Escape") {
dialog.dismissDialog();
}
}
async function onKeyUp(e: KeyboardEvent): Promise<void> {
const key = await getLocalizedScanCode(e);
if (await shouldRedirectKeyboardEventToBackend(e)) {
e.preventDefault();
const modifiers = makeKeyboardModifiersBitfield(e);
editor.instance.onKeyUp(key, modifiers, e.repeat);
}
}
// Pointer events
// While any pointer button is already down, additional button down events are not reported, but they are sent as `pointermove` events and these are handled in the backend
function
|
(e: PointerEvent): void {
if (!e.buttons) viewportPointerInteractionOngoing = false;
// Don't redirect pointer movement to the backend if there's no ongoing interaction and it's over a floating menu, or the graph overlay, on top of the canvas
// TODO: A better approach is to pass along a boolean to the backend's input preprocessor so it can know if it's being occluded by the GUI.
// TODO: This would allow it to properly decide to act on removing hover focus from something that was hovered in the canvas before moving over the GUI.
// TODO: Further explanation: https://github.com/GraphiteEditor/Graphite/pull/623#discussion_r866436197
const inFloatingMenu = e.target instanceof Element && e.target.closest("[data-floating-menu-content]");
const inGraphOverlay = get(document).graphViewOverlayOpen;
if (!viewportPointerInteractionOngoing && (inFloatingMenu || inGraphOverlay)) return;
const { target } = e;
const newInCanvasArea = (target instanceof Element && target.closest("[data-viewport], [data-graph]")) instanceof Element && !targetIsTextField(window.document.activeElement || undefined);
if (newInCanvasArea && !canvasFocused) {
canvasFocused = true;
app?.focus();
}
const modifiers = makeKeyboardModifiersBitfield(e);
editor.instance.onMouseMove(e.clientX, e.clientY, e.buttons, modifiers);
}
function onMouseDown(e: MouseEvent): void {
// Block middle mouse button auto-scroll mode (the circular gizmo that appears and allows quick scrolling by moving the cursor above or below it)
if (e.button === 1) e.preventDefault();
}
function onPointerDown(e: PointerEvent): void {
const { target } = e;
const isTargetingCanvas = target instanceof Element && target.closest("[data-viewport]");
const inDialog = target instanceof Element && target.closest("[data-dialog-modal] [data-floating-menu-content]");
const inTextInput = target === textToolInteractiveInputElement;
if (get(dialog).visible && !inDialog) {
dialog.dismissDialog();
e.preventDefault();
e.stopPropagation();
}
if (!inTextInput) {
if (textToolInteractiveInputElement) editor.instance.onChangeText(textInputCleanup(textToolInteractiveInputElement.innerText));
else viewportPointerInteractionOngoing = isTargetingCanvas instanceof Element;
}
if (viewportPointerInteractionOngoing) {
const modifiers = makeKeyboardModifiersBitfield(e);
editor.instance.onMouseDown(e.clientX, e.clientY, e.buttons, modifiers);
}
}
function onPointerUp(e: PointerEvent): void {
if (!e.buttons) viewportPointerInteractionOngoing = false;
if (textToolInteractiveInputElement) return;
const modifiers = makeKeyboardModifiersBitfield(e);
editor.instance.onMouseUp(e.clientX, e.clientY, e.buttons, modifiers);
}
function onPotentialDoubleClick(e: MouseEvent): void {
if (textToolInteractiveInputElement) return;
// Allow only double-clicks
if (e.detail !== 2) return;
// `e.buttons` is always 0 in the `mouseup` event, so we have to convert from `e.button` instead
let buttons = 1;
if (e.button === 0) buttons = 1; // LMB
if (e.button === 1) buttons = 4; // MMB
if (e.button === 2) buttons = 2; // RMB
const modifiers = makeKeyboardModifiersBitfield(e);
editor.instance.onDoubleClick(e.clientX, e.clientY, buttons, modifiers);
}
// Mouse events
function onWheelScroll(e: WheelEvent): void {
const { target } = e;
const isTargetingCanvas = target instanceof Element && target.closest("[data-viewport]");
// Redirect vertical scroll wheel movement into a horizontal scroll on a horizontally scrollable element
// There seems to be no possible way to properly employ the browser's smooth scrolling interpolation
const horizontalScrollableElement = target instanceof Element && target.closest("[data-scrollable-x]");
if (horizontalScrollableElement && e.deltaY !== 0) {
horizontalScrollableElement.scrollTo(horizontalScrollableElement.scrollLeft + e.deltaY, 0);
return;
}
if (isTargetingCanvas) {
e.preventDefault();
const modifiers = makeKeyboardModifiersBitfield(e);
editor.instance.onWheelScroll(e.clientX, e.clientY, e.buttons, e.deltaX, e.deltaY, e.deltaZ, modifiers);
}
}
function onContextMenu(e: MouseEvent): void {
if (!targetIsTextField(e.target || undefined) && e.target !== textToolInteractiveInputElement) {
e.preventDefault();
}
}
// Receives a custom event dispatched when the user begins interactively editing with the text tool.
// We
|
onPointerMove
|
identifier_name
|
main.rs
|
match get_args_as_strings() {
Ok(e) => e,
Err(e) => {
println!("\n{}", e);
print_help();
return;
}
};
if args.len() < 4 {
print_help();
return;
};
let mut parse_result: generator::GeneratorInput = match parse(args) {
Ok(s) => s,
Err(e) => {
println!("\nArgument parser error: {}", e);
print_help();
return;
}
};
match parse_result.generate() {
Err(e) => panic!("Generator error: {}", e),
_ => "",
};
}
fn get_args_as_strings() -> Result<Vec<String>, &'static str> {
let mut ret: Vec<String> = Vec::new();
let args = env::args_os();
for cmd in args {
ret.push(match cmd.into_string() {
Ok(c) => c,
_ => return Err("Invalid unicode character found"),
});
}
Ok(ret)
}
fn parse(args: Vec<String>) -> Result<generator::GeneratorInput, String> {
let mut parse_args = args.iter().skip(1); // Skip program name
let mut inp_file: String = String::new();
let mut out_lang: Lang = Lang::Undef;
let mut out_dir: String = String::new();
let mut out_file: String = String::new();
let mut out_hex: bool = false;
while let Some(cmd) = parse_args.next() {
let cmd_name: &str;
if cmd.starts_with("--") {
cmd_name = &cmd[2..];
} else if cmd.starts_with("-") {
cmd_name = &cmd[1..];
} else {
inp_file = String::from(&cmd[..]);
break;
}
match cmd_name {
"l" | "out-language" => {
let value = match parse_args.next() {
Some(c) => c,
None => return Err(format!("Missing language")),
};
out_lang = match value.as_str() {
"c" => Lang::C,
"cshell" => Lang::Cshell,
"pascal" => Lang::Pascal,
"python" => Lang::Python,
"rust" => Lang::Rust,
l @ _ => return Err(format!("Language not implemented: {}", l)),
};
}
"d" | "out-dir" => {
let value = match parse_args.next() {
Some(c) => c,
None => return Err(format!("Invalid directory")),
};
out_dir = String::from(value);
}
"f" | "out-file" => {
let value = match parse_args.next() {
Some(c) => c,
None => return Err(format!("Invalid output file")),
};
out_file = String::from(value);
}
"h" | "hex" => {
out_hex = true;
}
c @ _ => return Err(format!("Unknown command: {}", c)),
}
}
if inp_file.is_empty() {
return Err(String::from("Invalid input file"));
};
if out_dir.is_empty() {
out_dir = String::from("./");
};
Ok(generator::GeneratorInput {
input_file: inp_file,
output_file: out_file,
output_dir: out_dir,
lang: out_lang,
hex: out_hex,
})
}
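// Sketch (assumption, not in the original source): a unit test showing how `parse` consumes a
// typical argument vector. The expected field values follow directly from the parser above.
#[cfg(test)]
mod parse_tests {
    use super::*;

    #[test]
    fn parses_language_and_input_file() {
        let args: Vec<String> = ["bin2src", "-l", "rust", "-h", "data.bin"]
            .iter()
            .map(|s| s.to_string())
            .collect();
        let parsed = parse(args).expect("arguments should parse");
        assert_eq!(parsed.input_file, "data.bin");
        assert_eq!(parsed.output_dir, "./");
        assert!(parsed.hex);
    }
}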
fn print_help() {
print!(
"
bin2src - version {}
Copyright (C) 2020 {}
This program comes with ABSOLUTELY NO WARRANTY.
This is free software, and you are welcome to redistribute it
under certain conditions; for details access LICENSE file at:
https://github.com/gomiero/bin2src/
bin2src - Converts a binary file to an array of bytes defined in a source file of another language, so you can embed it into your program.
Usage: bin2src < -l LANG | --out-language LANG > [ OPTIONS ] < FILE >
LANG and FILE are required and FILE must be the last argument.
Options:
    -l, --out-language LANG     specify the language, where LANG={{c|cshell|pascal|python|rust}}
    -d, --out-dir PATH          specify where to output the source file(s);
                                if not specified, they are generated in the current directory
    -f, --out-file OUTFILE      specify the output file(s) name (* without extension *);
                                if not specified, the output file(s) will have the same name
                                as the input file (without extra dots).
    -h, --hex                   output bytes in hexadecimal (for C shellcode this flag has
                                different behaviors. See the GitHub site for more information)
Currently supported languages:
- C
- C for shellcode
- Pascal
- Python
- Rust
", VERSION, AUTHOR);
}
mod generator {
use super::lang::c;
use super::lang::cshell;
use super::lang::pascal;
use super::lang::python;
use super::lang::rust;
use super::Lang;
use std::error::Error;
use std::fs;
use std::io::{BufReader, BufWriter, ErrorKind, Read, Write};
use std::path::PathBuf;
#[inline]
pub fn camel(s: &String) -> String {
let mut ss = s.clone().to_lowercase();
let mut first = ss.remove(0).to_uppercase().to_string();
first.push_str(ss.as_str());
first
}
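// Illustration (assumption, not in the original source): `camel` lowercases the whole string and
// then capitalizes only the first character; it would panic on an empty string because of the
// `remove(0)` call.
#[cfg(test)]
mod camel_tests {
    #[test]
    fn capitalizes_first_char_only() {
        assert_eq!(super::camel(&String::from("myFILE")), "Myfile");
    }
}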
#[derive(Debug)]
pub struct GeneratorOutput {
pub ifile_name: String,
pub ifile_path: PathBuf,
pub ifile_size: u64,
pub odir_path: PathBuf,
pub ofile_name: String,
pub hex: bool,
}
impl GeneratorOutput {
pub fn open_inp_file(&mut self) -> Result<BufReader<fs::File>, &'static str> {
let inp_file: BufReader<fs::File> =
match fs::OpenOptions::new().read(true).open(&self.ifile_path) {
Ok(f) => BufReader::with_capacity(32768, f),
Err(e) => {
return match e.kind() {
ErrorKind::PermissionDenied => Err("Permission"),
ErrorKind::NotFound => Err("Not found"),
_ => Err("Can't open file"),
}
}
};
Ok(inp_file)
}
pub fn write_data(
&mut self,
f: &mut BufWriter<fs::File>,
numbytes: u64,
write_if: fn(bool, bool, &mut BufWriter<fs::File>, u8) -> Result<(), Box<dyn Error>>,
sep: String,
) -> Result<(), &'static str> {
let mut ifile = self.open_inp_file()?;
let mut doblock = || -> Result<(), Box<dyn Error>> {
let mut buf = [0u8; 4096];
let mut count = 0;
'outter: loop {
let sz = ifile.read(&mut buf[..])?;
if sz == 0 {
f.flush()?;
break;
} else if sz <= 4096 {
for b in 0..sz {
if count == self.ifile_size - 1 {
write_if(self.hex, false, f, buf[b])?;
break 'outter;
};
write_if(self.hex, true, f, buf[b])?;
count += 1;
if count % numbytes == 0 {
write!(f, "{}", sep)?;
};
}
};
}
Ok(())
};
if let Err(_err) = doblock() {
Err("Error when writing data block")
} else {
Ok(())
}
}
pub fn set_output_fname(&mut self) {
if self.ofile_name.is_empty() {
self.ofile_name = self
.ifile_path
.file_stem()
.unwrap()
.to_str()
.unwrap()
.to_string();
if let Some(pos) = self.ofile_name.find(".") {
self.ofile_name.truncate(pos);
}
};
}
}
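// Sketch (assumption, not in the original source): a callback matching the `write_if` signature
// used by `write_data` above. Judging by the call sites, the first bool selects hex output and the
// second bool is false only for the very last byte, so this example appends a comma after every
// non-final byte.
fn example_write_if(
    hex: bool,
    not_last: bool,
    f: &mut BufWriter<fs::File>,
    byte: u8,
) -> Result<(), Box<dyn Error>> {
    if hex {
        write!(f, "0x{:02X}", byte)?;
    } else {
        write!(f, "{}", byte)?;
    }
    if not_last {
        write!(f, ", ")?;
    }
    Ok(())
}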
#[derive(Debug)]
pub struct GeneratorInput {
pub input_file: String,
pub output_file: String,
pub output_dir: String,
pub lang: Lang,
pub hex: bool,
}
impl GeneratorInput {
fn input_file_test(&mut self) -> Result<(String, PathBuf, u64), &'static str> {
let ifpath: PathBuf = PathBuf::from(&self.input_file);
if !(ifpath.exists() && ifpath.is_file()) {
Err("Input file does not exist or is not a file")
} else {
let ifname: String = String::from(ifpath.file_name().unwrap().to_str().unwrap());
let ifsize = ifpath.metadata().unwrap().len();
Ok((ifname, ifpath, ifsize))
}
}
fn output_dir_test(&mut self) -> Result<PathBuf, &'static str> {
let ofpath: PathBuf = PathBuf::from(&self.output_dir);
// Test for output dir
if !(ofpath.exists() && ofpath.is_dir()) {
Err("Output folder does not exist or is inaccessible")
} else
|
{
Ok(ofpath)
}
|
conditional_block
|
|
main.rs
|
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
#![warn(anonymous_parameters)]
#![warn(bare_trait_objects)]
#![warn(elided_lifetimes_in_paths)]
#![warn(single_use_lifetimes)]
#![warn(trivial_casts)]
#![warn(trivial_numeric_casts)]
#![warn(unused_import_braces)]
#![warn(unused_qualifications)]
mod lang;
use std::env;
const VERSION: &'static str = "0.0.57";
const AUTHOR: &'static str = "Alexandre Gomiero de Oliveira";
#[derive(Debug)]
pub enum Lang {
C,
Cshell,
Pascal,
Python,
Rust,
Undef,
}
fn main() {
let args: Vec<String> = match get_args_as_strings() {
Ok(e) => e,
Err(e) => {
println!("\n{}", e);
print_help();
return;
}
};
if args.len() < 4 {
print_help();
return;
};
let mut parse_result: generator::GeneratorInput = match parse(args) {
Ok(s) => s,
Err(e) => {
println!("\nArgument parser error: {}", e);
print_help();
return;
}
};
match parse_result.generate() {
Err(e) => panic!("Generator error: {}", e),
_ => "",
};
}
fn get_args_as_strings() -> Result<Vec<String>, &'static str> {
let mut ret: Vec<String> = Vec::new();
let args = env::args_os();
for cmd in args {
ret.push(match cmd.into_string() {
Ok(c) => c,
_ => return Err("Invalid unicode character found"),
});
}
Ok(ret)
}
fn parse(args: Vec<String>) -> Result<generator::GeneratorInput, String> {
let mut parse_args = args.iter().skip(1); // Skip program name
let mut inp_file: String = String::new();
let mut out_lang: Lang = Lang::Undef;
let mut out_dir: String = String::new();
let mut out_file: String = String::new();
let mut out_hex: bool = false;
while let Some(cmd) = parse_args.next() {
let cmd_name: &str;
if cmd.starts_with("--") {
cmd_name = &cmd[2..];
} else if cmd.starts_with("-") {
cmd_name = &cmd[1..];
} else {
inp_file = String::from(&cmd[..]);
break;
}
match cmd_name {
"l" | "out-language" => {
let value = match parse_args.next() {
Some(c) => c,
None => return Err(format!("Missing language")),
};
out_lang = match value.as_str() {
"c" => Lang::C,
"cshell" => Lang::Cshell,
"pascal" => Lang::Pascal,
"python" => Lang::Python,
"rust" => Lang::Rust,
l @ _ => return Err(format!("Language not implemented: {}", l)),
};
}
"d" | "out-dir" => {
let value = match parse_args.next() {
Some(c) => c,
None => return Err(format!("Invalid directory")),
};
out_dir = String::from(value);
}
"f" | "out-file" => {
let value = match parse_args.next() {
Some(c) => c,
None => return Err(format!("Invalid output file")),
};
out_file = String::from(value);
}
"h" | "hex" => {
out_hex = true;
}
c @ _ => return Err(format!("Unknown command: {}", c)),
}
}
if inp_file.is_empty() {
return Err(String::from("Invalid input file"));
};
if out_dir.is_empty() {
out_dir = String::from("./");
};
Ok(generator::GeneratorInput {
input_file: inp_file,
output_file: out_file,
output_dir: out_dir,
lang: out_lang,
hex: out_hex,
})
}
fn print_help() {
print!(
"
bin2src - version {}
Copyright (C) 2020 {}
This program comes with ABSOLUTELY NO WARRANTY.
This is free software, and you are welcome to redistribute it
under certain conditions; for details access LICENSE file at:
https://github.com/gomiero/bin2src/
bin2src - Converts a binary file to an array of bytes defined in a source file of another language, so you can embed it into your program.
Usage: bin2src < -l LANG | --out-language LANG > [ OPTIONS ] < FILE >
LANG and FILE are required and FILE must be the last argument.
Options:
    -l, --out-language LANG     specify the language, where LANG={{c|cshell|pascal|python|rust}}
    -d, --out-dir PATH          specify where to output the source file(s);
                                if not specified, they are generated in the current directory
    -f, --out-file OUTFILE      specify the output file(s) name (* without extension *);
                                if not specified, the output file(s) will have the same name
                                as the input file (without extra dots).
    -h, --hex                   output bytes in hexadecimal (for C shellcode this flag has
                                different behaviors. See the GitHub site for more information)
Currently supported languages:
- C
- C for shellcode
- Pascal
- Python
- Rust
", VERSION, AUTHOR);
}
mod generator {
use super::lang::c;
use super::lang::cshell;
use super::lang::pascal;
use super::lang::python;
use super::lang::rust;
use super::Lang;
use std::error::Error;
use std::fs;
use std::io::{BufReader, BufWriter, ErrorKind, Read, Write};
use std::path::PathBuf;
#[inline]
pub fn camel(s: &String) -> String {
let mut ss = s.clone().to_lowercase();
let mut first = ss.remove(0).to_uppercase().to_string();
first.push_str(ss.as_str());
first
}
#[derive(Debug)]
pub struct GeneratorOutput {
pub ifile_name: String,
pub ifile_path: PathBuf,
pub ifile_size: u64,
pub odir_path: PathBuf,
pub ofile_name: String,
pub hex: bool,
}
impl GeneratorOutput {
pub fn open_inp_file(&mut self) -> Result<BufReader<fs::File>, &'static str> {
let inp_file: BufReader<fs::File> =
match fs::OpenOptions::new().read(true).open(&self.ifile_path) {
Ok(f) => BufReader::with_capacity(32768, f),
Err(e) => {
return match e.kind() {
ErrorKind::PermissionDenied => Err("Permission"),
ErrorKind::NotFound => Err("Not found"),
_ => Err("Can't open file"),
}
}
};
Ok(inp_file)
}
pub fn write_data(
&mut self,
f: &mut BufWriter<fs::File>,
numbytes: u64,
write_if: fn(bool, bool, &mut BufWriter<fs::File>, u8) -> Result<(), Box<dyn Error>>,
sep: String,
) -> Result<(), &'static str> {
let mut ifile = self.open_inp_file()?;
let mut doblock = || -> Result<(), Box<dyn Error>> {
let mut buf = [0u8; 4096];
let mut count = 0;
'outter: loop {
let sz = ifile.read(&mut buf[..])?;
if sz == 0 {
f.flush()?;
break;
} else if sz <= 4096 {
for b in 0..sz {
if count == self.ifile_size - 1 {
write_if(self.hex, false, f, buf[b])?;
break 'outter;
};
write_if(self.hex, true, f, buf[b])?;
count += 1;
if count % numbytes == 0 {
write!(f, "{}", sep)?;
};
}
};
}
Ok(())
};
if let Err(_err) = doblock() {
Err("Error when writing data block")
} else {
Ok(())
}
}
pub fn set_output_fname(&mut self) {
if self.ofile_name.is_empty() {
self.ofile_name = self
.ifile_path
.file_stem()
.unwrap()
.to_str()
.unwrap()
.to_string();
if let Some(pos) = self.ofile_name.find(".") {
self.ofile_name.truncate(pos);
}
};
}
}
#[derive(Debug)]
pub struct
|
{
pub input_file: String,
pub output_file: String,
pub output_dir: String,
pub lang: Lang,
|
GeneratorInput
|
identifier_name
|
main.rs
|
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
#![warn(anonymous_parameters)]
#![warn(bare_trait_objects)]
#![warn(elided_lifetimes_in_paths)]
#![warn(single_use_lifetimes)]
#![warn(trivial_casts)]
#![warn(trivial_numeric_casts)]
#![warn(unused_import_braces)]
#![warn(unused_qualifications)]
mod lang;
use std::env;
const VERSION: &'static str = "0.0.57";
const AUTHOR: &'static str = "Alexandre Gomiero de Oliveira";
#[derive(Debug)]
pub enum Lang {
C,
Cshell,
Pascal,
Python,
Rust,
Undef,
}
fn main() {
let args: Vec<String> = match get_args_as_strings() {
Ok(e) => e,
Err(e) => {
println!("\n{}", e);
print_help();
return;
}
};
if args.len() < 4 {
print_help();
return;
};
let mut parse_result: generator::GeneratorInput = match parse(args) {
Ok(s) => s,
Err(e) => {
println!("\nArgument parser error: {}", e);
print_help();
return;
}
};
|
_ => "",
};
}
fn get_args_as_strings() -> Result<Vec<String>, &'static str> {
let mut ret: Vec<String> = Vec::new();
let args = env::args_os();
for cmd in args {
ret.push(match cmd.into_string() {
Ok(c) => c,
_ => return Err("Invalid unicode character found"),
});
}
Ok(ret)
}
fn parse(args: Vec<String>) -> Result<generator::GeneratorInput, String> {
let mut parse_args = args.iter().skip(1); // Skip program name
let mut inp_file: String = String::new();
let mut out_lang: Lang = Lang::Undef;
let mut out_dir: String = String::new();
let mut out_file: String = String::new();
let mut out_hex: bool = false;
while let Some(cmd) = parse_args.next() {
let cmd_name: &str;
if cmd.starts_with("--") {
cmd_name = &cmd[2..];
} else if cmd.starts_with("-") {
cmd_name = &cmd[1..];
} else {
inp_file = String::from(&cmd[..]);
break;
}
match cmd_name {
"l" | "out-language" => {
let value = match parse_args.next() {
Some(c) => c,
None => return Err(format!("Missing language")),
};
out_lang = match value.as_str() {
"c" => Lang::C,
"cshell" => Lang::Cshell,
"pascal" => Lang::Pascal,
"python" => Lang::Python,
"rust" => Lang::Rust,
l @ _ => return Err(format!("Language not implemented: {}", l)),
};
}
"d" | "out-dir" => {
let value = match parse_args.next() {
Some(c) => c,
None => return Err(format!("Invalid directory")),
};
out_dir = String::from(value);
}
"f" | "out-file" => {
let value = match parse_args.next() {
Some(c) => c,
None => return Err(format!("Invalid output file")),
};
out_file = String::from(value);
}
"h" | "hex" => {
out_hex = true;
}
c @ _ => return Err(format!("Unknown command: {}", c)),
}
}
if inp_file.is_empty() {
return Err(String::from("Invalid input file"));
};
if out_dir.is_empty() {
out_dir = String::from("./");
};
Ok(generator::GeneratorInput {
input_file: inp_file,
output_file: out_file,
output_dir: out_dir,
lang: out_lang,
hex: out_hex,
})
}
fn print_help() {
print!(
"
bin2src - version {}
Copyright (C) 2020 {}
This program comes with ABSOLUTELY NO WARRANTY.
This is free software, and you are welcome to redistribute it
under certain conditions; for details access LICENSE file at:
https://github.com/gomiero/bin2src/
bin2src - Converts a binary file to an array of bytes defined in a source file of another language, so you can embed it into your program.
Usage: bin2src < -l LANG | --out-language LANG > [ OPTIONS ] < FILE >
LANG and FILE are required and FILE must be the last argument.
Options:
    -l, --out-language LANG     specify the language, where LANG={{c|cshell|pascal|python|rust}}
    -d, --out-dir PATH          specify where to output the source file(s);
                                if not specified, they are generated in the current directory
    -f, --out-file OUTFILE      specify the output file(s) name (* without extension *);
                                if not specified, the output file(s) will have the same name
                                as the input file (without extra dots).
    -h, --hex                   output bytes in hexadecimal (for C shellcode this flag has
                                different behaviors. See the GitHub site for more information)
Currently supported languages:
- C
- C for shellcode
- Pascal
- Python
- Rust
", VERSION, AUTHOR);
}
mod generator {
use super::lang::c;
use super::lang::cshell;
use super::lang::pascal;
use super::lang::python;
use super::lang::rust;
use super::Lang;
use std::error::Error;
use std::fs;
use std::io::{BufReader, BufWriter, ErrorKind, Read, Write};
use std::path::PathBuf;
#[inline]
pub fn camel(s: &String) -> String {
let mut ss = s.clone().to_lowercase();
let mut first = ss.remove(0).to_uppercase().to_string();
first.push_str(ss.as_str());
first
}
#[derive(Debug)]
pub struct GeneratorOutput {
pub ifile_name: String,
pub ifile_path: PathBuf,
pub ifile_size: u64,
pub odir_path: PathBuf,
pub ofile_name: String,
pub hex: bool,
}
impl GeneratorOutput {
pub fn open_inp_file(&mut self) -> Result<BufReader<fs::File>, &'static str> {
let inp_file: BufReader<fs::File> =
match fs::OpenOptions::new().read(true).open(&self.ifile_path) {
Ok(f) => BufReader::with_capacity(32768, f),
Err(e) => {
return match e.kind() {
ErrorKind::PermissionDenied => Err("Permission"),
ErrorKind::NotFound => Err("Not found"),
_ => Err("Can't open file"),
}
}
};
Ok(inp_file)
}
pub fn write_data(
&mut self,
f: &mut BufWriter<fs::File>,
numbytes: u64,
write_if: fn(bool, bool, &mut BufWriter<fs::File>, u8) -> Result<(), Box<dyn Error>>,
sep: String,
) -> Result<(), &'static str> {
let mut ifile = self.open_inp_file()?;
let mut doblock = || -> Result<(), Box<dyn Error>> {
let mut buf = [0u8; 4096];
let mut count = 0;
'outter: loop {
let sz = ifile.read(&mut buf[..])?;
if sz == 0 {
f.flush()?;
break;
} else if sz <= 4096 {
for b in 0..sz {
if count == self.ifile_size - 1 {
write_if(self.hex, false, f, buf[b])?;
break 'outter;
};
write_if(self.hex, true, f, buf[b])?;
count += 1;
if count % numbytes == 0 {
write!(f, "{}", sep)?;
};
}
};
}
Ok(())
};
if let Err(_err) = doblock() {
Err("Error when writing data block")
} else {
Ok(())
}
}
pub fn set_output_fname(&mut self) {
if self.ofile_name.is_empty() {
self.ofile_name = self
.ifile_path
.file_stem()
.unwrap()
.to_str()
.unwrap()
.to_string();
if let Some(pos) = self.ofile_name.find(".") {
self.ofile_name.truncate(pos);
}
};
}
}
#[derive(Debug)]
pub struct GeneratorInput {
pub input_file: String,
pub output_file: String,
pub output_dir: String,
pub lang: Lang,
|
match parse_result.generate() {
Err(e) => panic!("Generator error: {}", e),
|
random_line_split
|
production_example.py
|
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt # required only for graphs
from autots import AutoTS, load_live_daily, create_regressor
fred_key = None # https://fred.stlouisfed.org/docs/api/api_key.html
gsa_key = None
forecast_name = "example"
graph = True # whether to plot graphs
# https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects
frequency = (
"D" # "infer" for automatic alignment, but specific offsets are most reliable, 'D' is daily
)
forecast_length = 60 # number of periods to forecast ahead
drop_most_recent = 1 # whether to discard the n most recent records (as incomplete)
num_validations = (
2 # number of cross validation runs. More is better but slower, usually
)
validation_method = "backwards" # "similarity", "backwards", "seasonal 364"
n_jobs = "auto" # or set to number of CPU cores
prediction_interval = (
0.9 # sets the upper and lower forecast range by probability range. Bigger = wider
)
initial_training = "auto"  # set this to True on first run or on reset; 'auto' looks for an existing template and, if one is found, sets this to False.
evolve = True # allow time series to progressively evolve on each run, if False, uses fixed template
archive_templates = True # save a copy of the model template used with a timestamp
save_location = None # "C:/Users/Colin/Downloads" # directory to save templates to. Defaults to working dir
template_filename = f"autots_forecast_template_{forecast_name}.csv"
forecast_csv_name = None # f"autots_forecast_{forecast_name}.csv" # or None, point forecast only is written
model_list = "fast_parallel_no_arima"
transformer_list = "fast" # 'superfast'
transformer_max_depth = 5
models_mode = "default" # "deep", "regressor"
initial_template = 'random' # 'random' 'general+random'
preclean = None
{  # example preclean config; assign this dict to `preclean` above (instead of None) to enable pre-cleaning
"fillna": 'ffill',
"transformations": {"0": "EWMAFilter"},
"transformation_params": {
"0": {"span": 14},
},
}
back_forecast = False
start_time = datetime.datetime.now()
if save_location is not None:
template_filename = os.path.join(save_location, template_filename)
if forecast_csv_name is not None:
forecast_csv_name = os.path.join(save_location, forecast_csv_name)
if initial_training == "auto":
initial_training = not os.path.exists(template_filename)
if initial_training:
print("No existing template found.")
else:
print("Existing template found.")
# set max generations based on settings, increase for slower but greater chance of highest accuracy
# if include_ensemble is specified in import_templates, ensembles can progressively nest over generations
if initial_training:
gens = 100
generation_timeout = 10000 # minutes
models_to_validate = 0.15
ensemble = ["horizontal-max", "dist", "simple"] # , "mosaic", "mosaic-window", 'mlensemble'
elif evolve:
gens = 50
generation_timeout = 480 # minutes
models_to_validate = 0.15
ensemble = ["horizontal-max", "dist", "simple"] # "mosaic", "mosaic-window", "subsample"
else:
gens = 0
generation_timeout = 60 # minutes
models_to_validate = 0.99
ensemble = ["horizontal-max", "dist", "simple"] # "mosaic", "mosaic-window",
# only save the very best model if not evolve
if evolve:
n_export = 50
else:
n_export = 1 # wouldn't be a bad idea to do > 1, allowing some future adaptability
"""
Begin dataset retrieval
"""
fred_series = [
"DGS10",
"T5YIE",
"SP500",
"DCOILWTICO",
"DEXUSEU",
"BAMLH0A0HYM2",
"DAAA",
"DEXUSUK",
"T10Y2Y",
]
tickers = ["MSFT", "PG"]
trend_list = ["forecasting", "msft", "p&g"]
weather_event_types = ["%28Z%29+Winter+Weather", "%28Z%29+Winter+Storm"]
wikipedia_pages = ['all', 'Microsoft', "Procter_%26_Gamble", "YouTube", "United_States"]
df = load_live_daily(
long=False,
fred_key=fred_key,
fred_series=fred_series,
tickers=tickers,
trends_list=trend_list,
earthquake_min_magnitude=5,
weather_years=3,
london_air_days=700,
wikipedia_pages=wikipedia_pages,
gsa_key=gsa_key,
gov_domain_list=None, # ['usajobs.gov', 'usps.com', 'weather.gov'],
gov_domain_limit=700,
weather_event_types=weather_event_types,
sleep_seconds=15,
)
# be careful of very noisy, large value series mixed into more well-behaved data as they can skew some metrics such that they get most of the attention
# remove "volume" data as it skews MAE (other solutions are to adjust metric_weighting towards SMAPE, use series `weights`, or pre-scale data)
df = df[[x for x in df.columns if "_volume" not in x]]
# remove dividends and stock splits as it skews metrics
df = df[[x for x in df.columns if "_dividends" not in x]]
df = df[[x for x in df.columns if "stock_splits" not in x]]
# scale 'wiki_all' to millions to prevent too much skew of MAE
if 'wiki_all' in df.columns:
df['wiki_all_millions'] = df['wiki_all'] / 1000000
df = df.drop(columns=['wiki_all'])
# manual NaN cleaning where real values are easily approximated, this is the way
# although if you have 'no good idea' why it is random, auto is best
# note manual pre-cleaning affects VALIDATION significantly (for better or worse)
# as NaN times in history are skipped by metrics, but filled values, as added here, are evaluated
if trend_list is not None:
for tx in trend_list:
if tx in df.columns:
df[tx] = df[tx].interpolate('akima').fillna(method='ffill', limit=30).fillna(method='bfill', limit=30)
# fill weekends
if tickers is not None:
for fx in tickers:
for suffix in ["_high", "_low", "_open", "_close"]:
fxs = (fx + suffix).lower()
if fxs in df.columns:
df[fxs] = df[fxs].interpolate('akima')
if fred_series is not None:
for fx in fred_series:
if fx in df.columns:
df[fx] = df[fx].interpolate('akima')
if weather_event_types is not None:
wevnt = [x for x in df.columns if "_Events" in x]
df[wevnt] = df[wevnt].mask(df[wevnt].notnull().cummax(), df[wevnt].fillna(0))
# most of the NaN here are just weekends, when financial series aren't collected, ffill of a few steps is fine
# partial forward fill, no back fill
df = df.fillna(method='ffill', limit=3)
df = df[df.index.year > 1999]
# remove any data from the future
df = df[df.index <= start_time]
# remove series with no recent data
df = df.dropna(axis="columns", how="all")
min_cutoff_date = start_time - datetime.timedelta(days=180)
most_recent_date = df.notna()[::-1].idxmax()
drop_cols = most_recent_date[most_recent_date < min_cutoff_date].index.tolist()
df = df.drop(columns=drop_cols)
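# Sketch (illustrative only, not part of the pipeline): how the stale-series filter above behaves.
# `df.notna()[::-1].idxmax()` walks each column from the newest row backwards and returns the most
# recent date holding a real value; any column whose last observation is older than
# `min_cutoff_date` (180 days before `start_time`) lands in `drop_cols` and is removed.
# toy = pd.DataFrame({"fresh": [1.0, 2.0], "stale": [1.0, np.nan]},
#                    index=pd.to_datetime(["2020-01-01", start_time]))
# toy.notna()[::-1].idxmax()  # -> fresh: start_time (kept), stale: 2020-01-01 (dropped)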
print(
f"Series with most NaN: {df.head(365).isnull().sum().sort_values(ascending=False).head(5)}"
)
df.to_csv(f"training_data_{forecast_name}.csv")
# df = pd.read_csv(f"training_data_{forecast_name}.csv", index_col=0, parse_dates=[0])
# example future_regressor with some things we can glean from data and datetime index
# note this only accepts `wide` style input dataframes
# and this is optional, not required for the modeling
# also create macro_micro before inclusion
regr_train, regr_fcst = create_regressor(
df,
forecast_length=forecast_length,
frequency=frequency,
drop_most_recent=drop_most_recent,
scale=True,
summarize="auto",
backfill="bfill",
fill_na="s
|
patch_sklearn()
except Exception as e:
print(repr(e))
import json
import datetime
|
random_line_split
|
|
production_example.py
|
in x]]
df = df[[x for x in df.columns if "stock_splits" not in x]]
# scale 'wiki_all' to millions to prevent too much skew of MAE
if 'wiki_all' in df.columns:
df['wiki_all_millions'] = df['wiki_all'] / 1000000
df = df.drop(columns=['wiki_all'])
# manual NaN cleaning where real values are easily approximated, this is the way
# although if you have 'no good idea' why it is random, auto is best
# note manual pre-cleaning affects VALIDATION significantly (for better or worse)
# as NaN times in history are skipped by metrics, but filled values, as added here, are evaluated
if trend_list is not None:
for tx in trend_list:
if tx in df.columns:
df[tx] = df[tx].interpolate('akima').fillna(method='ffill', limit=30).fillna(method='bfill', limit=30)
# fill weekends
if tickers is not None:
for fx in tickers:
for suffix in ["_high", "_low", "_open", "_close"]:
fxs = (fx + suffix).lower()
if fxs in df.columns:
df[fxs] = df[fxs].interpolate('akima')
if fred_series is not None:
for fx in fred_series:
if fx in df.columns:
df[fx] = df[fx].interpolate('akima')
if weather_event_types is not None:
wevnt = [x for x in df.columns if "_Events" in x]
df[wevnt] = df[wevnt].mask(df[wevnt].notnull().cummax(), df[wevnt].fillna(0))
# most of the NaN here are just weekends, when financial series aren't collected, ffill of a few steps is fine
# partial forward fill, no back fill
df = df.fillna(method='ffill', limit=3)
df = df[df.index.year > 1999]
# remove any data from the future
df = df[df.index <= start_time]
# remove series with no recent data
df = df.dropna(axis="columns", how="all")
min_cutoff_date = start_time - datetime.timedelta(days=180)
most_recent_date = df.notna()[::-1].idxmax()
drop_cols = most_recent_date[most_recent_date < min_cutoff_date].index.tolist()
df = df.drop(columns=drop_cols)
print(
f"Series with most NaN: {df.head(365).isnull().sum().sort_values(ascending=False).head(5)}"
)
df.to_csv(f"training_data_{forecast_name}.csv")
# df = pd.read_csv(f"training_data_{forecast_name}.csv", index_col=0, parse_dates=[0])
# example future_regressor with some things we can glean from data and datetime index
# note this only accepts `wide` style input dataframes
# and this is optional, not required for the modeling
# also create macro_micro before inclusion
regr_train, regr_fcst = create_regressor(
df,
forecast_length=forecast_length,
frequency=frequency,
drop_most_recent=drop_most_recent,
scale=True,
summarize="auto",
backfill="bfill",
fill_na="spline",
holiday_countries={"US": None}, # requires holidays package
encode_holiday_type=True,
# datepart_method="simple_2",
)
# remove the first forecast_length rows (because those are lost in regressor)
df = df.iloc[forecast_length:]
regr_train = regr_train.iloc[forecast_length:]
print("data setup completed, beginning modeling")
"""
Begin modeling
"""
metric_weighting = {
'smape_weighting': 3,
'mae_weighting': 2,
'rmse_weighting': 1,
'made_weighting': 1,
'mage_weighting': 0,
'mle_weighting': 0,
'imle_weighting': 0,
'spl_weighting': 3,
'dwae_weighting': 1,
'runtime_weighting': 0.05,
}
model = AutoTS(
forecast_length=forecast_length,
frequency=frequency,
prediction_interval=prediction_interval,
ensemble=ensemble,
model_list=model_list,
transformer_list=transformer_list,
transformer_max_depth=transformer_max_depth,
max_generations=gens,
metric_weighting=metric_weighting,
initial_template=initial_template,
aggfunc="first",
models_to_validate=models_to_validate,
model_interrupt=True,
num_validations=num_validations,
validation_method=validation_method,
constraint=None,
drop_most_recent=drop_most_recent, # if newest data is incomplete, also remember to increase forecast_length
preclean=preclean,
models_mode=models_mode,
# no_negatives=True,
# subset=100,
# prefill_na=0,
# remove_leading_zeroes=True,
current_model_file=f"current_model_{forecast_name}",
generation_timeout=generation_timeout,
n_jobs=n_jobs,
verbose=1,
)
if not initial_training:
if evolve:
model.import_template(template_filename, method="addon")
else:
model.import_template(template_filename, method="only")
model = model.fit(
df,
future_regressor=regr_train,
)
# save a template of best models
if initial_training or evolve:
model.export_template(
template_filename,
models="best",
n=n_export,
max_per_model_class=6,
include_results=True,
)
if archive_templates:
arc_file = f"{template_filename.split('.csv')[0]}_{start_time.strftime('%Y%m%d%H%M')}.csv"
model.export_template(arc_file, models="best", n=1)
prediction = model.predict(
future_regressor=regr_fcst, verbose=2, fail_on_forecast_nan=True
)
# Print the details of the best model
print(model)
"""
Process results
"""
# point forecasts dataframe
forecasts_df = prediction.forecast # .fillna(0).round(0)
if forecast_csv_name is not None:
forecasts_df.to_csv(forecast_csv_name)
forecasts_upper_df = prediction.upper_forecast
forecasts_lower_df = prediction.lower_forecast
# accuracy of all tried model results
model_results = model.results()
validation_results = model.results("validation")
print(f"Model failure rate is {model.failure_rate() * 100:.1f}%")
print(f'The following model types failed completely {model.list_failed_model_types()}')
print("Slowest models:")
print(
model_results[model_results["Ensemble"] < 1]
.groupby("Model")
.agg({"TotalRuntimeSeconds": ["mean", "max"]})
.idxmax()
)
model_parameters = json.loads(model.best_model["ModelParameters"].iloc[0])
# model.export_template("all_results.csv", models='all')
if graph:
with plt.style.context("bmh"):
start_date = 'auto' # '2021-01-01'
prediction.plot_grid(model.df_wide_numeric, start_date=start_date)
plt.show()
scores = model.best_model_per_series_mape().index.tolist()
scores = [x for x in scores if x in df.columns]
worst = scores[0:6]
prediction.plot_grid(model.df_wide_numeric, start_date=start_date, title="Worst Performing Forecasts", cols=worst)
plt.show()
best = scores[-6:]
prediction.plot_grid(model.df_wide_numeric, start_date=start_date, title="Best Performing Forecasts", cols=best)
plt.show()
if model.best_model_name == "Cassandra":
prediction.model.plot_components(
prediction, series=None, to_origin_space=True, start_date=start_date
)
plt.show()
prediction.model.plot_trend(
series=None, start_date=start_date
)
plt.show()
ax = model.plot_per_series_mape()
plt.legend(bbox_to_anchor=(1.02, 1), loc='upper left', borderaxespad=0)
plt.show()
if back_forecast:
model.plot_backforecast()
plt.show()
ax = model.plot_validations()
plt.legend(bbox_to_anchor=(1.02, 1), loc='upper left', borderaxespad=0)
plt.show()
ax = model.plot_validations(subset='best')
plt.legend(bbox_to_anchor=(1.02, 1), loc='upper left', borderaxespad=0)
plt.show()
ax = model.plot_validations(subset='worst')
plt.legend(bbox_to_anchor=(1.02, 1), loc='upper left', borderaxespad=0)
plt.show()
if model.best_model_ensemble == 2:
|
plt.subplots_adjust(bottom=0.5)
model.plot_horizontal_transformers()
plt.show()
model.plot_horizontal_model_count()
plt.show()
model.plot_horizontal()
plt.show()
# plt.savefig("horizontal.png", dpi=300, bbox_inches="tight")
if str(model_parameters["model_name"]).lower() in ["mosaic", "mosaic-window"]:
mosaic_df = model.mosaic_to_df()
print(mosaic_df[mosaic_df.columns[0:5]].head(5))
|
conditional_block
|
|
ip6.go
|
}
bytes[0] = (ip6.Version << 4) | (ip6.TrafficClass >> 4)
bytes[1] = (ip6.TrafficClass << 4) | uint8(ip6.FlowLabel>>16)
binary.BigEndian.PutUint16(bytes[2:], uint16(ip6.FlowLabel))
if opts.FixLengths {
ip6.Length = uint16(len(payload))
}
binary.BigEndian.PutUint16(bytes[4:], ip6.Length)
bytes[6] = byte(ip6.NextHeader)
bytes[7] = byte(ip6.HopLimit)
if len(ip6.SrcIP) != 16 {
return fmt.Errorf("invalid src ip %v", ip6.SrcIP)
}
if len(ip6.DstIP) != 16 {
return fmt.Errorf("invalid dst ip %v", ip6.DstIP)
}
copy(bytes[8:], ip6.SrcIP)
copy(bytes[24:], ip6.DstIP)
return nil
}
func (ip6 *IPv6) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
ip6.Version = uint8(data[0]) >> 4
ip6.TrafficClass = uint8((binary.BigEndian.Uint16(data[0:2]) >> 4) & 0x00FF)
ip6.FlowLabel = binary.BigEndian.Uint32(data[0:4]) & 0x000FFFFF
ip6.Length = binary.BigEndian.Uint16(data[4:6])
ip6.NextHeader = IPProtocol(data[6])
ip6.HopLimit = data[7]
ip6.SrcIP = data[8:24]
ip6.DstIP = data[24:40]
ip6.HopByHop = nil
// We initially set the payload to all bytes after 40. ip6.Length or the
// HopByHop jumbogram option can both change this eventually, though.
ip6.BaseLayer = BaseLayer{data[:40], data[40:]}
// We treat a HopByHop IPv6 option as part of the IPv6 packet, since its
// options are crucial for understanding what's actually happening per packet.
if ip6.NextHeader == IPProtocolIPv6HopByHop {
ip6.hbh.DecodeFromBytes(ip6.Payload, df)
hbhLen := len(ip6.hbh.Contents)
// Reset IPv6 contents to include the HopByHop header.
ip6.BaseLayer = BaseLayer{data[:40+hbhLen], data[40+hbhLen:]}
ip6.HopByHop = &ip6.hbh
if ip6.Length == 0 {
for _, o := range ip6.hbh.Options {
if o.OptionType == IPv6HopByHopOptionJumbogram {
if len(o.OptionData) != 4 {
return fmt.Errorf("Invalid jumbo packet option length")
}
payloadLength := binary.BigEndian.Uint32(o.OptionData)
pEnd := int(payloadLength)
if pEnd > len(ip6.Payload) {
df.SetTruncated()
} else {
ip6.Payload = ip6.Payload[:pEnd]
ip6.hbh.Payload = ip6.Payload
}
return nil
}
}
return fmt.Errorf("IPv6 length 0, but HopByHop header does not have jumbogram option")
}
}
if ip6.Length == 0 {
return fmt.Errorf("IPv6 length 0, but next header is %v, not HopByHop", ip6.NextHeader)
} else {
pEnd := int(ip6.Length)
if pEnd > len(ip6.Payload) {
df.SetTruncated()
pEnd = len(ip6.Payload)
}
ip6.Payload = ip6.Payload[:pEnd]
}
return nil
}
func (i *IPv6) CanDecode() gopacket.LayerClass {
return LayerTypeIPv6
}
func (i *IPv6) NextLayerType() gopacket.LayerType {
if i.HopByHop != nil {
return i.HopByHop.NextHeader.LayerType()
}
return i.NextHeader.LayerType()
}
func decodeIPv6(data []byte, p gopacket.PacketBuilder) error {
ip6 := &IPv6{}
err := ip6.DecodeFromBytes(data, p)
p.AddLayer(ip6)
p.SetNetworkLayer(ip6)
if ip6.HopByHop != nil {
// TODO(gconnell): Since HopByHop is now an integral part of the IPv6
// layer, should it actually be added as its own layer? I'm leaning towards
// no.
p.AddLayer(ip6.HopByHop)
}
if err != nil {
return err
}
if ip6.HopByHop != nil {
return p.NextDecoder(ip6.HopByHop.NextHeader)
}
return p.NextDecoder(ip6.NextHeader)
}
func (i *IPv6HopByHop) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
i.ipv6ExtensionBase = decodeIPv6ExtensionBase(data)
i.Options = i.opts[:0]
var opt *IPv6HopByHopOption
for d := i.Contents[2:]; len(d) > 0; d = d[opt.ActualLength:] {
i.Options = append(i.Options, IPv6HopByHopOption(decodeIPv6HeaderTLVOption(d)))
opt = &i.Options[len(i.Options)-1]
}
return nil
}
func decodeIPv6HopByHop(data []byte, p gopacket.PacketBuilder) error {
i := &IPv6HopByHop{}
err := i.DecodeFromBytes(data, p)
p.AddLayer(i)
if err != nil {
return err
}
return p.NextDecoder(i.NextHeader)
}
type ipv6HeaderTLVOption struct {
OptionType, OptionLength uint8
ActualLength int
OptionData []byte
}
func decodeIPv6HeaderTLVOption(data []byte) (h ipv6HeaderTLVOption) {
if data[0] == 0 {
h.ActualLength = 1
return
}
h.OptionType = data[0]
h.OptionLength = data[1]
h.ActualLength = int(h.OptionLength) + 2
h.OptionData = data[2:h.ActualLength]
return
}
func (h *ipv6HeaderTLVOption) serializeTo(b gopacket.SerializeBuffer, fixLengths bool) (int, error) {
if fixLengths {
h.OptionLength = uint8(len(h.OptionData))
}
length := int(h.OptionLength) + 2
data, err := b.PrependBytes(length)
if err != nil {
return 0, err
}
data[0] = h.OptionType
data[1] = h.OptionLength
copy(data[2:], h.OptionData)
return length, nil
}
// IPv6HopByHopOption is a TLV option present in an IPv6 hop-by-hop extension.
type IPv6HopByHopOption ipv6HeaderTLVOption
type ipv6ExtensionBase struct {
BaseLayer
NextHeader IPProtocol
HeaderLength uint8
ActualLength int
}
func decodeIPv6ExtensionBase(data []byte) (i ipv6ExtensionBase) {
i.NextHeader = IPProtocol(data[0])
i.HeaderLength = data[1]
i.ActualLength = int(i.HeaderLength)*8 + 8
i.Contents = data[:i.ActualLength]
i.Payload = data[i.ActualLength:]
return
}
// IPv6ExtensionSkipper is a DecodingLayer which decodes and ignores v6
// extensions. You can use it with a DecodingLayerParser to handle IPv6 stacks
// which may or may not have extensions.
type IPv6ExtensionSkipper struct {
NextHeader IPProtocol
BaseLayer
}
func (i *IPv6ExtensionSkipper) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
extension := decodeIPv6ExtensionBase(data)
i.BaseLayer = BaseLayer{data[:extension.ActualLength], data[extension.ActualLength:]}
i.NextHeader = extension.NextHeader
return nil
}
func (i *IPv6ExtensionSkipper) CanDecode() gopacket.LayerClass {
return LayerClassIPv6Extension
}
func (i *IPv6ExtensionSkipper) NextLayerType() gopacket.LayerType {
return i.NextHeader.LayerType()
}
// IPv6HopByHop is the IPv6 hop-by-hop extension.
type IPv6HopByHop struct {
ipv6ExtensionBase
Options []IPv6HopByHopOption
opts [2]IPv6HopByHopOption
}
// LayerType returns LayerTypeIPv6HopByHop.
func (i *IPv6HopByHop) LayerType() gopacket.LayerType { return LayerTypeIPv6HopByHop }
// IPv6Routing is the IPv6 routing extension.
type IPv6Routing struct {
ipv6ExtensionBase
RoutingType uint8
SegmentsLeft uint8
	// Reserved is the second set of 4 bytes in the extension; per RFC 2460 it is
	// supposed to be zero.
Reserved []byte
// SourceRoutingIPs is the set of IPv6 addresses requested for source routing,
// set only
|
if err != nil {
return err
|
random_line_split
|
|
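A note on the first four bytes handled above: Version, TrafficClass, and FlowLabel share them, which is why SerializeTo and DecodeFromBytes use mirrored shift/mask pairs. Below is a minimal standalone sketch of that packing round trip; the sample values are arbitrary, not taken from any capture.
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	version := uint8(6)
	trafficClass := uint8(0xA5)
	flowLabel := uint32(0x12345) // 20-bit field

	// Pack the first 4 bytes the same way SerializeTo does.
	var hdr [4]byte
	hdr[0] = (version << 4) | (trafficClass >> 4)
	hdr[1] = (trafficClass << 4) | uint8(flowLabel>>16)
	binary.BigEndian.PutUint16(hdr[2:], uint16(flowLabel))

	// Unpack with the same shifts DecodeFromBytes uses.
	gotVersion := hdr[0] >> 4
	gotTC := uint8((binary.BigEndian.Uint16(hdr[0:2]) >> 4) & 0x00FF)
	gotFlow := binary.BigEndian.Uint32(hdr[0:4]) & 0x000FFFFF

	fmt.Println(gotVersion == version, gotTC == trafficClass, gotFlow == flowLabel) // true true true
}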
ip6.go
|
ip6.Version << 4) | (ip6.TrafficClass >> 4)
bytes[1] = (ip6.TrafficClass << 4) | uint8(ip6.FlowLabel>>16)
binary.BigEndian.PutUint16(bytes[2:], uint16(ip6.FlowLabel))
if opts.FixLengths {
ip6.Length = uint16(len(payload))
}
binary.BigEndian.PutUint16(bytes[4:], ip6.Length)
bytes[6] = byte(ip6.NextHeader)
bytes[7] = byte(ip6.HopLimit)
if len(ip6.SrcIP) != 16 {
return fmt.Errorf("invalid src ip %v", ip6.SrcIP)
}
if len(ip6.DstIP) != 16 {
return fmt.Errorf("invalid dst ip %v", ip6.DstIP)
}
copy(bytes[8:], ip6.SrcIP)
copy(bytes[24:], ip6.DstIP)
return nil
}
func (ip6 *IPv6) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
ip6.Version = uint8(data[0]) >> 4
ip6.TrafficClass = uint8((binary.BigEndian.Uint16(data[0:2]) >> 4) & 0x00FF)
ip6.FlowLabel = binary.BigEndian.Uint32(data[0:4]) & 0x000FFFFF
ip6.Length = binary.BigEndian.Uint16(data[4:6])
ip6.NextHeader = IPProtocol(data[6])
ip6.HopLimit = data[7]
ip6.SrcIP = data[8:24]
ip6.DstIP = data[24:40]
ip6.HopByHop = nil
// We initially set the payload to all bytes after 40. ip6.Length or the
// HopByHop jumbogram option can both change this eventually, though.
ip6.BaseLayer = BaseLayer{data[:40], data[40:]}
// We treat a HopByHop IPv6 option as part of the IPv6 packet, since its
// options are crucial for understanding what's actually happening per packet.
if ip6.NextHeader == IPProtocolIPv6HopByHop {
ip6.hbh.DecodeFromBytes(ip6.Payload, df)
hbhLen := len(ip6.hbh.Contents)
// Reset IPv6 contents to include the HopByHop header.
ip6.BaseLayer = BaseLayer{data[:40+hbhLen], data[40+hbhLen:]}
ip6.HopByHop = &ip6.hbh
if ip6.Length == 0
|
}
if ip6.Length == 0 {
return fmt.Errorf("IPv6 length 0, but next header is %v, not HopByHop", ip6.NextHeader)
} else {
pEnd := int(ip6.Length)
if pEnd > len(ip6.Payload) {
df.SetTruncated()
pEnd = len(ip6.Payload)
}
ip6.Payload = ip6.Payload[:pEnd]
}
return nil
}
func (i *IPv6) CanDecode() gopacket.LayerClass {
return LayerTypeIPv6
}
func (i *IPv6) NextLayerType() gopacket.LayerType {
if i.HopByHop != nil {
return i.HopByHop.NextHeader.LayerType()
}
return i.NextHeader.LayerType()
}
func decodeIPv6(data []byte, p gopacket.PacketBuilder) error {
ip6 := &IPv6{}
err := ip6.DecodeFromBytes(data, p)
p.AddLayer(ip6)
p.SetNetworkLayer(ip6)
if ip6.HopByHop != nil {
// TODO(gconnell): Since HopByHop is now an integral part of the IPv6
// layer, should it actually be added as its own layer? I'm leaning towards
// no.
p.AddLayer(ip6.HopByHop)
}
if err != nil {
return err
}
if ip6.HopByHop != nil {
return p.NextDecoder(ip6.HopByHop.NextHeader)
}
return p.NextDecoder(ip6.NextHeader)
}
func (i *IPv6HopByHop) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
i.ipv6ExtensionBase = decodeIPv6ExtensionBase(data)
i.Options = i.opts[:0]
var opt *IPv6HopByHopOption
for d := i.Contents[2:]; len(d) > 0; d = d[opt.ActualLength:] {
i.Options = append(i.Options, IPv6HopByHopOption(decodeIPv6HeaderTLVOption(d)))
opt = &i.Options[len(i.Options)-1]
}
return nil
}
func decodeIPv6HopByHop(data []byte, p gopacket.PacketBuilder) error {
i := &IPv6HopByHop{}
err := i.DecodeFromBytes(data, p)
p.AddLayer(i)
if err != nil {
return err
}
return p.NextDecoder(i.NextHeader)
}
type ipv6HeaderTLVOption struct {
OptionType, OptionLength uint8
ActualLength int
OptionData []byte
}
func decodeIPv6HeaderTLVOption(data []byte) (h ipv6HeaderTLVOption) {
if data[0] == 0 {
h.ActualLength = 1
return
}
h.OptionType = data[0]
h.OptionLength = data[1]
h.ActualLength = int(h.OptionLength) + 2
h.OptionData = data[2:h.ActualLength]
return
}
func (h *ipv6HeaderTLVOption) serializeTo(b gopacket.SerializeBuffer, fixLengths bool) (int, error) {
if fixLengths {
h.OptionLength = uint8(len(h.OptionData))
}
length := int(h.OptionLength) + 2
data, err := b.PrependBytes(length)
if err != nil {
return 0, err
}
data[0] = h.OptionType
data[1] = h.OptionLength
copy(data[2:], h.OptionData)
return length, nil
}
// IPv6HopByHopOption is a TLV option present in an IPv6 hop-by-hop extension.
type IPv6HopByHopOption ipv6HeaderTLVOption
type ipv6ExtensionBase struct {
BaseLayer
NextHeader IPProtocol
HeaderLength uint8
ActualLength int
}
func decodeIPv6ExtensionBase(data []byte) (i ipv6ExtensionBase) {
i.NextHeader = IPProtocol(data[0])
i.HeaderLength = data[1]
i.ActualLength = int(i.HeaderLength)*8 + 8
i.Contents = data[:i.ActualLength]
i.Payload = data[i.ActualLength:]
return
}
// IPv6ExtensionSkipper is a DecodingLayer which decodes and ignores v6
// extensions. You can use it with a DecodingLayerParser to handle IPv6 stacks
// which may or may not have extensions.
type IPv6ExtensionSkipper struct {
NextHeader IPProtocol
BaseLayer
}
func (i *IPv6ExtensionSkipper) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
extension := decodeIPv6ExtensionBase(data)
i.BaseLayer = BaseLayer{data[:extension.ActualLength], data[extension.ActualLength:]}
i.NextHeader = extension.NextHeader
return nil
}
func (i *IPv6ExtensionSkipper) CanDecode() gopacket.LayerClass {
return LayerClassIPv6Extension
}
func (i *IPv6ExtensionSkipper) NextLayerType() gopacket.LayerType {
return i.NextHeader.LayerType()
}
// IPv6HopByHop is the IPv6 hop-by-hop extension.
type IPv6HopByHop struct {
ipv6ExtensionBase
Options []IPv6HopByHopOption
opts [2]IPv6HopByHopOption
}
// LayerType returns LayerTypeIPv6HopByHop.
func (i *IPv6HopByHop) LayerType() gopacket.LayerType { return LayerTypeIPv6HopByHop }
// IPv6Routing is the IPv6 routing extension.
type IPv6Routing struct {
ipv6ExtensionBase
RoutingType uint8
SegmentsLeft uint8
	// Reserved is the second set of 4 bytes in the extension; per RFC 2460 it is
	// supposed to be zero.
Reserved []byte
// SourceRoutingIPs is the set of IPv6 addresses requested for source routing,
// set only if RoutingType == 0.
SourceRoutingIPs []net
|
{
for _, o := range ip6.hbh.Options {
if o.OptionType == IPv6HopByHopOptionJumbogram {
if len(o.OptionData) != 4 {
return fmt.Errorf("Invalid jumbo packet option length")
}
payloadLength := binary.BigEndian.Uint32(o.OptionData)
pEnd := int(payloadLength)
if pEnd > len(ip6.Payload) {
df.SetTruncated()
} else {
ip6.Payload = ip6.Payload[:pEnd]
ip6.hbh.Payload = ip6.Payload
}
return nil
}
}
return fmt.Errorf("IPv6 length 0, but HopByHop header does not have jumbogram option")
}
|
conditional_block
|
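The ip6.Length == 0 branch above is the jumbogram path: the real payload length travels in the hop-by-hop option tagged IPv6HopByHopOptionJumbogram, whose data must be exactly four big-endian bytes. Here is a hedged sketch of just that extraction step, operating on option bytes that are assumed to have been isolated already.
package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

// payloadFromJumbogram mirrors the jumbogram branch above: the option data
// must be exactly 4 bytes and holds the true payload length in big-endian.
func payloadFromJumbogram(optionData []byte) (uint32, error) {
	if len(optionData) != 4 {
		return 0, errors.New("invalid jumbo packet option length")
	}
	return binary.BigEndian.Uint32(optionData), nil
}

func main() {
	// Hypothetical option data advertising a 100000-byte payload.
	data := []byte{0x00, 0x01, 0x86, 0xA0}
	n, err := payloadFromJumbogram(data)
	fmt.Println(n, err) // 100000 <nil>
}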
ip6.go
|
ip6.Version << 4) | (ip6.TrafficClass >> 4)
bytes[1] = (ip6.TrafficClass << 4) | uint8(ip6.FlowLabel>>16)
binary.BigEndian.PutUint16(bytes[2:], uint16(ip6.FlowLabel))
if opts.FixLengths {
ip6.Length = uint16(len(payload))
}
binary.BigEndian.PutUint16(bytes[4:], ip6.Length)
bytes[6] = byte(ip6.NextHeader)
bytes[7] = byte(ip6.HopLimit)
if len(ip6.SrcIP) != 16 {
return fmt.Errorf("invalid src ip %v", ip6.SrcIP)
}
if len(ip6.DstIP) != 16 {
return fmt.Errorf("invalid dst ip %v", ip6.DstIP)
}
copy(bytes[8:], ip6.SrcIP)
copy(bytes[24:], ip6.DstIP)
return nil
}
func (ip6 *IPv6) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
ip6.Version = uint8(data[0]) >> 4
ip6.TrafficClass = uint8((binary.BigEndian.Uint16(data[0:2]) >> 4) & 0x00FF)
ip6.FlowLabel = binary.BigEndian.Uint32(data[0:4]) & 0x000FFFFF
ip6.Length = binary.BigEndian.Uint16(data[4:6])
ip6.NextHeader = IPProtocol(data[6])
ip6.HopLimit = data[7]
ip6.SrcIP = data[8:24]
ip6.DstIP = data[24:40]
ip6.HopByHop = nil
// We initially set the payload to all bytes after 40. ip6.Length or the
// HopByHop jumbogram option can both change this eventually, though.
ip6.BaseLayer = BaseLayer{data[:40], data[40:]}
// We treat a HopByHop IPv6 option as part of the IPv6 packet, since its
// options are crucial for understanding what's actually happening per packet.
if ip6.NextHeader == IPProtocolIPv6HopByHop {
ip6.hbh.DecodeFromBytes(ip6.Payload, df)
hbhLen := len(ip6.hbh.Contents)
// Reset IPv6 contents to include the HopByHop header.
ip6.BaseLayer = BaseLayer{data[:40+hbhLen], data[40+hbhLen:]}
ip6.HopByHop = &ip6.hbh
if ip6.Length == 0 {
for _, o := range ip6.hbh.Options {
if o.OptionType == IPv6HopByHopOptionJumbogram {
if len(o.OptionData) != 4 {
return fmt.Errorf("Invalid jumbo packet option length")
}
payloadLength := binary.BigEndian.Uint32(o.OptionData)
pEnd := int(payloadLength)
if pEnd > len(ip6.Payload) {
df.SetTruncated()
} else {
ip6.Payload = ip6.Payload[:pEnd]
ip6.hbh.Payload = ip6.Payload
}
return nil
}
}
return fmt.Errorf("IPv6 length 0, but HopByHop header does not have jumbogram option")
}
}
if ip6.Length == 0 {
return fmt.Errorf("IPv6 length 0, but next header is %v, not HopByHop", ip6.NextHeader)
} else {
pEnd := int(ip6.Length)
if pEnd > len(ip6.Payload) {
df.SetTruncated()
pEnd = len(ip6.Payload)
}
ip6.Payload = ip6.Payload[:pEnd]
}
return nil
}
func (i *IPv6) CanDecode() gopacket.LayerClass {
return LayerTypeIPv6
}
func (i *IPv6) NextLayerType() gopacket.LayerType {
if i.HopByHop != nil {
return i.HopByHop.NextHeader.LayerType()
}
return i.NextHeader.LayerType()
}
func decodeIPv6(data []byte, p gopacket.PacketBuilder) error {
ip6 := &IPv6{}
err := ip6.DecodeFromBytes(data, p)
p.AddLayer(ip6)
p.SetNetworkLayer(ip6)
if ip6.HopByHop != nil {
// TODO(gconnell): Since HopByHop is now an integral part of the IPv6
// layer, should it actually be added as its own layer? I'm leaning towards
// no.
p.AddLayer(ip6.HopByHop)
}
if err != nil {
return err
}
if ip6.HopByHop != nil {
return p.NextDecoder(ip6.HopByHop.NextHeader)
}
return p.NextDecoder(ip6.NextHeader)
}
func (i *IPv6HopByHop) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error
|
func decodeIPv6HopByHop(data []byte, p gopacket.PacketBuilder) error {
i := &IPv6HopByHop{}
err := i.DecodeFromBytes(data, p)
p.AddLayer(i)
if err != nil {
return err
}
return p.NextDecoder(i.NextHeader)
}
type ipv6HeaderTLVOption struct {
OptionType, OptionLength uint8
ActualLength int
OptionData []byte
}
func decodeIPv6HeaderTLVOption(data []byte) (h ipv6HeaderTLVOption) {
if data[0] == 0 {
h.ActualLength = 1
return
}
h.OptionType = data[0]
h.OptionLength = data[1]
h.ActualLength = int(h.OptionLength) + 2
h.OptionData = data[2:h.ActualLength]
return
}
func (h *ipv6HeaderTLVOption) serializeTo(b gopacket.SerializeBuffer, fixLengths bool) (int, error) {
if fixLengths {
h.OptionLength = uint8(len(h.OptionData))
}
length := int(h.OptionLength) + 2
data, err := b.PrependBytes(length)
if err != nil {
return 0, err
}
data[0] = h.OptionType
data[1] = h.OptionLength
copy(data[2:], h.OptionData)
return length, nil
}
// IPv6HopByHopOption is a TLV option present in an IPv6 hop-by-hop extension.
type IPv6HopByHopOption ipv6HeaderTLVOption
type ipv6ExtensionBase struct {
BaseLayer
NextHeader IPProtocol
HeaderLength uint8
ActualLength int
}
func decodeIPv6ExtensionBase(data []byte) (i ipv6ExtensionBase) {
i.NextHeader = IPProtocol(data[0])
i.HeaderLength = data[1]
i.ActualLength = int(i.HeaderLength)*8 + 8
i.Contents = data[:i.ActualLength]
i.Payload = data[i.ActualLength:]
return
}
// IPv6ExtensionSkipper is a DecodingLayer which decodes and ignores v6
// extensions. You can use it with a DecodingLayerParser to handle IPv6 stacks
// which may or may not have extensions.
type IPv6ExtensionSkipper struct {
NextHeader IPProtocol
BaseLayer
}
func (i *IPv6ExtensionSkipper) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
extension := decodeIPv6ExtensionBase(data)
i.BaseLayer = BaseLayer{data[:extension.ActualLength], data[extension.ActualLength:]}
i.NextHeader = extension.NextHeader
return nil
}
func (i *IPv6ExtensionSkipper) CanDecode() gopacket.LayerClass {
return LayerClassIPv6Extension
}
func (i *IPv6ExtensionSkipper) NextLayerType() gopacket.LayerType {
return i.NextHeader.LayerType()
}
// IPv6HopByHop is the IPv6 hop-by-hop extension.
type IPv6HopByHop struct {
ipv6ExtensionBase
Options []IPv6HopByHopOption
opts [2]IPv6HopByHopOption
}
// LayerType returns LayerTypeIPv6HopByHop.
func (i *IPv6HopByHop) LayerType() gopacket.LayerType { return LayerTypeIPv6HopByHop }
// IPv6Routing is the IPv6 routing extension.
type IPv6Routing struct {
ipv6ExtensionBase
RoutingType uint8
SegmentsLeft uint8
	// Reserved is the second set of 4 bytes in the extension; per RFC 2460 it is
	// supposed to be zero.
Reserved []byte
// SourceRoutingIPs is the set of IPv6 addresses requested for source routing,
// set only if RoutingType == 0.
SourceRoutingIPs []net
|
{
i.ipv6ExtensionBase = decodeIPv6ExtensionBase(data)
i.Options = i.opts[:0]
var opt *IPv6HopByHopOption
for d := i.Contents[2:]; len(d) > 0; d = d[opt.ActualLength:] {
i.Options = append(i.Options, IPv6HopByHopOption(decodeIPv6HeaderTLVOption(d)))
opt = &i.Options[len(i.Options)-1]
}
return nil
}
|
identifier_body
|
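decodeIPv6HeaderTLVOption above treats a leading zero byte as the one-byte Pad1 option and otherwise reads a type byte, a length byte, and that many data bytes; the HopByHop loop then advances by ActualLength. The sketch below walks a hypothetical option buffer the same way, using a local struct and helper rather than the gopacket types.
package main

import "fmt"

type tlvOption struct {
	Type, Length uint8
	ActualLength int
	Data         []byte
}

// decodeTLV mirrors decodeIPv6HeaderTLVOption: 0x00 is the one-byte Pad1
// option, everything else is type, length, then length bytes of data.
func decodeTLV(data []byte) (h tlvOption) {
	if data[0] == 0 {
		h.ActualLength = 1
		return
	}
	h.Type = data[0]
	h.Length = data[1]
	h.ActualLength = int(h.Length) + 2
	h.Data = data[2:h.ActualLength]
	return
}

func main() {
	// Pad1, then a PadN option (type 0x01) carrying 2 bytes of padding data.
	buf := []byte{0x00, 0x01, 0x02, 0x00, 0x00}
	for d := buf; len(d) > 0; {
		opt := decodeTLV(d)
		fmt.Printf("type=%#x actual=%d\n", opt.Type, opt.ActualLength)
		d = d[opt.ActualLength:]
	}
}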
ip6.go
|
(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
payload := b.Bytes()
if ip6.HopByHop != nil {
return fmt.Errorf("unable to serialize hopbyhop for now")
}
bytes, err := b.PrependBytes(40)
if err != nil {
return err
}
bytes[0] = (ip6.Version << 4) | (ip6.TrafficClass >> 4)
bytes[1] = (ip6.TrafficClass << 4) | uint8(ip6.FlowLabel>>16)
binary.BigEndian.PutUint16(bytes[2:], uint16(ip6.FlowLabel))
if opts.FixLengths {
ip6.Length = uint16(len(payload))
}
binary.BigEndian.PutUint16(bytes[4:], ip6.Length)
bytes[6] = byte(ip6.NextHeader)
bytes[7] = byte(ip6.HopLimit)
if len(ip6.SrcIP) != 16 {
return fmt.Errorf("invalid src ip %v", ip6.SrcIP)
}
if len(ip6.DstIP) != 16 {
return fmt.Errorf("invalid dst ip %v", ip6.DstIP)
}
copy(bytes[8:], ip6.SrcIP)
copy(bytes[24:], ip6.DstIP)
return nil
}
func (ip6 *IPv6) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
ip6.Version = uint8(data[0]) >> 4
ip6.TrafficClass = uint8((binary.BigEndian.Uint16(data[0:2]) >> 4) & 0x00FF)
ip6.FlowLabel = binary.BigEndian.Uint32(data[0:4]) & 0x000FFFFF
ip6.Length = binary.BigEndian.Uint16(data[4:6])
ip6.NextHeader = IPProtocol(data[6])
ip6.HopLimit = data[7]
ip6.SrcIP = data[8:24]
ip6.DstIP = data[24:40]
ip6.HopByHop = nil
// We initially set the payload to all bytes after 40. ip6.Length or the
// HopByHop jumbogram option can both change this eventually, though.
ip6.BaseLayer = BaseLayer{data[:40], data[40:]}
// We treat a HopByHop IPv6 option as part of the IPv6 packet, since its
// options are crucial for understanding what's actually happening per packet.
if ip6.NextHeader == IPProtocolIPv6HopByHop {
ip6.hbh.DecodeFromBytes(ip6.Payload, df)
hbhLen := len(ip6.hbh.Contents)
// Reset IPv6 contents to include the HopByHop header.
ip6.BaseLayer = BaseLayer{data[:40+hbhLen], data[40+hbhLen:]}
ip6.HopByHop = &ip6.hbh
if ip6.Length == 0 {
for _, o := range ip6.hbh.Options {
if o.OptionType == IPv6HopByHopOptionJumbogram {
if len(o.OptionData) != 4 {
return fmt.Errorf("Invalid jumbo packet option length")
}
payloadLength := binary.BigEndian.Uint32(o.OptionData)
pEnd := int(payloadLength)
if pEnd > len(ip6.Payload) {
df.SetTruncated()
} else {
ip6.Payload = ip6.Payload[:pEnd]
ip6.hbh.Payload = ip6.Payload
}
return nil
}
}
return fmt.Errorf("IPv6 length 0, but HopByHop header does not have jumbogram option")
}
}
if ip6.Length == 0 {
return fmt.Errorf("IPv6 length 0, but next header is %v, not HopByHop", ip6.NextHeader)
} else {
pEnd := int(ip6.Length)
if pEnd > len(ip6.Payload) {
df.SetTruncated()
pEnd = len(ip6.Payload)
}
ip6.Payload = ip6.Payload[:pEnd]
}
return nil
}
func (i *IPv6) CanDecode() gopacket.LayerClass {
return LayerTypeIPv6
}
func (i *IPv6) NextLayerType() gopacket.LayerType {
if i.HopByHop != nil {
return i.HopByHop.NextHeader.LayerType()
}
return i.NextHeader.LayerType()
}
func decodeIPv6(data []byte, p gopacket.PacketBuilder) error {
ip6 := &IPv6{}
err := ip6.DecodeFromBytes(data, p)
p.AddLayer(ip6)
p.SetNetworkLayer(ip6)
if ip6.HopByHop != nil {
// TODO(gconnell): Since HopByHop is now an integral part of the IPv6
// layer, should it actually be added as its own layer? I'm leaning towards
// no.
p.AddLayer(ip6.HopByHop)
}
if err != nil {
return err
}
if ip6.HopByHop != nil {
return p.NextDecoder(ip6.HopByHop.NextHeader)
}
return p.NextDecoder(ip6.NextHeader)
}
func (i *IPv6HopByHop) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
i.ipv6ExtensionBase = decodeIPv6ExtensionBase(data)
i.Options = i.opts[:0]
var opt *IPv6HopByHopOption
for d := i.Contents[2:]; len(d) > 0; d = d[opt.ActualLength:] {
i.Options = append(i.Options, IPv6HopByHopOption(decodeIPv6HeaderTLVOption(d)))
opt = &i.Options[len(i.Options)-1]
}
return nil
}
func decodeIPv6HopByHop(data []byte, p gopacket.PacketBuilder) error {
i := &IPv6HopByHop{}
err := i.DecodeFromBytes(data, p)
p.AddLayer(i)
if err != nil {
return err
}
return p.NextDecoder(i.NextHeader)
}
type ipv6HeaderTLVOption struct {
OptionType, OptionLength uint8
ActualLength int
OptionData []byte
}
func decodeIPv6HeaderTLVOption(data []byte) (h ipv6HeaderTLVOption) {
if data[0] == 0 {
h.ActualLength = 1
return
}
h.OptionType = data[0]
h.OptionLength = data[1]
h.ActualLength = int(h.OptionLength) + 2
h.OptionData = data[2:h.ActualLength]
return
}
func (h *ipv6HeaderTLVOption) serializeTo(b gopacket.SerializeBuffer, fixLengths bool) (int, error) {
if fixLengths {
h.OptionLength = uint8(len(h.OptionData))
}
length := int(h.OptionLength) + 2
data, err := b.PrependBytes(length)
if err != nil {
return 0, err
}
data[0] = h.OptionType
data[1] = h.OptionLength
copy(data[2:], h.OptionData)
return length, nil
}
// IPv6HopByHopOption is a TLV option present in an IPv6 hop-by-hop extension.
type IPv6HopByHopOption ipv6HeaderTLVOption
type ipv6ExtensionBase struct {
BaseLayer
NextHeader IPProtocol
HeaderLength uint8
ActualLength int
}
func decodeIPv6ExtensionBase(data []byte) (i ipv6ExtensionBase) {
i.NextHeader = IPProtocol(data[0])
i.HeaderLength = data[1]
i.ActualLength = int(i.HeaderLength)*8 + 8
i.Contents = data[:i.ActualLength]
i.Payload = data[i.ActualLength:]
return
}
// IPv6ExtensionSkipper is a DecodingLayer which decodes and ignores v6
// extensions. You can use it with a DecodingLayerParser to handle IPv6 stacks
// which may or may not have extensions.
type IPv6ExtensionSkipper struct {
NextHeader IPProtocol
BaseLayer
}
func (i *IPv6ExtensionSkipper) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
extension := decodeIPv6ExtensionBase(data)
i.BaseLayer = BaseLayer{data[:extension.ActualLength], data[extension.ActualLength:]}
i.NextHeader = extension.NextHeader
return nil
}
func (i *IPv6ExtensionSkipper) CanDecode() gopacket.LayerClass {
return LayerClassIPv6Extension
}
func (i *IPv6ExtensionSkipper) NextLayerType() gopacket.LayerType {
return i.NextHeader.LayerType()
}
// IPv6HopByHop is the IPv6 hop-by-hop extension.
type IPv6HopByHop struct {
ipv6ExtensionBase
Options []IPv6HopByHopOption
opts [2]IPv6HopByHopOption
}
// LayerType returns LayerTypeIPv6HopByHop.
func (i *IPv6HopByHop) LayerType() gopacket.LayerType { return LayerTypeIPv6HopByHop }
// IPv6Routing is the IPv6 routing extension.
type IPv6Routing struct {
ipv6ExtensionBase
RoutingType uint8
Segments
|
SerializeTo
|
identifier_name
|
|
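decodeIPv6ExtensionBase above derives the on-wire size of an extension header as HeaderLength*8 + 8, because the length field counts 8-octet units beyond the first fixed 8 octets. A tiny worked example of that arithmetic:
package main

import "fmt"

// extensionLength mirrors decodeIPv6ExtensionBase: the header-length byte
// counts 8-octet units that follow the first fixed 8 octets.
func extensionLength(headerLengthField uint8) int {
	return int(headerLengthField)*8 + 8
}

func main() {
	for _, hl := range []uint8{0, 1, 3} {
		fmt.Printf("HeaderLength=%d -> %d bytes on the wire\n", hl, extensionLength(hl))
	}
	// HeaderLength=0 -> 8 bytes, 1 -> 16 bytes, 3 -> 32 bytes
}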
pod.go
|
od.Name = opts.Name
}
// Override default container name if applicable
if opts.ContainerName != "" {
pod.Spec.Containers[0].Name = opts.ContainerName
}
// Add Annotations and Labels, if specified
if opts.Annotations != nil {
pod.ObjectMeta.Annotations = opts.Annotations
}
if pod.ObjectMeta.Labels == nil {
pod.ObjectMeta.Labels = map[string]string{}
}
if opts.OwnerReferences != nil {
pod.SetOwnerReferences(opts.OwnerReferences)
}
if opts.PodSecurityContext != nil {
pod.Spec.SecurityContext = opts.PodSecurityContext
}
if opts.ContainerSecurityContext != nil {
pod.Spec.Containers[0].SecurityContext = opts.ContainerSecurityContext
}
if opts.Lifecycle != nil {
pod.Spec.Containers[0].Lifecycle = opts.Lifecycle
}
for key, value := range opts.Labels {
pod.ObjectMeta.Labels[key] = value
}
pod.Namespace = ns
return pod, nil
}
// CreatePod creates a pod with a single container based on the specified image
func CreatePod(ctx context.Context, cli kubernetes.Interface, opts *PodOptions) (*v1.Pod, error) {
pod, err := GetPodObjectFromPodOptions(cli, opts)
if err != nil {
return nil, errors.Wrapf(err, "Failed to get pod from podOptions. Namespace: %s, NameFmt: %s", opts.Namespace, opts.GenerateName)
}
pod, err = cli.CoreV1().Pods(pod.Namespace).Create(ctx, pod, metav1.CreateOptions{})
if err != nil {
return nil, errors.Wrapf(err, "Failed to create pod. Namespace: %s, NameFmt: %s", opts.Namespace, opts.GenerateName)
}
return pod, nil
}
// DeletePod deletes the specified pod
func DeletePod(ctx context.Context, cli kubernetes.Interface, pod *v1.Pod) error {
if err := cli.CoreV1().Pods(pod.Namespace).Delete(ctx, pod.Name, metav1.DeleteOptions{}); err != nil {
log.WithError(err).Print("DeletePod failed")
}
return nil
}
func StreamPodLogs(ctx context.Context, cli kubernetes.Interface, namespace, podName, containerName string) (io.ReadCloser, error) {
plo := &v1.PodLogOptions{
Follow: true,
Container: containerName,
}
return cli.CoreV1().Pods(namespace).GetLogs(podName, plo).Stream(ctx)
}
// GetPodLogs fetches the logs from the given pod
func GetPodLogs(ctx context.Context, cli kubernetes.Interface, namespace, podName, containerName string) (string, error) {
reader, err := cli.CoreV1().Pods(namespace).GetLogs(podName, &v1.PodLogOptions{Container: containerName}).Stream(ctx)
if err != nil {
return "", err
}
defer reader.Close()
bytes, err := io.ReadAll(reader)
if err != nil {
return "", err
}
return string(bytes), nil
}
// getErrorFromLogs fetches logs from pod and constructs error containing last ten lines of log and specified error message
func getErrorFromLogs(ctx context.Context, cli kubernetes.Interface, namespace, podName, containerName string, err error, errorMessage string) error {
r, logErr := StreamPodLogs(ctx, cli, namespace, podName, containerName)
if logErr != nil {
return errors.Wrapf(logErr, "Failed to fetch logs from the pod")
}
defer r.Close()
// Grab last log lines and put them to an error
lt := NewLogTail(logTailDefaultLength)
// We are not interested in log extraction error
io.Copy(lt, r) // nolint: errcheck
return errors.Wrap(errors.Wrap(err, lt.ToString()), errorMessage)
}
// WaitForPodReady waits for a pod to exit the pending state
func WaitForPodReady(ctx context.Context, cli kubernetes.Interface, namespace, name string) error {
timeoutCtx, waitCancel := context.WithTimeout(ctx, GetPodReadyWaitTimeout())
defer waitCancel()
attachLog := true
containerForLogs := ""
err := poll.Wait(timeoutCtx, func(ctx context.Context) (bool, error) {
p, err := cli.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{})
if err != nil {
attachLog = false
return false, err
}
containerForLogs = p.Spec.Containers[0].Name
// check if nodes are up and available
err = checkNodesStatus(p, cli)
if err != nil && !strings.Contains(err.Error(), errAccessingNode) {
attachLog = false
return false, err
}
// check for memory or resource issues
if p.Status.Phase == v1.PodPending {
if p.Status.Reason == "OutOfmemory" || p.Status.Reason == "OutOfcpu" {
attachLog = false
return false, errors.Errorf("Pod stuck in pending state, reason: %s", p.Status.Reason)
}
}
// check if pvc and pv are up and ready to mount
if err := getVolStatus(timeoutCtx, p, cli, namespace); err != nil {
attachLog = false
return false, err
}
return p.Status.Phase != v1.PodPending && p.Status.Phase != "", nil
})
if err == nil {
return nil
}
errorMessage := fmt.Sprintf("Pod did not transition into running state. Timeout:%v Namespace:%s, Name:%s", GetPodReadyWaitTimeout(), namespace, name)
if attachLog {
return getErrorFromLogs(ctx, cli, namespace, name, containerForLogs, err, errorMessage)
}
return errors.Wrap(err, errorMessage)
}
func checkNodesStatus(p *v1.Pod, cli kubernetes.Interface) error {
n := strings.Split(p.Spec.NodeName, "/")
if n[0] != "" {
node, err := cli.CoreV1().Nodes().Get(context.TODO(), n[0], metav1.GetOptions{})
if err != nil {
return errors.Wrapf(err, "%s %s", errAccessingNode, n[0])
}
if !IsNodeReady(node) || !IsNodeSchedulable(node) {
return errors.Errorf("Node %s is currently not ready/schedulable", n[0])
}
}
return nil
}
// getVolStatus checks every volume attached to the pod: for each PVC-backed
// volume it verifies the claim (and its PV, if bound) is in a usable state via
// checkPVCAndPVStatus below.
func getVolStatus(ctx context.Context, p *v1.Pod, cli kubernetes.Interface, namespace string) error {
for _, vol := range p.Spec.Volumes {
if err := checkPVCAndPVStatus(ctx, vol, p, cli, namespace); err != nil {
return err
}
}
return nil
}
// checkPVCAndPVStatus does the following:
// - if PVC is present then check the status of PVC
// - if PVC is pending then check if the PV status is VolumeFailed return error if so. if not then wait for timeout.
// - if PVC not present then wait for timeout
func checkPVCAndPVStatus(ctx context.Context, vol v1.Volume, p *v1.Pod, cli kubernetes.Interface, namespace string) error {
if vol.VolumeSource.PersistentVolumeClaim == nil {
// wait for timeout
return nil
}
pvcName := vol.VolumeSource.PersistentVolumeClaim.ClaimName
pvc, err := cli.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvcName, metav1.GetOptions{})
if err != nil {
if apierrors.IsNotFound(errors.Cause(err)) {
			// Do not return err; wait for the timeout, since statefulsets sometimes trigger creation of the volume later
return nil
} else {
return errors.Wrapf(err, "Failed to get PVC %s", pvcName)
}
}
switch pvc.Status.Phase {
case v1.ClaimLost:
		return errors.Errorf("PVC %s associated with pod %s has status: %s", pvcName, p.Name, v1.ClaimLost)
case v1.ClaimPending:
pvName := pvc.Spec.VolumeName
if pvName == "" {
// wait for timeout
return nil
}
pv, err := cli.CoreV1().PersistentVolumes().Get(ctx, pvName, metav1.GetOptions{})
if err != nil {
if apierrors.IsNotFound(errors.Cause(err)) {
// wait for timeout
return nil
} else {
return errors.Wrapf(err, "Failed to get PV %s", pvName)
}
}
if pv.Status.Phase == v1.VolumeFailed {
return errors.Errorf("PV %s associated with PVC %s has status: %s message: %s reason: %s namespace: %s", pvName, pvcName, v1.VolumeFailed, pv.Status.Message, pv.Status.Reason, namespace)
}
}
return nil
}
// WaitForPodCompletion waits for a pod to reach a terminal state, or timeout
func
|
WaitForPodCompletion
|
identifier_name
|
|
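GetPodObjectFromPodOptions builds the pod spec and CreatePod submits it; the usual flow is to create the pod and then block in WaitForPodReady. The sketch below assumes it lives in the same package as those helpers, and it only sets option fields the helpers above actually read; the real PodOptions struct has more.
// Illustrative sketch only: PodOptions, CreatePod, and WaitForPodReady are the
// helpers defined above, not redefined here.
package kube

import (
	"context"

	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
)

func runScratchPod(ctx context.Context, cli kubernetes.Interface) (*v1.Pod, error) {
	opts := &PodOptions{
		Namespace:    "default",
		GenerateName: "scratch-pod-",
		Image:        "busybox:1.36",
		Command:      []string{"sh", "-c", "sleep 300"},
		Labels:       map[string]string{"app": "scratch"},
	}
	pod, err := CreatePod(ctx, cli, opts)
	if err != nil {
		return nil, err
	}
	// Block until the pod leaves Pending (or the ready timeout expires).
	if err := WaitForPodReady(ctx, cli, pod.Namespace, pod.Name); err != nil {
		return nil, err
	}
	return pod, nil
}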
pod.go
|
[]metav1.OwnerReference
EnvironmentVariables []v1.EnvVar
Lifecycle *v1.Lifecycle
}
func GetPodObjectFromPodOptions(cli kubernetes.Interface, opts *PodOptions) (*v1.Pod, error) {
// If Namespace is not specified, use the controller Namespace.
cns, err := GetControllerNamespace()
if err != nil {
return nil, errors.Wrapf(err, "Failed to get controller namespace")
}
ns := opts.Namespace
if ns == "" {
ns = cns
}
// If a ServiceAccount is not specified and we are in the controller's
// namespace, use the same service account as the controller.
sa := opts.ServiceAccountName
if sa == "" && ns == cns {
sa, err = GetControllerServiceAccount(cli)
if err != nil {
return nil, errors.Wrap(err, "Failed to get Controller Service Account")
}
}
if opts.RestartPolicy == "" {
opts.RestartPolicy = v1.RestartPolicyNever
}
volumeMounts, podVolumes, err := createFilesystemModeVolumeSpecs(opts.Volumes)
if err != nil {
return nil, errors.Wrapf(err, "Failed to create volume spec")
}
volumeDevices, blockVolumes, err := createBlockModeVolumeSpecs(opts.BlockVolumes)
if err != nil {
return nil, errors.Wrapf(err, "Failed to create raw block volume spec")
}
podVolumes = append(podVolumes, blockVolumes...)
defaultSpecs := v1.PodSpec{
Containers: []v1.Container{
{
Name: defaultContainerName,
Image: opts.Image,
Command: opts.Command,
ImagePullPolicy: v1.PullPolicy(v1.PullIfNotPresent),
VolumeMounts: volumeMounts,
VolumeDevices: volumeDevices,
Resources: opts.Resources,
},
},
// RestartPolicy dictates when the containers of the pod should be
// restarted. The possible values include Always, OnFailure and Never
// with Never being the default. OnFailure policy will result in
// failed containers being restarted with an exponential back-off delay.
RestartPolicy: opts.RestartPolicy,
Volumes: podVolumes,
ServiceAccountName: sa,
}
if opts.EnvironmentVariables != nil && len(opts.EnvironmentVariables) > 0 {
defaultSpecs.Containers[0].Env = opts.EnvironmentVariables
}
// Patch default Pod Specs if needed
patchedSpecs, err := patchDefaultPodSpecs(defaultSpecs, opts.PodOverride)
if err != nil {
return nil, errors.Wrapf(err, "Failed to create pod. Failed to override pod specs. Namespace: %s, NameFmt: %s", opts.Namespace, opts.GenerateName)
}
	// Always put the main container first
sort.Slice(patchedSpecs.Containers, func(i, j int) bool {
return patchedSpecs.Containers[i].Name == defaultContainerName
})
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
GenerateName: opts.GenerateName,
Labels: map[string]string{
consts.LabelKeyCreatedBy: consts.LabelValueKanister,
},
},
Spec: patchedSpecs,
}
// Override `GenerateName` if `Name` option is provided
if opts.Name != "" {
pod.Name = opts.Name
}
// Override default container name if applicable
if opts.ContainerName != "" {
pod.Spec.Containers[0].Name = opts.ContainerName
}
// Add Annotations and Labels, if specified
if opts.Annotations != nil {
pod.ObjectMeta.Annotations = opts.Annotations
}
if pod.ObjectMeta.Labels == nil {
pod.ObjectMeta.Labels = map[string]string{}
}
if opts.OwnerReferences != nil {
pod.SetOwnerReferences(opts.OwnerReferences)
}
if opts.PodSecurityContext != nil {
pod.Spec.SecurityContext = opts.PodSecurityContext
}
if opts.ContainerSecurityContext != nil {
pod.Spec.Containers[0].SecurityContext = opts.ContainerSecurityContext
}
if opts.Lifecycle != nil {
pod.Spec.Containers[0].Lifecycle = opts.Lifecycle
}
for key, value := range opts.Labels {
pod.ObjectMeta.Labels[key] = value
}
pod.Namespace = ns
return pod, nil
}
// CreatePod creates a pod with a single container based on the specified image
func CreatePod(ctx context.Context, cli kubernetes.Interface, opts *PodOptions) (*v1.Pod, error)
|
// DeletePod deletes the specified pod
func DeletePod(ctx context.Context, cli kubernetes.Interface, pod *v1.Pod) error {
if err := cli.CoreV1().Pods(pod.Namespace).Delete(ctx, pod.Name, metav1.DeleteOptions{}); err != nil {
log.WithError(err).Print("DeletePod failed")
}
return nil
}
func StreamPodLogs(ctx context.Context, cli kubernetes.Interface, namespace, podName, containerName string) (io.ReadCloser, error) {
plo := &v1.PodLogOptions{
Follow: true,
Container: containerName,
}
return cli.CoreV1().Pods(namespace).GetLogs(podName, plo).Stream(ctx)
}
// GetPodLogs fetches the logs from the given pod
func GetPodLogs(ctx context.Context, cli kubernetes.Interface, namespace, podName, containerName string) (string, error) {
reader, err := cli.CoreV1().Pods(namespace).GetLogs(podName, &v1.PodLogOptions{Container: containerName}).Stream(ctx)
if err != nil {
return "", err
}
defer reader.Close()
bytes, err := io.ReadAll(reader)
if err != nil {
return "", err
}
return string(bytes), nil
}
// getErrorFromLogs fetches logs from pod and constructs error containing last ten lines of log and specified error message
func getErrorFromLogs(ctx context.Context, cli kubernetes.Interface, namespace, podName, containerName string, err error, errorMessage string) error {
r, logErr := StreamPodLogs(ctx, cli, namespace, podName, containerName)
if logErr != nil {
return errors.Wrapf(logErr, "Failed to fetch logs from the pod")
}
defer r.Close()
// Grab last log lines and put them to an error
lt := NewLogTail(logTailDefaultLength)
// We are not interested in log extraction error
io.Copy(lt, r) // nolint: errcheck
return errors.Wrap(errors.Wrap(err, lt.ToString()), errorMessage)
}
// WaitForPodReady waits for a pod to exit the pending state
func WaitForPodReady(ctx context.Context, cli kubernetes.Interface, namespace, name string) error {
timeoutCtx, waitCancel := context.WithTimeout(ctx, GetPodReadyWaitTimeout())
defer waitCancel()
attachLog := true
containerForLogs := ""
err := poll.Wait(timeoutCtx, func(ctx context.Context) (bool, error) {
p, err := cli.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{})
if err != nil {
attachLog = false
return false, err
}
containerForLogs = p.Spec.Containers[0].Name
// check if nodes are up and available
err = checkNodesStatus(p, cli)
if err != nil && !strings.Contains(err.Error(), errAccessingNode) {
attachLog = false
return false, err
}
// check for memory or resource issues
if p.Status.Phase == v1.PodPending {
if p.Status.Reason == "OutOfmemory" || p.Status.Reason == "OutOfcpu" {
attachLog = false
return false, errors.Errorf("Pod stuck in pending state, reason: %s", p.Status.Reason)
}
}
// check if pvc and pv are up and ready to mount
if err := getVolStatus(timeoutCtx, p, cli, namespace); err != nil {
attachLog = false
return false, err
}
return p.Status.Phase != v1.PodPending && p.Status.Phase != "", nil
})
if err == nil {
return nil
}
errorMessage := fmt.Sprintf("Pod did not transition into running state. Timeout:%v Namespace:%s, Name:%s", GetPodReadyWaitTimeout(), namespace, name)
if attachLog {
return getErrorFromLogs(ctx, cli, namespace, name, containerForLogs, err, errorMessage)
}
return errors.Wrap(err, errorMessage)
}
func checkNodesStatus(p *v1.Pod, cli kubernetes.Interface) error {
n := strings.Split(p.Spec.NodeName, "/")
if n[0] !=
|
{
pod, err := GetPodObjectFromPodOptions(cli, opts)
if err != nil {
return nil, errors.Wrapf(err, "Failed to get pod from podOptions. Namespace: %s, NameFmt: %s", opts.Namespace, opts.GenerateName)
}
pod, err = cli.CoreV1().Pods(pod.Namespace).Create(ctx, pod, metav1.CreateOptions{})
if err != nil {
return nil, errors.Wrapf(err, "Failed to create pod. Namespace: %s, NameFmt: %s", opts.Namespace, opts.GenerateName)
}
return pod, nil
}
|
identifier_body
|
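WaitForPodReady above bounds the wait with context.WithTimeout and hands a condition function to poll.Wait. The shape of that loop, reduced to the standard library (this is not the poll package the code uses, just the pattern):
package main

import (
	"context"
	"fmt"
	"time"
)

// waitFor polls cond every interval until it reports done, returns an error,
// or the context deadline expires.
func waitFor(ctx context.Context, interval time.Duration, cond func(context.Context) (bool, error)) error {
	t := time.NewTicker(interval)
	defer t.Stop()
	for {
		done, err := cond(ctx)
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-t.C:
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	start := time.Now()
	err := waitFor(ctx, 200*time.Millisecond, func(context.Context) (bool, error) {
		return time.Since(start) > time.Second, nil
	})
	fmt.Println(err) // <nil>
}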
pod.go
|
if ns == "" {
ns = cns
}
// If a ServiceAccount is not specified and we are in the controller's
// namespace, use the same service account as the controller.
sa := opts.ServiceAccountName
if sa == "" && ns == cns {
sa, err = GetControllerServiceAccount(cli)
if err != nil {
return nil, errors.Wrap(err, "Failed to get Controller Service Account")
}
}
if opts.RestartPolicy == "" {
opts.RestartPolicy = v1.RestartPolicyNever
}
volumeMounts, podVolumes, err := createFilesystemModeVolumeSpecs(opts.Volumes)
if err != nil {
return nil, errors.Wrapf(err, "Failed to create volume spec")
}
volumeDevices, blockVolumes, err := createBlockModeVolumeSpecs(opts.BlockVolumes)
if err != nil {
return nil, errors.Wrapf(err, "Failed to create raw block volume spec")
}
podVolumes = append(podVolumes, blockVolumes...)
defaultSpecs := v1.PodSpec{
Containers: []v1.Container{
{
Name: defaultContainerName,
Image: opts.Image,
Command: opts.Command,
ImagePullPolicy: v1.PullPolicy(v1.PullIfNotPresent),
VolumeMounts: volumeMounts,
VolumeDevices: volumeDevices,
Resources: opts.Resources,
},
},
// RestartPolicy dictates when the containers of the pod should be
// restarted. The possible values include Always, OnFailure and Never
// with Never being the default. OnFailure policy will result in
// failed containers being restarted with an exponential back-off delay.
RestartPolicy: opts.RestartPolicy,
Volumes: podVolumes,
ServiceAccountName: sa,
}
if opts.EnvironmentVariables != nil && len(opts.EnvironmentVariables) > 0 {
defaultSpecs.Containers[0].Env = opts.EnvironmentVariables
}
// Patch default Pod Specs if needed
patchedSpecs, err := patchDefaultPodSpecs(defaultSpecs, opts.PodOverride)
if err != nil {
return nil, errors.Wrapf(err, "Failed to create pod. Failed to override pod specs. Namespace: %s, NameFmt: %s", opts.Namespace, opts.GenerateName)
}
	// Always put the main container first
sort.Slice(patchedSpecs.Containers, func(i, j int) bool {
return patchedSpecs.Containers[i].Name == defaultContainerName
})
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
GenerateName: opts.GenerateName,
Labels: map[string]string{
consts.LabelKeyCreatedBy: consts.LabelValueKanister,
},
},
Spec: patchedSpecs,
}
// Override `GenerateName` if `Name` option is provided
if opts.Name != "" {
pod.Name = opts.Name
}
// Override default container name if applicable
if opts.ContainerName != "" {
pod.Spec.Containers[0].Name = opts.ContainerName
}
// Add Annotations and Labels, if specified
if opts.Annotations != nil {
pod.ObjectMeta.Annotations = opts.Annotations
}
if pod.ObjectMeta.Labels == nil {
pod.ObjectMeta.Labels = map[string]string{}
}
if opts.OwnerReferences != nil {
pod.SetOwnerReferences(opts.OwnerReferences)
}
if opts.PodSecurityContext != nil {
pod.Spec.SecurityContext = opts.PodSecurityContext
}
if opts.ContainerSecurityContext != nil {
pod.Spec.Containers[0].SecurityContext = opts.ContainerSecurityContext
}
if opts.Lifecycle != nil {
pod.Spec.Containers[0].Lifecycle = opts.Lifecycle
}
for key, value := range opts.Labels {
pod.ObjectMeta.Labels[key] = value
}
pod.Namespace = ns
return pod, nil
}
// CreatePod creates a pod with a single container based on the specified image
func CreatePod(ctx context.Context, cli kubernetes.Interface, opts *PodOptions) (*v1.Pod, error) {
pod, err := GetPodObjectFromPodOptions(cli, opts)
if err != nil {
return nil, errors.Wrapf(err, "Failed to get pod from podOptions. Namespace: %s, NameFmt: %s", opts.Namespace, opts.GenerateName)
}
pod, err = cli.CoreV1().Pods(pod.Namespace).Create(ctx, pod, metav1.CreateOptions{})
if err != nil {
return nil, errors.Wrapf(err, "Failed to create pod. Namespace: %s, NameFmt: %s", opts.Namespace, opts.GenerateName)
}
return pod, nil
}
// DeletePod deletes the specified pod
func DeletePod(ctx context.Context, cli kubernetes.Interface, pod *v1.Pod) error {
if err := cli.CoreV1().Pods(pod.Namespace).Delete(ctx, pod.Name, metav1.DeleteOptions{}); err != nil {
log.WithError(err).Print("DeletePod failed")
}
return nil
}
func StreamPodLogs(ctx context.Context, cli kubernetes.Interface, namespace, podName, containerName string) (io.ReadCloser, error) {
plo := &v1.PodLogOptions{
Follow: true,
Container: containerName,
}
return cli.CoreV1().Pods(namespace).GetLogs(podName, plo).Stream(ctx)
}
// GetPodLogs fetches the logs from the given pod
func GetPodLogs(ctx context.Context, cli kubernetes.Interface, namespace, podName, containerName string) (string, error) {
reader, err := cli.CoreV1().Pods(namespace).GetLogs(podName, &v1.PodLogOptions{Container: containerName}).Stream(ctx)
if err != nil {
return "", err
}
defer reader.Close()
bytes, err := io.ReadAll(reader)
if err != nil {
return "", err
}
return string(bytes), nil
}
// getErrorFromLogs fetches logs from pod and constructs error containing last ten lines of log and specified error message
func getErrorFromLogs(ctx context.Context, cli kubernetes.Interface, namespace, podName, containerName string, err error, errorMessage string) error {
r, logErr := StreamPodLogs(ctx, cli, namespace, podName, containerName)
if logErr != nil {
return errors.Wrapf(logErr, "Failed to fetch logs from the pod")
}
defer r.Close()
// Grab last log lines and put them to an error
lt := NewLogTail(logTailDefaultLength)
// We are not interested in log extraction error
io.Copy(lt, r) // nolint: errcheck
return errors.Wrap(errors.Wrap(err, lt.ToString()), errorMessage)
}
// WaitForPodReady waits for a pod to exit the pending state
func WaitForPodReady(ctx context.Context, cli kubernetes.Interface, namespace, name string) error {
timeoutCtx, waitCancel := context.WithTimeout(ctx, GetPodReadyWaitTimeout())
defer waitCancel()
attachLog := true
containerForLogs := ""
err := poll.Wait(timeoutCtx, func(ctx context.Context) (bool, error) {
p, err := cli.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{})
if err != nil {
attachLog = false
return false, err
}
containerForLogs = p.Spec.Containers[0].Name
// check if nodes are up and available
err = checkNodesStatus(p, cli)
if err != nil && !strings.Contains(err.Error(), errAccessingNode) {
attachLog = false
return false, err
}
// check for memory or resource issues
if p.Status.Phase == v1.PodPending {
if p.Status.Reason == "OutOfmemory" || p.Status.Reason == "OutOfcpu" {
attachLog = false
return false, errors.Errorf("Pod stuck in pending state, reason: %s", p.Status.Reason)
}
}
// check if pvc and pv are up and ready to mount
if err := getVolStatus(timeoutCtx, p, cli, namespace); err != nil {
attachLog = false
return false, err
}
return p.Status.Phase != v1.PodPending && p.Status.Phase != "", nil
})
if err == nil {
return nil
}
errorMessage := fmt.Sprintf("Pod did not transition into running state. Timeout:%v Namespace:%s, Name:%s", GetPodReadyWaitTimeout(), namespace, name)
if attachLog {
return getErrorFromLogs(ctx, cli, namespace, name, containerForLogs, err, errorMessage)
}
return errors.Wrap(err, errorMessage)
}
func checkNodesStatus(p *v1.Pod, cli kubernetes.Interface) error {
n := strings.Split(p.Spec.NodeName, "/")
if n[0] != ""
|
{
node, err := cli.CoreV1().Nodes().Get(context.TODO(), n[0], metav1.GetOptions{})
if err != nil {
return errors.Wrapf(err, "%s %s", errAccessingNode, n[0])
}
if !IsNodeReady(node) || !IsNodeSchedulable(node) {
return errors.Errorf("Node %s is currently not ready/schedulable", n[0])
}
}
|
conditional_block
|
|
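checkPVCAndPVStatus fails fast on ClaimLost and only digs into the PV when the claim is still Pending. Below is a hedged sketch of exercising that phase switch against client-go's fake clientset, with a made-up claim name; it shows the Get call and phase handling, not the full PV follow-up.
package main

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
)

// claimState fetches a PVC and classifies its phase the way the code above does.
func claimState(ctx context.Context, cli kubernetes.Interface, ns, name string) (string, error) {
	pvc, err := cli.CoreV1().PersistentVolumeClaims(ns).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	switch pvc.Status.Phase {
	case v1.ClaimLost:
		return "lost", nil
	case v1.ClaimPending:
		return "pending", nil
	default:
		return "ok", nil
	}
}

func main() {
	pvc := &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: "data", Namespace: "default"},
		Status:     v1.PersistentVolumeClaimStatus{Phase: v1.ClaimPending},
	}
	cli := fake.NewSimpleClientset(pvc)
	state, err := claimState(context.Background(), cli, "default", "data")
	fmt.Println(state, err) // pending <nil>
}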
pod.go
|
[]metav1.OwnerReference
EnvironmentVariables []v1.EnvVar
Lifecycle *v1.Lifecycle
}
func GetPodObjectFromPodOptions(cli kubernetes.Interface, opts *PodOptions) (*v1.Pod, error) {
// If Namespace is not specified, use the controller Namespace.
cns, err := GetControllerNamespace()
if err != nil {
return nil, errors.Wrapf(err, "Failed to get controller namespace")
}
ns := opts.Namespace
if ns == "" {
ns = cns
}
// If a ServiceAccount is not specified and we are in the controller's
// namespace, use the same service account as the controller.
sa := opts.ServiceAccountName
if sa == "" && ns == cns {
sa, err = GetControllerServiceAccount(cli)
if err != nil {
return nil, errors.Wrap(err, "Failed to get Controller Service Account")
}
}
if opts.RestartPolicy == "" {
opts.RestartPolicy = v1.RestartPolicyNever
}
volumeMounts, podVolumes, err := createFilesystemModeVolumeSpecs(opts.Volumes)
if err != nil {
return nil, errors.Wrapf(err, "Failed to create volume spec")
}
volumeDevices, blockVolumes, err := createBlockModeVolumeSpecs(opts.BlockVolumes)
if err != nil {
return nil, errors.Wrapf(err, "Failed to create raw block volume spec")
}
podVolumes = append(podVolumes, blockVolumes...)
defaultSpecs := v1.PodSpec{
Containers: []v1.Container{
{
Name: defaultContainerName,
Image: opts.Image,
Command: opts.Command,
ImagePullPolicy: v1.PullPolicy(v1.PullIfNotPresent),
VolumeMounts: volumeMounts,
VolumeDevices: volumeDevices,
Resources: opts.Resources,
},
},
// RestartPolicy dictates when the containers of the pod should be
// restarted. The possible values include Always, OnFailure and Never
// with Never being the default. OnFailure policy will result in
// failed containers being restarted with an exponential back-off delay.
RestartPolicy: opts.RestartPolicy,
Volumes: podVolumes,
ServiceAccountName: sa,
}
if opts.EnvironmentVariables != nil && len(opts.EnvironmentVariables) > 0 {
defaultSpecs.Containers[0].Env = opts.EnvironmentVariables
}
// Patch default Pod Specs if needed
patchedSpecs, err := patchDefaultPodSpecs(defaultSpecs, opts.PodOverride)
if err != nil {
return nil, errors.Wrapf(err, "Failed to create pod. Failed to override pod specs. Namespace: %s, NameFmt: %s", opts.Namespace, opts.GenerateName)
}
	// Always put the main container first
sort.Slice(patchedSpecs.Containers, func(i, j int) bool {
return patchedSpecs.Containers[i].Name == defaultContainerName
})
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
GenerateName: opts.GenerateName,
Labels: map[string]string{
consts.LabelKeyCreatedBy: consts.LabelValueKanister,
},
},
Spec: patchedSpecs,
}
// Override `GenerateName` if `Name` option is provided
if opts.Name != "" {
pod.Name = opts.Name
}
// Override default container name if applicable
if opts.ContainerName != "" {
pod.Spec.Containers[0].Name = opts.ContainerName
}
// Add Annotations and Labels, if specified
if opts.Annotations != nil {
pod.ObjectMeta.Annotations = opts.Annotations
}
if pod.ObjectMeta.Labels == nil {
pod.ObjectMeta.Labels = map[string]string{}
}
if opts.OwnerReferences != nil {
pod.SetOwnerReferences(opts.OwnerReferences)
}
if opts.PodSecurityContext != nil {
pod.Spec.SecurityContext = opts.PodSecurityContext
}
if opts.ContainerSecurityContext != nil {
pod.Spec.Containers[0].SecurityContext = opts.ContainerSecurityContext
}
if opts.Lifecycle != nil {
pod.Spec.Containers[0].Lifecycle = opts.Lifecycle
}
for key, value := range opts.Labels {
pod.ObjectMeta.Labels[key] = value
}
pod.Namespace = ns
return pod, nil
}
// CreatePod creates a pod with a single container based on the specified image
func CreatePod(ctx context.Context, cli kubernetes.Interface, opts *PodOptions) (*v1.Pod, error) {
pod, err := GetPodObjectFromPodOptions(cli, opts)
if err != nil {
return nil, errors.Wrapf(err, "Failed to get pod from podOptions. Namespace: %s, NameFmt: %s", opts.Namespace, opts.GenerateName)
}
pod, err = cli.CoreV1().Pods(pod.Namespace).Create(ctx, pod, metav1.CreateOptions{})
if err != nil {
return nil, errors.Wrapf(err, "Failed to create pod. Namespace: %s, NameFmt: %s", opts.Namespace, opts.GenerateName)
}
return pod, nil
}
// DeletePod deletes the specified pod
func DeletePod(ctx context.Context, cli kubernetes.Interface, pod *v1.Pod) error {
if err := cli.CoreV1().Pods(pod.Namespace).Delete(ctx, pod.Name, metav1.DeleteOptions{}); err != nil {
log.WithError(err).Print("DeletePod failed")
}
return nil
}
func StreamPodLogs(ctx context.Context, cli kubernetes.Interface, namespace, podName, containerName string) (io.ReadCloser, error) {
plo := &v1.PodLogOptions{
Follow: true,
Container: containerName,
}
return cli.CoreV1().Pods(namespace).GetLogs(podName, plo).Stream(ctx)
}
// GetPodLogs fetches the logs from the given pod
func GetPodLogs(ctx context.Context, cli kubernetes.Interface, namespace, podName, containerName string) (string, error) {
reader, err := cli.CoreV1().Pods(namespace).GetLogs(podName, &v1.PodLogOptions{Container: containerName}).Stream(ctx)
if err != nil {
return "", err
}
defer reader.Close()
bytes, err := io.ReadAll(reader)
if err != nil {
return "", err
}
return string(bytes), nil
}
// getErrorFromLogs fetches logs from pod and constructs error containing last ten lines of log and specified error message
func getErrorFromLogs(ctx context.Context, cli kubernetes.Interface, namespace, podName, containerName string, err error, errorMessage string) error {
r, logErr := StreamPodLogs(ctx, cli, namespace, podName, containerName)
if logErr != nil {
return errors.Wrapf(logErr, "Failed to fetch logs from the pod")
}
defer r.Close()
// Grab last log lines and put them to an error
lt := NewLogTail(logTailDefaultLength)
// We are not interested in log extraction error
io.Copy(lt, r) // nolint: errcheck
return errors.Wrap(errors.Wrap(err, lt.ToString()), errorMessage)
}
// WaitForPodReady waits for a pod to exit the pending state
func WaitForPodReady(ctx context.Context, cli kubernetes.Interface, namespace, name string) error {
timeoutCtx, waitCancel := context.WithTimeout(ctx, GetPodReadyWaitTimeout())
defer waitCancel()
attachLog := true
containerForLogs := ""
err := poll.Wait(timeoutCtx, func(ctx context.Context) (bool, error) {
p, err := cli.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{})
if err != nil {
attachLog = false
return false, err
}
containerForLogs = p.Spec.Containers[0].Name
// check if nodes are up and available
err = checkNodesStatus(p, cli)
if err != nil && !strings.Contains(err.Error(), errAccessingNode) {
attachLog = false
return false, err
}
// check for memory or resource issues
|
attachLog = false
return false, errors.Errorf("Pod stuck in pending state, reason: %s", p.Status.Reason)
}
}
// check if pvc and pv are up and ready to mount
if err := getVolStatus(timeoutCtx, p, cli, namespace); err != nil {
attachLog = false
return false, err
}
return p.Status.Phase != v1.PodPending && p.Status.Phase != "", nil
})
if err == nil {
return nil
}
errorMessage := fmt.Sprintf("Pod did not transition into running state. Timeout:%v Namespace:%s, Name:%s", GetPodReadyWaitTimeout(), namespace, name)
if attachLog {
return getErrorFromLogs(ctx, cli, namespace, name, containerForLogs, err, errorMessage)
}
return errors.Wrap(err, errorMessage)
}
func checkNodesStatus(p *v1.Pod, cli kubernetes.Interface) error {
n := strings.Split(p.Spec.NodeName, "/")
if n[0] != "" {
|
if p.Status.Phase == v1.PodPending {
if p.Status.Reason == "OutOfmemory" || p.Status.Reason == "OutOfcpu" {
|
random_line_split
|
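getErrorFromLogs above copies the log stream into a bounded tail writer so only the last lines end up wrapped into the returned error. A minimal sketch of such a writer follows; it is not the NewLogTail implementation the code calls, and it assumes line-aligned writes, which io.Copy from a small reader provides.
package main

import (
	"fmt"
	"io"
	"strings"
)

// logTail keeps only the last max lines written to it.
type logTail struct {
	max   int
	lines []string
}

func (l *logTail) Write(p []byte) (int, error) {
	for _, ln := range strings.Split(strings.TrimRight(string(p), "\n"), "\n") {
		l.lines = append(l.lines, ln)
	}
	if over := len(l.lines) - l.max; over > 0 {
		l.lines = l.lines[over:]
	}
	return len(p), nil
}

func (l *logTail) String() string { return strings.Join(l.lines, "\n") }

func main() {
	lt := &logTail{max: 2}
	io.Copy(lt, strings.NewReader("one\ntwo\nthree\n")) // nolint: errcheck
	fmt.Println(lt.String()) // prints the last two lines: "two" and "three"
}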
js_PassengerEdit.js
|
2,'0')+" "+date.getHours().toString().padLeft(2,'0'));
}
else if(fg==2)//yyyy-MM-dd HH:mm
{
d1=(date.getFullYear()+"-"+(date.getMonth()+1).toString().padLeft(2,'0')+"-"+date.getDate().toString().padLeft(2,'0')+" "+date.getHours().toString().padLeft(2,'0')+":"+date.getMinutes().toString().padLeft(2,'0'));
} else if(fg==3)//yyyy-MM-dd HH:mm:ss
{
d1=(date.getFullYear()+"-"+(date.getMonth()+1).toString().padLeft(2,'0')+"-"+date.getDate().toString().padLeft(2,'0')+" "+date.getHours().toString().padLeft(2,'0')+":"+date.getMinutes().toString().padLeft(2,'0')+":"+date.getSeconds().toString().padLeft(2,'0'));
}
return d1;
}
function initArr(arr,num) {
for(var i=1;i<=num;i++) {
eval("var obj={_"+i+":0}");
arr.push(obj);
}
return arr;
}
// Reset all index-usage flags
function resetFg(arr,val) {
for(var i=0;i<arr.length;i++) {
for(var m in arr[i]) {
arr[i][m]=val;
}
}
}
// Set the usage flag for a given index
function setFg(arr,key,val) {
for(var i=0;i<arr.length;i++) {
for(var j in arr[i]) {
if(j=="_"+key) {
arr[i][j]=val;
}
}
}
}
// Get the smallest unused index
function getMinFg(arr) {
var index="0";
var istrue=false;
for(var i=0;i<arr.length;i++) {
if(istrue) {
break;
}
for(var key in arr[i]) {
if(arr[i][key]=="0") {
index=key.replace("_","");
istrue=true;
break;
}
}
}
return index;
}
function ddlSetText(ddlObj,flag,num) {
var ddlVal=jQuery.trim(jQuery(ddlObj).val()).split('-')[0].toUpperCase();
jQuery("#"+flag+"_"+num).val(ddlVal);
}
function txtSetSel(txtObj,flag,num) {
var txtVal=jQuery(txtObj).val().toUpperCase();
if(txtVal!="") {
jQuery("#"+flag+"_"+num+" option[value*='"+txtVal+"']").attr("selected",true);
} else {
jQuery("#"+flag+"_"+num+" option").eq(0).attr("selected",true);
}
}
// Maximum number of airline/card-number rows that can be added
var maxCarryNum=20;
var carryArr=[];
// Add a row
function addGroup(evt,name) {
if(evt!=null) {
var target=evt.srcElement?evt.srcElement:evt.target;
jQuery(target).attr("disabled",true);
}
var num=0;
    // Template row (note the closing "]" in the attribute selector)
    var trHtml=jQuery("<div></div>").append(jQuery("#tab_"+name+" tr[id='tr"+name+"_0']").clone(true)).html();
var trCnt=jQuery("#tab_"+name+" tr[id*='tr"+name+"_']").length;
if(name=="carry") {
if(trCnt>=maxCarryNum) {
showdialog("已超过最大范围,不能继续添加了!");
return false;
}
        // Get an available index (indices start at 1)
num=getMinFg(carryArr);
        // Mark it as used
setFg(carryArr,num,"1");
}
    // Row action buttons
var opDiv='<div id="'+name+'_opdiv_'+num+'"> <span class="btn btn-ok-s"><input type="button" value="添加" id="btnAdd_'+num+'" onclick="return addGroup(event,\''+name+'\')" /></span>'+
' <span class="btn btn-ok-s"><input type="button" value="删除" id="btnDel_'+num+'" onclick="return removeGroup(event,\''+name+'\','+num+')" /></span></div>';
    // Set the action-cell HTML
trHtml="<tr id='tr"+name+"_"+num+"'>"+jQuery("<tr>"+trHtml+"</tr>").find("td:last").html(opDiv).parent().html()+"</tr>";
    // Replace the ids
trHtml=trHtml.NewReplace("_0","_"+num).NewReplace("txtSetSel(this,'ddlCarryCode',0)","txtSetSel(this,'ddlCarryCode',"+num+")").NewReplace("ddlSetText(this,'txtCarryCode',0)","ddlSetText(this,'txtCarryCode',"+num+")");
    // Append the new row
jQuery("#tab_"+name).append(trHtml);
    // Set initial values
jQuery("#tab_"+name+" ddlCarryCode_"+num).eq(0).attr("selected",true);
jQuery("#tab_"+name+" txtCarryCode_"+num).val("");
jQuery("#tab_"+name+" txtAirNo_"+num).val("");
if(evt!=null) {
var target=evt.srcElement?evt.srcElement:evt.target;
jQuery(target).attr("disabled",false);
}
return num;
}
// Remove a row
function removeGroup(evt,name,num) {
if(num!=null) {
jQuery("#tab_"+name+" tr[id='tr"+name+"_"+num+"'").remove();
} else {
var trCount=jQuery("#tab_"+name+" tr").length;
if(trCount>1) {
            // Delete from the end
var lastTr=jQuery("#tab_"+name+" tr:last");
num=lastTr.attr("id").NewReplace("tr"+name+"_","");
lastTr.remove();
} else {
showdialog("该行数据不能删除!");
return false;
}
}
    // Mark the slot as unused
setFg(carryArr,num,"0");
return false;
}
// Save the data
function SaveData() {
var url="PassengerEdit.aspx";
var val_IsEdit=jQuery("#Hid_IsEdit").val();
var val_Name=jQuery.trim(jQuery("#txtUser").val());
var val_Phone=jQuery.trim(jQuery("#txtPhone").val());
var val_CardType=jQuery.trim(jQuery("#ddlCardType").val());
//var text_CardType=jQuery.trim(jQuery("#ddlCardType option:selected").text());
var val_CardNum=jQuery.trim(jQuery("#txtCardNum").val());
//var val_Date=jQuery.trim(jQuery("#txtDate").val());
if(jQuery("#txtDate").is(":visible")) {
val_CardNum=jQuery.trim(jQuery("#txtDate").val());
}
var val_sex=jQuery("input[type='radio'][name='sex']:checked").val();
var val_pastype=jQuery("input[type='radio'][name='pastype']:checked").val();
var val_Birthday=jQuery("#txtBirthday").val();
var val_Remark=jQuery("#txtRemark").val();
if(val_Name=="") {
showdialog("旅客姓名不能为空!");
return false;
}
if(val_Phone=="") {
showdialog("旅客手机号码不能为空!");
return false;
}
if(val_CardNum=="") {
showdialog("旅客证件号码不能为空!");
return false;
}
if(val_Birthday=="") {
showdialog("旅客出生日期不能为空!");
retur
|
var carrNo=[];
jQuery("#tab_carry tr").each(function (index,tr) {
var carrCode=jQuery(tr).find("select[id*='ddlCarryCode_']").val();
var AirNo=jQuery.trim(jQuery(tr).find("input[id*='txtAirNo_']").val());
if(carrCode!=""&&AirNo=="") {
msg="航空公司卡号不能为空!";
return false;
}
carrNo.push(carrCode+","+AirNo);
});
if(msg!="") {
showdialog(msg);
return false;
}
if(carrNo!=null&&carrNo.length>0) {
val_CpyandNo=carrNo.join('|');
}
var param={
IsEdit: escape(val_IsEdit),
Name: escape(val_Name),
Phone: escape(val_Phone),
CardType: escape(val_CardType),
CardNum: escape(val_CardNum),
Sex: escape(val_sex),
Pastype: escape(val_pastype),
Birthday: escape(val_Birthday),
Remark: escape(val_Remark),
CpyandNo: escape(val_CpyandNo),
save: "save",
num: Math.random(),
currentuserid: jQuery("#currentuserid").val()
};
if(val_IsEdit=="1") {
        // Edit mode
var Id=jQuery("#Hid_id").val();
param.Id=jQuery("#Hid_id").val();
}
jQuery.post(url,param,function (data) {
if(jQuery.trim(data)!="") {
var strArr=data.split('@@');
if(strArr.length==2) {
if(strArr[0]=="1") {
showdialog(strArr[1]);
} else {
showdialog(strArr[1]);
}
}
} else {
showdialog("操作失败!");
}
},"text");
return false;
|
n false;
}
    // Validate airline card numbers (validation currently skipped)
var val_CpyandNo="";
var msg="";
|
conditional_block
|
js_PassengerEdit.js
|
d1=(date.getFullYear()+"-"+(date.getMonth()+1).toString().padLeft(2,'0')+"-"+date.getDate().toString().padLeft(2,'0'));
} else if(fg==1)//yyyy-MM-dd HH
{
d1=(date.getFullYear()+"-"+(date.getMonth()+1).toString().padLeft(2,'0')+"-"+date.getDate().toString().padLeft(2,'0')+" "+date.getHours().toString().padLeft(2,'0'));
}
else if(fg==2)//yyyy-MM-dd HH:mm
{
d1=(date.getFullYear()+"-"+(date.getMonth()+1).toString().padLeft(2,'0')+"-"+date.getDate().toString().padLeft(2,'0')+" "+date.getHours().toString().padLeft(2,'0')+":"+date.getMinutes().toString().padLeft(2,'0'));
} else if(fg==3)//yyyy-MM-dd HH:mm:ss
{
d1=(date.getFullYear()+"-"+(date.getMonth()+1).toString().padLeft(2,'0')+"-"+date.getDate().toString().padLeft(2,'0')+" "+date.getHours().toString().padLeft(2,'0')+":"+date.getMinutes().toString().padLeft(2,'0')+":"+date.getSeconds().toString().padLeft(2,'0'));
}
return d1;
}
function initArr(arr,num) {
for(var i=1;i<=num;i++) {
eval("var obj={_"+i+":0}");
arr.push(obj);
}
return arr;
}
// Reset the slot flags
function resetFg(arr,val) {
for(var i=0;i<arr.length;i++) {
for(var m in arr[i]) {
arr[i][m]=val;
}
}
}
// Set the flag for a given slot number
function setFg(arr,key,val) {
for(var i=0;i<arr.length;i++) {
for(var j in arr[i]) {
if(j=="_"+key) {
arr[i][j]=val;
}
}
}
}
// Get the smallest unused slot number
function getMinFg(arr) {
var index="0";
var istrue=false;
for(var i=0;i<arr.length;i++) {
if(istrue) {
break;
}
for(var key in arr[i]) {
if(arr[i][key]=="0") {
index=key.replace("_","");
istrue=true;
break;
}
}
}
return index;
}
function ddlSetText(ddlObj,flag,num) {
var ddlVal=jQuery.trim(jQuery(ddlObj).val()).split('-')[0].toUpperCase();
jQuery("#"+flag+"_"+num).val(ddlVal);
}
function txtSetSel(txtObj,flag,num) {
var txtVal=jQuery(txtObj).val().toUpperCase();
if(txtVal!="") {
jQuery("#"+flag+"_"+num+" option[value*='"+txtVal+"']").attr("selected",true);
} else {
jQuery("#"+flag+"_"+num+" option").eq(0).attr("selected",true);
}
}
// Maximum number of airline/card-number rows that can be added
var maxCarryNum=20;
var carryArr=[];
// Add a row
function addGroup(evt,name) {
if(evt!=null) {
var target=evt.srcElement?evt.srcElement:evt.target;
jQuery(target).attr("disabled",true);
}
var num=0;
    // Row template
    var trHtml=jQuery("<div></div>").append(jQuery("#tab_"+name+" tr[id='tr"+name+"_0']").clone(true)).html();
var trCnt=jQuery("#tab_"+name+" tr[id*='tr"+name+"_']").length;
if(name=="carry") {
if(trCnt>=maxCarryNum) {
showdialog("已超过最大范围,不能继续添加了!");
return false;
}
        // Get an available slot number (starting from 1)
num=getMinFg(carryArr);
        // Mark it as used
setFg(carryArr,num,"1");
}
    // Action buttons
var opDiv='<div id="'+name+'_opdiv_'+num+'"> <span class="btn btn-ok-s"><input type="button" value="添加" id="btnAdd_'+num+'" onclick="return addGroup(event,\''+name+'\')" /></span>'+
' <span class="btn btn-ok-s"><input type="button" value="删除" id="btnDel_'+num+'" onclick="return removeGroup(event,\''+name+'\','+num+')" /></span></div>';
    // Set the action-button HTML
trHtml="<tr id='tr"+name+"_"+num+"'>"+jQuery("<tr>"+trHtml+"</tr>").find("td:last").html(opDiv).parent().html()+"</tr>";
    // Replace the ids
trHtml=trHtml.NewReplace("_0","_"+num).NewReplace("txtSetSel(this,'ddlCarryCode',0)","txtSetSel(this,'ddlCarryCode',"+num+")").NewReplace("ddlSetText(this,'txtCarryCode',0)","ddlSetText(this,'txtCarryCode',"+num+")");
    // Append the new row
jQuery("#tab_"+name).append(trHtml);
    // Set initial values
jQuery("#tab_"+name+" ddlCarryCode_"+num).eq(0).attr("selected",true);
jQuery("#tab_"+name+" txtCarryCode_"+num).val("");
jQuery("#tab_"+name+" txtAirNo_"+num).val("");
if(evt!=null) {
var target=evt.srcElement?evt.srcElement:evt.target;
jQuery(target).attr("disabled",false);
}
return num;
}
// Remove a row
function removeGroup(evt,name,num) {
if(num!=null) {
jQuery("#tab_"+name+" tr[id='tr"+name+"_"+num+"'").remove();
} else {
var trCount=jQuery("#tab_"+name+" tr").length;
if(trCount>1) {
            // Delete from the end
var lastTr=jQuery("#tab_"+name+" tr:last");
num=lastTr.attr("id").NewReplace("tr"+name+"_","");
lastTr.remove();
} else {
showdialog("该行数据不能删除!");
return false;
}
}
    // Mark the slot as unused
setFg(carryArr,num,"0");
return false;
}
// Save the data
function SaveData() {
var url="PassengerEdit.aspx";
var val_IsEdit=jQuery("#Hid_IsEdit").val();
var val_Name=jQuery.trim(jQuery("#txtUser").val());
var val_Phone=jQuery.trim(jQuery("#txtPhone").val());
var val_CardType=jQuery.trim(jQuery("#ddlCardType").val());
//var text_CardType=jQuery.trim(jQuery("#ddlCardType option:selected").text());
var val_CardNum=jQuery.trim(jQuery("#txtCardNum").val());
//var val_Date=jQuery.trim(jQuery("#txtDate").val());
if(jQuery("#txtDate").is(":visible")) {
val_CardNum=jQuery.trim(jQuery("#txtDate").val());
}
var val_sex=jQuery("input[type='radio'][name='sex']:checked").val();
var val_pastype=jQuery("input[type='radio'][name='pastype']:checked").val();
var val_Birthday=jQuery("#txtBirthday").val();
var val_Remark=jQuery("#txtRemark").val();
if(val_Name=="") {
showdialog("旅客姓名不能为空!");
return false;
}
if(val_Phone=="") {
showdialog("旅客手机号码不能为空!");
return false;
}
if(val_CardNum=="") {
showdialog("旅客证件号码不能为空!");
return false;
}
if(val_Birthday=="") {
showdialog("旅客出生日期不能为空!");
return false;
}
    // Validate airline card numbers (validation currently skipped)
var val_CpyandNo="";
var msg="";
var carrNo=[];
jQuery("#tab_carry tr").each(function (index,tr) {
var carrCode=jQuery(tr).find("select[id*='ddlCarryCode_']").val();
var AirNo=jQuery.trim(jQuery(tr).find("input[id*='txtAirNo_']").val());
if(carrCode!=""&&AirNo=="") {
msg="航空公司卡号不能为空!";
return false;
}
carrNo.push(carrCode+","+AirNo);
});
if(msg!="") {
showdialog(msg);
return false;
}
if(carrNo!=null&&carrNo.length>0) {
val_CpyandNo=carrNo.join('|');
}
var param={
IsEdit: escape(val_IsEdit),
Name: escape(val_Name),
Phone: escape(val_Phone),
CardType: escape(val_CardType),
CardNum: escape(val_CardNum),
Sex: escape(val_sex),
Pastype: escape(val_pastype),
Birthday: escape(val_Birthday),
Remark: escape(val_Remark),
CpyandNo: escape(val_CpyandNo),
save: "save",
num: Math.random(),
currentuserid: jQuery("#currentuserid").val()
};
if(val_IsEdit=="1") {
        // Edit mode
var Id=jQuery("#Hid_id").val();
param.Id=jQuery("#Hid_id").val();
}
jQuery.post(url,param,function
|
-MM-dd
|
identifier_name
|
|
js_PassengerEdit.js
|
(2,'0')+" "+date.getHours().toString().padLeft(2,'0'));
}
else if(fg==2)//yyyy-MM-dd HH:mm
{
d1=(date.getFullYear()+"-"+(date.getMonth()+1).toString().padLeft(2,'0')+"-"+date.getDate().toString().padLeft(2,'0')+" "+date.getHours().toString().padLeft(2,'0')+":"+date.getMinutes().toString().padLeft(2,'0'));
} else if(fg==3)//yyyy-MM-dd HH:mm:ss
{
d1=(date.getFullYear()+"-"+(date.getMonth()+1).toString().padLeft(2,'0')+"-"+date.getDate().toString().padLeft(2,'0')+" "+date.getHours().toString().padLeft(2,'0')+":"+date.getMinutes().toString().padLeft(2,'0')+":"+date.getSeconds().toString().padLeft(2,'0'));
}
return d1;
}
function initArr(arr,num) {
for(var i=1;i<=num;i++) {
eval("var obj={_"+i+":0}");
arr.push(obj);
}
return arr;
}
// Reset the slot flags
function resetFg(arr,val) {
for(var i=0;i<arr.length;i++) {
for(var m in arr[i]) {
arr[i][m]=val;
}
}
}
// Set the flag for a given slot number
function setFg(arr,key,val) {
for(var i=0;i<arr.length;i++) {
for(var j in arr[i]) {
if(j=="_"+key) {
arr[i][j]=val;
}
}
}
}
// Get the smallest unused slot number
function getMinFg(arr) {
var index="0";
var istrue=false;
for(var i=0;i<arr.length;i++) {
|
}
for(var key in arr[i]) {
if(arr[i][key]=="0") {
index=key.replace("_","");
istrue=true;
break;
}
}
}
return index;
}
function ddlSetText(ddlObj,flag,num) {
var ddlVal=jQuery.trim(jQuery(ddlObj).val()).split('-')[0].toUpperCase();
jQuery("#"+flag+"_"+num).val(ddlVal);
}
function txtSetSel(txtObj,flag,num) {
var txtVal=jQuery(txtObj).val().toUpperCase();
if(txtVal!="") {
jQuery("#"+flag+"_"+num+" option[value*='"+txtVal+"']").attr("selected",true);
} else {
jQuery("#"+flag+"_"+num+" option").eq(0).attr("selected",true);
}
}
// Maximum number of airline/card-number rows that can be added
var maxCarryNum=20;
var carryArr=[];
// Add a row
function addGroup(evt,name) {
if(evt!=null) {
var target=evt.srcElement?evt.srcElement:evt.target;
jQuery(target).attr("disabled",true);
}
var num=0;
    // Row template
    var trHtml=jQuery("<div></div>").append(jQuery("#tab_"+name+" tr[id='tr"+name+"_0']").clone(true)).html();
var trCnt=jQuery("#tab_"+name+" tr[id*='tr"+name+"_']").length;
if(name=="carry") {
if(trCnt>=maxCarryNum) {
showdialog("已超过最大范围,不能继续添加了!");
return false;
}
        // Get an available slot number (starting from 1)
num=getMinFg(carryArr);
        // Mark it as used
setFg(carryArr,num,"1");
}
    // Action buttons
var opDiv='<div id="'+name+'_opdiv_'+num+'"> <span class="btn btn-ok-s"><input type="button" value="添加" id="btnAdd_'+num+'" onclick="return addGroup(event,\''+name+'\')" /></span>'+
' <span class="btn btn-ok-s"><input type="button" value="删除" id="btnDel_'+num+'" onclick="return removeGroup(event,\''+name+'\','+num+')" /></span></div>';
    // Set the action-button HTML
trHtml="<tr id='tr"+name+"_"+num+"'>"+jQuery("<tr>"+trHtml+"</tr>").find("td:last").html(opDiv).parent().html()+"</tr>";
    // Replace the ids
trHtml=trHtml.NewReplace("_0","_"+num).NewReplace("txtSetSel(this,'ddlCarryCode',0)","txtSetSel(this,'ddlCarryCode',"+num+")").NewReplace("ddlSetText(this,'txtCarryCode',0)","ddlSetText(this,'txtCarryCode',"+num+")");
    // Append the new row
jQuery("#tab_"+name).append(trHtml);
    // Set initial values
jQuery("#tab_"+name+" ddlCarryCode_"+num).eq(0).attr("selected",true);
jQuery("#tab_"+name+" txtCarryCode_"+num).val("");
jQuery("#tab_"+name+" txtAirNo_"+num).val("");
if(evt!=null) {
var target=evt.srcElement?evt.srcElement:evt.target;
jQuery(target).attr("disabled",false);
}
return num;
}
// Remove a row
function removeGroup(evt,name,num) {
if(num!=null) {
jQuery("#tab_"+name+" tr[id='tr"+name+"_"+num+"'").remove();
} else {
var trCount=jQuery("#tab_"+name+" tr").length;
if(trCount>1) {
            // Delete from the end
var lastTr=jQuery("#tab_"+name+" tr:last");
num=lastTr.attr("id").NewReplace("tr"+name+"_","");
lastTr.remove();
} else {
showdialog("该行数据不能删除!");
return false;
}
}
    // Mark the slot as unused
setFg(carryArr,num,"0");
return false;
}
// Save the data
function SaveData() {
var url="PassengerEdit.aspx";
var val_IsEdit=jQuery("#Hid_IsEdit").val();
var val_Name=jQuery.trim(jQuery("#txtUser").val());
var val_Phone=jQuery.trim(jQuery("#txtPhone").val());
var val_CardType=jQuery.trim(jQuery("#ddlCardType").val());
//var text_CardType=jQuery.trim(jQuery("#ddlCardType option:selected").text());
var val_CardNum=jQuery.trim(jQuery("#txtCardNum").val());
//var val_Date=jQuery.trim(jQuery("#txtDate").val());
if(jQuery("#txtDate").is(":visible")) {
val_CardNum=jQuery.trim(jQuery("#txtDate").val());
}
var val_sex=jQuery("input[type='radio'][name='sex']:checked").val();
var val_pastype=jQuery("input[type='radio'][name='pastype']:checked").val();
var val_Birthday=jQuery("#txtBirthday").val();
var val_Remark=jQuery("#txtRemark").val();
if(val_Name=="") {
showdialog("旅客姓名不能为空!");
return false;
}
if(val_Phone=="") {
showdialog("旅客手机号码不能为空!");
return false;
}
if(val_CardNum=="") {
showdialog("旅客证件号码不能为空!");
return false;
}
if(val_Birthday=="") {
showdialog("旅客出生日期不能为空!");
return false;
}
    // Validate airline card numbers (validation currently skipped)
var val_CpyandNo="";
var msg="";
var carrNo=[];
jQuery("#tab_carry tr").each(function (index,tr) {
var carrCode=jQuery(tr).find("select[id*='ddlCarryCode_']").val();
var AirNo=jQuery.trim(jQuery(tr).find("input[id*='txtAirNo_']").val());
if(carrCode!=""&&AirNo=="") {
msg="航空公司卡号不能为空!";
return false;
}
carrNo.push(carrCode+","+AirNo);
});
if(msg!="") {
showdialog(msg);
return false;
}
if(carrNo!=null&&carrNo.length>0) {
val_CpyandNo=carrNo.join('|');
}
var param={
IsEdit: escape(val_IsEdit),
Name: escape(val_Name),
Phone: escape(val_Phone),
CardType: escape(val_CardType),
CardNum: escape(val_CardNum),
Sex: escape(val_sex),
Pastype: escape(val_pastype),
Birthday: escape(val_Birthday),
Remark: escape(val_Remark),
CpyandNo: escape(val_CpyandNo),
save: "save",
num: Math.random(),
currentuserid: jQuery("#currentuserid").val()
};
if(val_IsEdit=="1") {
        // Edit mode
var Id=jQuery("#Hid_id").val();
param.Id=jQuery("#Hid_id").val();
}
jQuery.post(url,param,function (data) {
if(jQuery.trim(data)!="") {
var strArr=data.split('@@');
if(strArr.length==2) {
if(strArr[0]=="1") {
showdialog(strArr[1]);
} else {
showdialog(strArr[1]);
}
}
} else {
showdialog("操作失败!");
}
},"text");
return false;
}
//
|
if(istrue) {
break;
|
random_line_split
|
js_PassengerEdit.js
|
);
}
}
// Maximum number of airline/card-number rows that can be added
var maxCarryNum=20;
var carryArr=[];
// Add a row
function addGroup(evt,name) {
if(evt!=null) {
var target=evt.srcElement?evt.srcElement:evt.target;
jQuery(target).attr("disabled",true);
}
var num=0;
    // Row template
    var trHtml=jQuery("<div></div>").append(jQuery("#tab_"+name+" tr[id='tr"+name+"_0']").clone(true)).html();
var trCnt=jQuery("#tab_"+name+" tr[id*='tr"+name+"_']").length;
if(name=="carry") {
if(trCnt>=maxCarryNum) {
showdialog("已超过最大范围,不能继续添加了!");
return false;
}
        // Get an available slot number (starting from 1)
num=getMinFg(carryArr);
        // Mark it as used
setFg(carryArr,num,"1");
}
    // Action buttons
var opDiv='<div id="'+name+'_opdiv_'+num+'"> <span class="btn btn-ok-s"><input type="button" value="添加" id="btnAdd_'+num+'" onclick="return addGroup(event,\''+name+'\')" /></span>'+
' <span class="btn btn-ok-s"><input type="button" value="删除" id="btnDel_'+num+'" onclick="return removeGroup(event,\''+name+'\','+num+')" /></span></div>';
    // Set the action-button HTML
trHtml="<tr id='tr"+name+"_"+num+"'>"+jQuery("<tr>"+trHtml+"</tr>").find("td:last").html(opDiv).parent().html()+"</tr>";
    // Replace the ids
trHtml=trHtml.NewReplace("_0","_"+num).NewReplace("txtSetSel(this,'ddlCarryCode',0)","txtSetSel(this,'ddlCarryCode',"+num+")").NewReplace("ddlSetText(this,'txtCarryCode',0)","ddlSetText(this,'txtCarryCode',"+num+")");
    // Append the new row
jQuery("#tab_"+name).append(trHtml);
    // Set initial values
jQuery("#tab_"+name+" ddlCarryCode_"+num).eq(0).attr("selected",true);
jQuery("#tab_"+name+" txtCarryCode_"+num).val("");
jQuery("#tab_"+name+" txtAirNo_"+num).val("");
if(evt!=null) {
var target=evt.srcElement?evt.srcElement:evt.target;
jQuery(target).attr("disabled",false);
}
return num;
}
// Remove a row
function removeGroup(evt,name,num) {
if(num!=null) {
jQuery("#tab_"+name+" tr[id='tr"+name+"_"+num+"'").remove();
} else {
var trCount=jQuery("#tab_"+name+" tr").length;
if(trCount>1) {
            // Delete from the end
var lastTr=jQuery("#tab_"+name+" tr:last");
num=lastTr.attr("id").NewReplace("tr"+name+"_","");
lastTr.remove();
} else {
showdialog("该行数据不能删除!");
return false;
}
}
    // Mark the slot as unused
setFg(carryArr,num,"0");
return false;
}
// Save the data
function SaveData() {
var url="PassengerEdit.aspx";
var val_IsEdit=jQuery("#Hid_IsEdit").val();
var val_Name=jQuery.trim(jQuery("#txtUser").val());
var val_Phone=jQuery.trim(jQuery("#txtPhone").val());
var val_CardType=jQuery.trim(jQuery("#ddlCardType").val());
//var text_CardType=jQuery.trim(jQuery("#ddlCardType option:selected").text());
var val_CardNum=jQuery.trim(jQuery("#txtCardNum").val());
//var val_Date=jQuery.trim(jQuery("#txtDate").val());
if(jQuery("#txtDate").is(":visible")) {
val_CardNum=jQuery.trim(jQuery("#txtDate").val());
}
var val_sex=jQuery("input[type='radio'][name='sex']:checked").val();
var val_pastype=jQuery("input[type='radio'][name='pastype']:checked").val();
var val_Birthday=jQuery("#txtBirthday").val();
var val_Remark=jQuery("#txtRemark").val();
if(val_Name=="") {
showdialog("旅客姓名不能为空!");
return false;
}
if(val_Phone=="") {
showdialog("旅客手机号码不能为空!");
return false;
}
if(val_CardNum=="") {
showdialog("旅客证件号码不能为空!");
return false;
}
if(val_Birthday=="") {
showdialog("旅客出生日期不能为空!");
return false;
}
    // Validate airline card numbers (validation currently skipped)
var val_CpyandNo="";
var msg="";
var carrNo=[];
jQuery("#tab_carry tr").each(function (index,tr) {
var carrCode=jQuery(tr).find("select[id*='ddlCarryCode_']").val();
var AirNo=jQuery.trim(jQuery(tr).find("input[id*='txtAirNo_']").val());
if(carrCode!=""&&AirNo=="") {
msg="航空公司卡号不能为空!";
return false;
}
carrNo.push(carrCode+","+AirNo);
});
if(msg!="") {
showdialog(msg);
return false;
}
if(carrNo!=null&&carrNo.length>0) {
val_CpyandNo=carrNo.join('|');
}
var param={
IsEdit: escape(val_IsEdit),
Name: escape(val_Name),
Phone: escape(val_Phone),
CardType: escape(val_CardType),
CardNum: escape(val_CardNum),
Sex: escape(val_sex),
Pastype: escape(val_pastype),
Birthday: escape(val_Birthday),
Remark: escape(val_Remark),
CpyandNo: escape(val_CpyandNo),
save: "save",
num: Math.random(),
currentuserid: jQuery("#currentuserid").val()
};
if(val_IsEdit=="1") {
        // Edit mode
var Id=jQuery("#Hid_id").val();
param.Id=jQuery("#Hid_id").val();
}
jQuery.post(url,param,function (data) {
if(jQuery.trim(data)!="") {
var strArr=data.split('@@');
if(strArr.length==2) {
if(strArr[0]=="1") {
showdialog(strArr[1]);
} else {
showdialog(strArr[1]);
}
}
} else {
showdialog("操作失败!");
}
},"text");
return false;
}
// Passenger type changed
function PasTypeChange() {
var text=jQuery(this).attr('txt');
var val=jQuery(this).val();
var opData=jQuery.trim(jQuery("#Hid_CardData").val()).split('|');
var ophtml=[];
var opArr=[];
for(var i=0;i<opData.length;i++) {
opArr=opData[i].split('@@');
if(text.indexOf('成人')!= -1) {
ophtml.push('<option value="'+opArr[1]+'">'+opArr[0]+'</option>');
} else if(text.indexOf('儿童')!= -1) {
if(opData[i].indexOf('身份证')!= -1||opData[i].indexOf('出生日期')!= -1||opData[i].indexOf('其他有效证件')!= -1) {
ophtml.push('<option value="'+opArr[1]+'">'+opArr[0]+'</option>');
}
} else if(text.indexOf('婴儿')!= -1) {
if(opData[i].indexOf('其他有效证件')!= -1) {
ophtml.push('<option value="'+opArr[1]+'">'+opArr[0]+'</option>');
}
}
}
jQuery("#ddlCardType").html(ophtml.join(''));
jQuery("#ddlCardType option:visible").eq(0).attr("selected",true);
CardTypeChange();
}
// ID type changed
function CardTypeChange() {
var val=jQuery(this).val();
var text=jQuery("#ddlCardType option:selected").text();
var pasType=jQuery("input[type='radio'][name='pastype']:checked").attr("txt");
if(pasType.indexOf('成人')!= -1) {
jQuery("#txtCardNum").show();
jQuery("#txtDate").hide();
} else if(pasType.indexOf('儿童')!= -1) {
if(text.indexOf("出生日期")!= -1) {
jQuery("#txtCardNum").hide();
jQuery("#txtDate").show();
} else {
jQuery("#txtCardNum").show();
jQuery("#tx
|
tDate").hide();
}
} else if(pasType.indexOf('婴儿')!= -1) {
jQuery("#txtCardNum").hide();
jQuery("#txtDate").show();
}
}
// On page load...
jQuery(function () {
    // Initialize the airline/card-number slots
initArr(carryArr,maxCarryNum);
var IsEdit=jQuery("#Hid_IsEdit").val();
    // Click handler for the passenger-type radios
jQuery("input[type='radio'][name='pastype']").click(PasTypeChange);
jQuery("#ddlCardType").change(CardTypeChange);
if(IsEdit=="1") {
|
identifier_body
|
|
translator.ts
|
includes(property)) target.cachable = false
// eslint-disable-next-line @typescript-eslint/no-unsafe-return
return target[property]
}
}
type TranslatorHeader = {
translatorID: string
translatorType: number
label: string
description: string
creator: string
target: string
minVersion: string
maxVersion: string
priority: number
inRepository: boolean
lastUpdated: string
browserSupport: string
displayOptions: {
exportNotes: boolean
exportFileData: boolean
useJournalAbbreviation: boolean
keepUpdated: boolean
quickCopyMode: string
Title: boolean
Authors: boolean
Year: boolean
Normalize: boolean
}
configOptions: {
getCollections: boolean
async: boolean
}
}
export const Translator = new class implements ITranslator { // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public preferences: Preferences
public skipFields: string[]
public skipField: Record<string, boolean>
public verbatimFields?: string[]
public csquotes: { open: string, close: string }
public export: { dir: string, path: string } = {
dir: undefined,
path: undefined,
}
public options: {
quickCopyMode?: string
dropAttachments?: boolean
exportNotes?: boolean
exportFileData?: boolean
useJournalAbbreviation?: boolean
keepUpdated?: boolean
Title?: boolean
Authors?: boolean
Year?: boolean
Normalize?: boolean
}
public BetterBibLaTeX?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public BetterBibTeX?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public BetterTeX: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public BetterCSLJSON?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public BetterCSLYAML?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public BetterCSL?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public BetterBibTeXCitationKeyQuickCopy?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public BetterBibTeXJSON?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public Citationgraph?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public Collectednotes?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
// public TeX: boolean
// public CSL: boolean
private cachable: boolean
public cache: {
hits: number
misses: number
}
public header: TranslatorHeader
public collections: Record<string, ZoteroTranslator.Collection>
private sortedItems: ZoteroTranslator.Item[]
private currentItem: ZoteroTranslator.Item
public isJurisM: boolean
public isZotero: boolean
public unicode: boolean
public platform: string
public paths: {
caseSensitive: boolean
sep: string
}
public stringCompare: (a: string, b: string) => number
public initialized = false
constructor() {
this.header = (ZOTERO_TRANSLATOR_INFO as TranslatorHeader)
this[this.header.label.replace(/[^a-z]/ig, '')] = true
this.BetterTeX = this.BetterBibTeX || this.BetterBibLaTeX
this.BetterCSL = this.BetterCSLJSON || this.BetterCSLYAML
this.preferences = defaults
this.options = this.header.displayOptions || {}
const collator = new Intl.Collator('en')
this.stringCompare = (collator.compare.bind(collator) as (left: string, right: string) => number)
}
public get exportDir(): string {
this.currentItem.cachable = false
return this.export.dir
}
public get exportPath(): string {
this.currentItem.cachable = false
return this.export.path
}
private
|
(field: string): string {
field = field.trim()
if (field.startsWith('bibtex.')) return this.BetterBibTeX ? field.replace(/^bibtex\./, '') : ''
if (field.startsWith('biblatex.')) return this.BetterBibLaTeX ? field.replace(/^biblatex\./, '') : ''
return field
}
public init(mode: TranslatorMode) {
this.platform = (Zotero.getHiddenPref('better-bibtex.platform') as string)
this.isJurisM = client === 'jurism'
this.isZotero = !this.isJurisM
this.paths = {
caseSensitive: this.platform !== 'mac' && this.platform !== 'win',
sep: this.platform === 'win' ? '\\' : '/',
}
for (const key in this.options) {
if (typeof this.options[key] === 'boolean') {
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
this.options[key] = !!Zotero.getOption(key)
}
else {
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
this.options[key] = Zotero.getOption(key)
}
}
// special handling
if (mode === 'export') {
this.cache = {
hits: 0,
misses: 0,
}
this.export = {
dir: (Zotero.getOption('exportDir') as string),
path: (Zotero.getOption('exportPath') as string),
}
if (this.export.dir && this.export.dir.endsWith(this.paths.sep)) this.export.dir = this.export.dir.slice(0, -1)
}
for (const pref of Object.keys(this.preferences)) {
let value
try {
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
value = Zotero.getOption(`preference_${pref}`)
}
catch (err) {
value = undefined
}
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
if (typeof value === 'undefined') value = Zotero.getHiddenPref(`better-bibtex.${pref}`)
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
this.preferences[pref] = value
}
// special handling
this.skipFields = this.preferences.skipFields.toLowerCase().split(',').map(field => this.typefield(field)).filter((s: string) => s)
this.skipField = this.skipFields.reduce((acc, field) => { acc[field] = true; return acc }, {})
this.verbatimFields = this.preferences.verbatimFields.toLowerCase().split(',').map(field => this.typefield(field)).filter((s: string) => s)
if (!this.verbatimFields.length) this.verbatimFields = null
this.csquotes = this.preferences.csquotes ? { open: this.preferences.csquotes[0], close: this.preferences.csquotes[1] } : null
this.preferences.testing = (Zotero.getHiddenPref('better-bibtex.testing') as boolean)
if (mode === 'export') {
this.unicode = (this.BetterBibTeX && !Translator.preferences.asciiBibTeX) || (this.BetterBibLaTeX && !Translator.preferences.asciiBibLaTeX)
      // when exporting file data you get relative paths; otherwise you get absolute paths, so only one version can go into the cache,
      // because relative file paths differ depending on the file being exported to
this.cachable = !(this.options.exportFileData || this.preferences.relativeFilePaths)
}
this.collections = {}
if (mode === 'export' && this.header.configOptions?.getCollections && Zotero.nextCollection) {
let collection: any
while (collection = Zotero.nextCollection()) {
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
const children = collection.children || collection.descendents || []
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
const key = (collection.primary ? collection.primary : collection).key
this.collections[key] = {
// id: collection.id,
key,
parent: collection.fields.parentKey,
name: collection.name,
items: collection.childItems,
// eslint-disable-next-line @typescript-eslint/no-unsafe-return
collections: children.filter(coll => coll.type === 'collection').map(coll => coll.key),
// items: (item.itemID for item in children when item.type != 'collection')
// descendents: undefined
// children: undefined
// childCollections: undefined
// primary: undefined
// fields: undefined
// type: undefined
// level: undefined
}
}
for (collection of Object.values(this.collections)) {
if (collection.parent && !this.collections[collection.parent]) {
// collection
|
typefield
|
identifier_name
|
translator.ts
|
includes(property)) target.cachable = false
// eslint-disable-next-line @typescript-eslint/no-unsafe-return
return target[property]
}
}
type TranslatorHeader = {
translatorID: string
translatorType: number
label: string
description: string
creator: string
target: string
minVersion: string
maxVersion: string
priority: number
inRepository: boolean
lastUpdated: string
browserSupport: string
displayOptions: {
exportNotes: boolean
exportFileData: boolean
useJournalAbbreviation: boolean
keepUpdated: boolean
quickCopyMode: string
Title: boolean
Authors: boolean
Year: boolean
Normalize: boolean
}
configOptions: {
getCollections: boolean
async: boolean
}
}
export const Translator = new class implements ITranslator { // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public preferences: Preferences
public skipFields: string[]
public skipField: Record<string, boolean>
public verbatimFields?: string[]
public csquotes: { open: string, close: string }
public export: { dir: string, path: string } = {
dir: undefined,
path: undefined,
}
public options: {
quickCopyMode?: string
dropAttachments?: boolean
exportNotes?: boolean
exportFileData?: boolean
useJournalAbbreviation?: boolean
keepUpdated?: boolean
Title?: boolean
Authors?: boolean
Year?: boolean
Normalize?: boolean
}
public BetterBibLaTeX?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public BetterBibTeX?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public BetterTeX: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public BetterCSLJSON?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public BetterCSLYAML?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public BetterCSL?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public BetterBibTeXCitationKeyQuickCopy?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public BetterBibTeXJSON?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public Citationgraph?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public Collectednotes?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
// public TeX: boolean
// public CSL: boolean
private cachable: boolean
public cache: {
hits: number
misses: number
}
public header: TranslatorHeader
public collections: Record<string, ZoteroTranslator.Collection>
private sortedItems: ZoteroTranslator.Item[]
private currentItem: ZoteroTranslator.Item
public isJurisM: boolean
public isZotero: boolean
public unicode: boolean
public platform: string
public paths: {
caseSensitive: boolean
sep: string
}
public stringCompare: (a: string, b: string) => number
public initialized = false
constructor() {
this.header = (ZOTERO_TRANSLATOR_INFO as TranslatorHeader)
this[this.header.label.replace(/[^a-z]/ig, '')] = true
this.BetterTeX = this.BetterBibTeX || this.BetterBibLaTeX
this.BetterCSL = this.BetterCSLJSON || this.BetterCSLYAML
this.preferences = defaults
this.options = this.header.displayOptions || {}
const collator = new Intl.Collator('en')
this.stringCompare = (collator.compare.bind(collator) as (left: string, right: string) => number)
}
public get exportDir(): string {
this.currentItem.cachable = false
return this.export.dir
}
public get exportPath(): string {
this.currentItem.cachable = false
return this.export.path
}
private typefield(field: string): string {
field = field.trim()
if (field.startsWith('bibtex.')) return this.BetterBibTeX ? field.replace(/^bibtex\./, '') : ''
if (field.startsWith('biblatex.')) return this.BetterBibLaTeX ? field.replace(/^biblatex\./, '') : ''
return field
}
public init(mode: TranslatorMode) {
this.platform = (Zotero.getHiddenPref('better-bibtex.platform') as string)
this.isJurisM = client === 'jurism'
this.isZotero = !this.isJurisM
this.paths = {
caseSensitive: this.platform !== 'mac' && this.platform !== 'win',
sep: this.platform === 'win' ? '\\' : '/',
}
for (const key in this.options) {
if (typeof this.options[key] === 'boolean') {
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
this.options[key] = !!Zotero.getOption(key)
}
else {
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
this.options[key] = Zotero.getOption(key)
}
}
// special handling
if (mode === 'export') {
this.cache = {
hits: 0,
misses: 0,
}
this.export = {
dir: (Zotero.getOption('exportDir') as string),
path: (Zotero.getOption('exportPath') as string),
}
if (this.export.dir && this.export.dir.endsWith(this.paths.sep)) this.export.dir = this.export.dir.slice(0, -1)
}
for (const pref of Object.keys(this.preferences)) {
let value
try {
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
value = Zotero.getOption(`preference_${pref}`)
}
catch (err) {
value = undefined
}
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
if (typeof value === 'undefined') value = Zotero.getHiddenPref(`better-bibtex.${pref}`)
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
this.preferences[pref] = value
}
// special handling
this.skipFields = this.preferences.skipFields.toLowerCase().split(',').map(field => this.typefield(field)).filter((s: string) => s)
this.skipField = this.skipFields.reduce((acc, field) => { acc[field] = true; return acc }, {})
this.verbatimFields = this.preferences.verbatimFields.toLowerCase().split(',').map(field => this.typefield(field)).filter((s: string) => s)
if (!this.verbatimFields.length) this.verbatimFields = null
this.csquotes = this.preferences.csquotes ? { open: this.preferences.csquotes[0], close: this.preferences.csquotes[1] } : null
this.preferences.testing = (Zotero.getHiddenPref('better-bibtex.testing') as boolean)
if (mode === 'export') {
this.unicode = (this.BetterBibTeX && !Translator.preferences.asciiBibTeX) || (this.BetterBibLaTeX && !Translator.preferences.asciiBibLaTeX)
      // when exporting file data you get relative paths; otherwise you get absolute paths, so only one version can go into the cache,
      // because relative file paths differ depending on the file being exported to
this.cachable = !(this.options.exportFileData || this.preferences.relativeFilePaths)
}
this.collections = {}
if (mode === 'export' && this.header.configOptions?.getCollections && Zotero.nextCollection)
|
// children: undefined
// childCollections: undefined
// primary: undefined
// fields: undefined
// type: undefined
// level: undefined
}
}
for (collection of Object.values(this.collections)) {
if (collection.parent && !this.collections[collection.parent]) {
// collection.parent
|
{
let collection: any
while (collection = Zotero.nextCollection()) {
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
const children = collection.children || collection.descendents || []
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
const key = (collection.primary ? collection.primary : collection).key
this.collections[key] = {
// id: collection.id,
key,
parent: collection.fields.parentKey,
name: collection.name,
items: collection.childItems,
// eslint-disable-next-line @typescript-eslint/no-unsafe-return
collections: children.filter(coll => coll.type === 'collection').map(coll => coll.key),
// items: (item.itemID for item in children when item.type != 'collection')
// descendents: undefined
|
conditional_block
|
translator.ts
|
includes(property)) target.cachable = false
// eslint-disable-next-line @typescript-eslint/no-unsafe-return
return target[property]
}
}
type TranslatorHeader = {
translatorID: string
translatorType: number
label: string
description: string
creator: string
target: string
minVersion: string
maxVersion: string
priority: number
inRepository: boolean
lastUpdated: string
browserSupport: string
displayOptions: {
exportNotes: boolean
exportFileData: boolean
useJournalAbbreviation: boolean
keepUpdated: boolean
quickCopyMode: string
Title: boolean
Authors: boolean
Year: boolean
Normalize: boolean
}
configOptions: {
getCollections: boolean
async: boolean
}
}
export const Translator = new class implements ITranslator { // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public preferences: Preferences
public skipFields: string[]
public skipField: Record<string, boolean>
public verbatimFields?: string[]
public csquotes: { open: string, close: string }
public export: { dir: string, path: string } = {
dir: undefined,
path: undefined,
}
public options: {
quickCopyMode?: string
dropAttachments?: boolean
exportNotes?: boolean
exportFileData?: boolean
useJournalAbbreviation?: boolean
keepUpdated?: boolean
Title?: boolean
Authors?: boolean
Year?: boolean
Normalize?: boolean
}
public BetterBibLaTeX?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public BetterBibTeX?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public BetterTeX: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public BetterCSLJSON?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public BetterCSLYAML?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public BetterCSL?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public BetterBibTeXCitationKeyQuickCopy?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public BetterBibTeXJSON?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public Citationgraph?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public Collectednotes?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
// public TeX: boolean
// public CSL: boolean
private cachable: boolean
public cache: {
hits: number
misses: number
}
public header: TranslatorHeader
public collections: Record<string, ZoteroTranslator.Collection>
private sortedItems: ZoteroTranslator.Item[]
private currentItem: ZoteroTranslator.Item
public isJurisM: boolean
public isZotero: boolean
public unicode: boolean
public platform: string
public paths: {
caseSensitive: boolean
sep: string
}
public stringCompare: (a: string, b: string) => number
public initialized = false
constructor() {
this.header = (ZOTERO_TRANSLATOR_INFO as TranslatorHeader)
this[this.header.label.replace(/[^a-z]/ig, '')] = true
this.BetterTeX = this.BetterBibTeX || this.BetterBibLaTeX
this.BetterCSL = this.BetterCSLJSON || this.BetterCSLYAML
this.preferences = defaults
this.options = this.header.displayOptions || {}
const collator = new Intl.Collator('en')
this.stringCompare = (collator.compare.bind(collator) as (left: string, right: string) => number)
}
public get exportDir(): string {
this.currentItem.cachable = false
return this.export.dir
}
public get exportPath(): string {
this.currentItem.cachable = false
return this.export.path
}
private typefield(field: string): string {
field = field.trim()
if (field.startsWith('bibtex.')) return this.BetterBibTeX ? field.replace(/^bibtex\./, '') : ''
if (field.startsWith('biblatex.')) return this.BetterBibLaTeX ? field.replace(/^biblatex\./, '') : ''
return field
}
public init(mode: TranslatorMode) {
this.platform = (Zotero.getHiddenPref('better-bibtex.platform') as string)
this.isJurisM = client === 'jurism'
this.isZotero = !this.isJurisM
this.paths = {
|
for (const key in this.options) {
if (typeof this.options[key] === 'boolean') {
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
this.options[key] = !!Zotero.getOption(key)
}
else {
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
this.options[key] = Zotero.getOption(key)
}
}
// special handling
if (mode === 'export') {
this.cache = {
hits: 0,
misses: 0,
}
this.export = {
dir: (Zotero.getOption('exportDir') as string),
path: (Zotero.getOption('exportPath') as string),
}
if (this.export.dir && this.export.dir.endsWith(this.paths.sep)) this.export.dir = this.export.dir.slice(0, -1)
}
for (const pref of Object.keys(this.preferences)) {
let value
try {
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
value = Zotero.getOption(`preference_${pref}`)
}
catch (err) {
value = undefined
}
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
if (typeof value === 'undefined') value = Zotero.getHiddenPref(`better-bibtex.${pref}`)
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
this.preferences[pref] = value
}
// special handling
this.skipFields = this.preferences.skipFields.toLowerCase().split(',').map(field => this.typefield(field)).filter((s: string) => s)
this.skipField = this.skipFields.reduce((acc, field) => { acc[field] = true; return acc }, {})
this.verbatimFields = this.preferences.verbatimFields.toLowerCase().split(',').map(field => this.typefield(field)).filter((s: string) => s)
if (!this.verbatimFields.length) this.verbatimFields = null
this.csquotes = this.preferences.csquotes ? { open: this.preferences.csquotes[0], close: this.preferences.csquotes[1] } : null
this.preferences.testing = (Zotero.getHiddenPref('better-bibtex.testing') as boolean)
if (mode === 'export') {
this.unicode = (this.BetterBibTeX && !Translator.preferences.asciiBibTeX) || (this.BetterBibLaTeX && !Translator.preferences.asciiBibLaTeX)
      // when exporting file data you get relative paths; otherwise you get absolute paths, so only one version can go into the cache,
      // because relative file paths differ depending on the file being exported to
this.cachable = !(this.options.exportFileData || this.preferences.relativeFilePaths)
}
this.collections = {}
if (mode === 'export' && this.header.configOptions?.getCollections && Zotero.nextCollection) {
let collection: any
while (collection = Zotero.nextCollection()) {
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
const children = collection.children || collection.descendents || []
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
const key = (collection.primary ? collection.primary : collection).key
this.collections[key] = {
// id: collection.id,
key,
parent: collection.fields.parentKey,
name: collection.name,
items: collection.childItems,
// eslint-disable-next-line @typescript-eslint/no-unsafe-return
collections: children.filter(coll => coll.type === 'collection').map(coll => coll.key),
// items: (item.itemID for item in children when item.type != 'collection')
// descendents: undefined
// children: undefined
// childCollections: undefined
// primary: undefined
// fields: undefined
// type: undefined
// level: undefined
}
}
for (collection of Object.values(this.collections)) {
if (collection.parent && !this.collections[collection.parent]) {
// collection
|
caseSensitive: this.platform !== 'mac' && this.platform !== 'win',
sep: this.platform === 'win' ? '\\' : '/',
}
|
random_line_split
|
translator.ts
|
}
type TranslatorHeader = {
translatorID: string
translatorType: number
label: string
description: string
creator: string
target: string
minVersion: string
maxVersion: string
priority: number
inRepository: boolean
lastUpdated: string
browserSupport: string
displayOptions: {
exportNotes: boolean
exportFileData: boolean
useJournalAbbreviation: boolean
keepUpdated: boolean
quickCopyMode: string
Title: boolean
Authors: boolean
Year: boolean
Normalize: boolean
}
configOptions: {
getCollections: boolean
async: boolean
}
}
export const Translator = new class implements ITranslator { // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public preferences: Preferences
public skipFields: string[]
public skipField: Record<string, boolean>
public verbatimFields?: string[]
public csquotes: { open: string, close: string }
public export: { dir: string, path: string } = {
dir: undefined,
path: undefined,
}
public options: {
quickCopyMode?: string
dropAttachments?: boolean
exportNotes?: boolean
exportFileData?: boolean
useJournalAbbreviation?: boolean
keepUpdated?: boolean
Title?: boolean
Authors?: boolean
Year?: boolean
Normalize?: boolean
}
public BetterBibLaTeX?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public BetterBibTeX?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public BetterTeX: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public BetterCSLJSON?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public BetterCSLYAML?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public BetterCSL?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public BetterBibTeXCitationKeyQuickCopy?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public BetterBibTeXJSON?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public Citationgraph?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public Collectednotes?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
// public TeX: boolean
// public CSL: boolean
private cachable: boolean
public cache: {
hits: number
misses: number
}
public header: TranslatorHeader
public collections: Record<string, ZoteroTranslator.Collection>
private sortedItems: ZoteroTranslator.Item[]
private currentItem: ZoteroTranslator.Item
public isJurisM: boolean
public isZotero: boolean
public unicode: boolean
public platform: string
public paths: {
caseSensitive: boolean
sep: string
}
public stringCompare: (a: string, b: string) => number
public initialized = false
constructor() {
this.header = (ZOTERO_TRANSLATOR_INFO as TranslatorHeader)
this[this.header.label.replace(/[^a-z]/ig, '')] = true
this.BetterTeX = this.BetterBibTeX || this.BetterBibLaTeX
this.BetterCSL = this.BetterCSLJSON || this.BetterCSLYAML
this.preferences = defaults
this.options = this.header.displayOptions || {}
const collator = new Intl.Collator('en')
this.stringCompare = (collator.compare.bind(collator) as (left: string, right: string) => number)
}
public get exportDir(): string {
this.currentItem.cachable = false
return this.export.dir
}
public get exportPath(): string {
this.currentItem.cachable = false
return this.export.path
}
private typefield(field: string): string {
field = field.trim()
if (field.startsWith('bibtex.')) return this.BetterBibTeX ? field.replace(/^bibtex\./, '') : ''
if (field.startsWith('biblatex.')) return this.BetterBibLaTeX ? field.replace(/^biblatex\./, '') : ''
return field
}
public init(mode: TranslatorMode) {
this.platform = (Zotero.getHiddenPref('better-bibtex.platform') as string)
this.isJurisM = client === 'jurism'
this.isZotero = !this.isJurisM
this.paths = {
caseSensitive: this.platform !== 'mac' && this.platform !== 'win',
sep: this.platform === 'win' ? '\\' : '/',
}
for (const key in this.options) {
if (typeof this.options[key] === 'boolean') {
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
this.options[key] = !!Zotero.getOption(key)
}
else {
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
this.options[key] = Zotero.getOption(key)
}
}
// special handling
if (mode === 'export') {
this.cache = {
hits: 0,
misses: 0,
}
this.export = {
dir: (Zotero.getOption('exportDir') as string),
path: (Zotero.getOption('exportPath') as string),
}
if (this.export.dir && this.export.dir.endsWith(this.paths.sep)) this.export.dir = this.export.dir.slice(0, -1)
}
for (const pref of Object.keys(this.preferences)) {
let value
try {
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
value = Zotero.getOption(`preference_${pref}`)
}
catch (err) {
value = undefined
}
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
if (typeof value === 'undefined') value = Zotero.getHiddenPref(`better-bibtex.${pref}`)
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
this.preferences[pref] = value
}
// special handling
this.skipFields = this.preferences.skipFields.toLowerCase().split(',').map(field => this.typefield(field)).filter((s: string) => s)
this.skipField = this.skipFields.reduce((acc, field) => { acc[field] = true; return acc }, {})
this.verbatimFields = this.preferences.verbatimFields.toLowerCase().split(',').map(field => this.typefield(field)).filter((s: string) => s)
if (!this.verbatimFields.length) this.verbatimFields = null
this.csquotes = this.preferences.csquotes ? { open: this.preferences.csquotes[0], close: this.preferences.csquotes[1] } : null
this.preferences.testing = (Zotero.getHiddenPref('better-bibtex.testing') as boolean)
if (mode === 'export') {
this.unicode = (this.BetterBibTeX && !Translator.preferences.asciiBibTeX) || (this.BetterBibLaTeX && !Translator.preferences.asciiBibLaTeX)
      // when exporting file data you get relative paths; otherwise you get absolute paths, so only one version can go into the cache,
      // because relative file paths differ depending on the file being exported to
this.cachable = !(this.options.exportFileData || this.preferences.relativeFilePaths)
}
this.collections = {}
if (mode === 'export' && this.header.configOptions?.getCollections && Zotero.nextCollection) {
let collection: any
while (collection = Zotero.nextCollection()) {
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
const children = collection.children || collection.descendents || []
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
const key = (collection.primary ? collection.primary : collection).key
this.collections[key] = {
// id: collection.id,
key,
parent: collection.fields.parentKey,
name: collection.name,
items: collection.childItems,
// eslint-disable-next-line @typescript-eslint/no-unsafe-return
collections: children.filter(coll => coll.type === 'collection').map(coll => coll.key),
// items: (item.itemID for item in children when item.type != 'collection')
// descendents: undefined
// children: undefined
// childCollections: undefined
// primary: undefined
// fields: undefined
// type: undefined
//
|
{
// collections: jabref 4 stores collection info inside the reference, and collection info depends on which part of your library you're exporting
if (['collections'].includes(property)) target.cachable = false
// eslint-disable-next-line @typescript-eslint/no-unsafe-return
return target[property]
}
|
identifier_body
|
|
dposhandler.go
|
		// only a candidate node is able to participate in this process.
return;
}
pm.lock.Lock()
defer pm.lock.Unlock()
log.Info("Preparing for next big period...");
	// pull the newest delegators from the voting contract.
a, b, err0 := VotingAccessor.Refresh()
if err0 != nil {
log.Error(err0.Error())
return;
}
DelegatorsTable = a
DelegatorNodeInfo = b
if uint8(len(GigPeriodHistory)) >= BigPeriodHistorySize {
GigPeriodHistory = GigPeriodHistory[1:] //remove the first old one.
}
if len(DelegatorsTable) == 0 || pm.ethManager.peers.Len() == 0 {
log.Info("Sorry, could not detect any delegator!");
return;
}
round := uint64(1)
activeTime := uint64(time.Now().Unix() + int64(GigPeriodInterval))
if NextGigPeriodInstance != nil {
if !TestMode {
gap := int64(NextGigPeriodInstance.activeTime) - time.Now().Unix()
if gap > 2 || gap < -2 {
log.Warn(fmt.Sprintf("Scheduling of the new electing round is improper! current gap: %v seconds", gap))
//restart the scheduler
NextElectionInfo = nil;
go pm.syncDelegatedNodeSafely();
return;
}
}
round = NextGigPeriodInstance.round + 1
activeTime = GigPeriodInstance.activeTime + uint64(GigPeriodInterval)
// keep the big period history for block validation.
GigPeriodHistory[len(GigPeriodHistory)-1] = *NextGigPeriodInstance;
GigPeriodInstance = &GigPeriodTable{
NextGigPeriodInstance.round,
NextGigPeriodInstance.state,
NextGigPeriodInstance.delegatedNodes,
NextGigPeriodInstance.delegatedNodesSign,
NextGigPeriodInstance.confirmedTickets,
NextGigPeriodInstance.confirmedBestNode,
NextGigPeriodInstance.activeTime,
};
log.Info(fmt.Sprintf("Switched the new big period round. %d ", GigPeriodInstance.round));
}
// make sure all delegators are synced at this round.
NextGigPeriodInstance = &GigPeriodTable{
round,
STATE_LOOKING,
DelegatorsTable,
SignCandidates(DelegatorsTable),
make(map[string]uint32),
make(map[string]*GigPeriodTable),
activeTime,
};
pm.trySyncAllDelegators()
}
func (pm *DPoSProtocolManager) trySyncAllDelegators() {
if TestMode {
return;
}
//send this round to all delegated peers.
	//all delegates must give their response in the SYNC_BIGPERIOD_RESPONSE state.
for _, delegator := range NextGigPeriodInstance.delegatedNodes {
		// make sure all delegators are alive.
if pm.ethManager.peers.Peer(delegator) == nil {
			// try to add DelegatorNodeInfo[i] into the peers table,
			// since we cannot talk to this delegator directly yet.
for i,e := range DelegatorsTable {
if e == delegator {
pm.eth.server.AddPeer(DelegatorNodeInfo[i]);
break;
}
}
} else {
err := pm.ethManager.peers.Peer(delegator).SendSyncBigPeriodRequest(
&SyncBigPeriodRequest{NextGigPeriodInstance.round,
NextGigPeriodInstance.activeTime,
NextGigPeriodInstance.delegatedNodes,
NextGigPeriodInstance.delegatedNodesSign,
currNodeIdHash});
if err != nil {
log.Debug("Error occurred while sending SyncBigPeriodRequest: " + err.Error())
}
}
}
}
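Before the message handler below, it may help to see the rough shape of the two sync messages it decodes. The following is only a sketch inferred from the field accesses and positional struct literals in this file; the field names match the usage here, while the concrete types (in particular the signature and state fields) are assumptions.

// Inferred sketch of the sync message shapes; the real definitions live elsewhere in this package and may differ.
type SyncBigPeriodRequest struct {
	Round              uint64   // compared against NextGigPeriodInstance.round
	ActiveTime         uint64   // copied into NextGigPeriodInstance.activeTime
	DelegatedTable     []string // compared with DelegatorsTable via reflect.DeepEqual
	DelegatedTableSign string   // checked against SignCandidates(DelegatedTable); type assumed
	NodeId             []byte   // sender id, hex-encoded via common.Bytes2Hex
}

type SyncBigPeriodResponse struct {
	Round              uint64
	ActiveTime         uint64
	DelegatedTable     []string
	DelegatedTableSign string // type assumed
	State              uint8  // e.g. STATE_CONFIRMED or STATE_MISMATCHED_ROUND; name and type assumed
	NodeId             []byte
}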
// handleMsg is invoked whenever an inbound message is received from a remote
// peer. The remote connection is torn down upon returning any error.
func (pm *DPoSProtocolManager) handleMsg(msg *p2p.Msg, p *peer) error {
pm.lock.Lock()
defer pm.lock.Unlock()
// Handle the message depending on its contents
switch {
case msg.Code == SYNC_BIGPERIOD_REQUEST:
var request SyncBigPeriodRequest;
if err := msg.Decode(&request); err != nil {
return errResp(DPOSErrDecode, "%v: %v", msg, err);
}
if SignCandidates(request.DelegatedTable) != request.DelegatedTableSign {
return errResp(DPOSErroDelegatorSign, "");
}
if DelegatorsTable == nil || len(DelegatorsTable) == 0 {
// i am not ready.
log.Info("I am not ready!!!")
return nil;
}
if request.Round == NextGigPeriodInstance.round {
if NextGigPeriodInstance.state == STATE_CONFIRMED {
log.Debug(fmt.Sprintf("I am in the agreed round %v", NextGigPeriodInstance.round));
				// if I have already confirmed this round, send it to the peer.
if TestMode {
return nil;
}
return p.SendSyncBigPeriodResponse(&SyncBigPeriodResponse{
NextGigPeriodInstance.round,
NextGigPeriodInstance.activeTime,
NextGigPeriodInstance.delegatedNodes,
NextGigPeriodInstance.delegatedNodesSign,
STATE_CONFIRMED,
currNodeIdHash});
} else {
if !reflect.DeepEqual(DelegatorsTable, request.DelegatedTable) {
if len(DelegatorsTable) < len(request.DelegatedTable) {
// refresh table if mismatch.
DelegatorsTable, DelegatorNodeInfo, _ = VotingAccessor.Refresh()
}
if !reflect.DeepEqual(DelegatorsTable, request.DelegatedTable) {
log.Debug("Delegators are mismatched in two tables.");
if TestMode {
return nil;
}
// both delegators are not matched, both lose the election power of this round.
return p.SendSyncBigPeriodResponse(&SyncBigPeriodResponse{
NextGigPeriodInstance.round,
NextGigPeriodInstance.activeTime,
NextGigPeriodInstance.delegatedNodes,
NextGigPeriodInstance.delegatedNodesSign,
STATE_MISMATCHED_DNUMBER,
currNodeIdHash});
}
}
NextGigPeriodInstance.state = STATE_CONFIRMED;
NextGigPeriodInstance.delegatedNodes = request.DelegatedTable;
NextGigPeriodInstance.delegatedNodesSign = request.DelegatedTableSign;
NextGigPeriodInstance.activeTime = request.ActiveTime;
pm.setNextRoundTimer();//sync the timer.
log.Debug(fmt.Sprintf("Agreed this table %v as %v round", NextGigPeriodInstance.delegatedNodes, NextGigPeriodInstance.round));
if TestMode {
return nil;
}
// broadcast it to all peers again.
for _, peer := range pm.ethManager.peers.peers {
err := peer.SendSyncBigPeriodResponse(&SyncBigPeriodResponse{
NextGigPeriodInstance.round,
NextGigPeriodInstance.activeTime,
NextGigPeriodInstance.delegatedNodes,
NextGigPeriodInstance.delegatedNodesSign,
STATE_CONFIRMED,
currNodeIdHash})
if (err != nil) {
log.Warn("Error occurred while sending VoteElectionRequest: " + err.Error())
}
}
}
} else if request.Round < NextGigPeriodInstance.round {
log.Debug(fmt.Sprintf("Mismatched request.round %v, CurrRound %v: ", request.Round, NextGigPeriodInstance.round))
if TestMode {
return nil;
}
return p.SendSyncBigPeriodResponse(&SyncBigPeriodResponse{
NextGigPeriodInstance.round,
NextGigPeriodInstance.activeTime,
NextGigPeriodInstance.delegatedNodes,
NextGigPeriodInstance.delegatedNodesSign,
STATE_MISMATCHED_ROUND,
currNodeIdHash});
} else if request.Round > NextGigPeriodInstance.round {
if (request.Round - NextElectionInfo.round) == 1 {
// the most reason could be the round timeframe switching later than this request.
// but we are continue switching as regular.
} else {
// attack happens.
}
}
case msg.Code == SYNC_BIGPERIOD_RESPONSE:
var response SyncBigPeriodResponse;
if err := msg.Decode(&response); err != nil {
return errResp(DPOSErrDecode, "%v: %v", msg, err);
}
if response.Round != NextGigPeriodInstance.round {
return nil;
}
if SignCandidates(response.DelegatedTable) != response.DelegatedTableSign {
return errResp(DPOSErroDelegatorSign, "");
}
nodeId := common.Bytes2Hex(response.NodeId)
log.Debug("Received SYNC Big Period response: " + nodeId);
NextGigPeriodInstance.confirmedTickets[nodeId] ++;
|
NextGigPeriodInstance.confirmedBestNode[nodeId] = &GigPeriodTable{
response.Round,
|
random_line_split
|
|
dposhandler.go
|
, ";")
ids := make([]string, len(delegatorIds))
peerinfo := make([]*discover.Node, len(delegatorIds))
for i,delegatorId := range delegatorIds {
// call delegatorInfo(string) 0x6162630000000000000000000000000000000000000000000000000000000000
data1, err0 := d.dappabi.Pack("delegatorInfo", delegatorId)
if err0 != nil {
log.Error("Error to parse delegatorInfo function.")
return nil,nil, errors.New("Error to parse delegatorInfo function.")
}
output1, err0 := d.doCall(data1)
if err0 != nil {
log.Error("Error to call delegatorInfo function.")
return nil,nil, errors.New("Error to call delegatorInfo function.")
}
var result DelegatedNodeInfoMapping
//string ip, uint port, uint256 ticket
err0 = d.dappabi.Unpack(&result, "result", output1)
if err0 != nil {
log.Error("Error to parse the result of delegatorInfo function.")
return nil,nil, errors.New("Error to parse the result of delegatorInfo function.")
}
ids[i] = delegatorId
peerinfo[i] = &discover.Node{}
}
return ids, peerinfo, nil;
}
func (d *DelegatorAccessorImpl) doCall(data []byte) ([]byte, error) {
defer func(start time.Time) { log.Debug("Executing EVM call finished", "runtime", time.Since(start)) }(time.Now())
ctx := context.Background()
state, header, err := d.b.StateAndHeaderByNumber(ctx, rpc.LatestBlockNumber)
if state == nil || err != nil {
return nil, err
}
// Set sender address or use a default if none specified
addr := common.Address{};
if !TestMode {
if wallets := d.b.AccountManager().Wallets(); len(wallets) > 0 {
if accounts := wallets[0].Accounts(); len(accounts) > 0 {
addr = accounts[0].Address
}
}
}
// Set default gas & gas price if none were set
defaultGasPrice := uint64(50 * config.Shannon)
gas, gasPrice := uint64(math.MaxUint64 / 2), new(big.Int).SetUint64(defaultGasPrice)
// Create new call message
msg := types.NewMessage(addr, &core.DPOSBallotContractAddress, 0, new(big.Int), gas, gasPrice, data, false)
// Set up the context so it may be cancelled once the call has completed.
var cancel context.CancelFunc
ctx, cancel = context.WithCancel(ctx)
// Make sure the context is cancelled when the call has completed
// this makes sure resources are cleaned up.
defer cancel()
// Get a new instance of the EVM.
evm, vmError, err := d.b.GetEVM(ctx, msg, state, header, vm.Config{})
if err != nil {
return nil, err
}
// Wait for the context to be done and cancel the EVM. Even if the
// EVM has already finished, cancelling it again is harmless.
go func() {
<-ctx.Done()
evm.Cancel()
}()
// Setup the gas pool (also for unmetered requests)
// and apply the message.
gp := new(core.GasPool).AddGas(math.MaxUint64)
res, gas, _, err := core.ApplyMessage(evm, msg, gp)
if err := vmError(); err != nil {
return nil, err
}
return res, err
}
type DPoSProtocolManager struct {
networkId uint64;
eth *JuchainService;
ethManager *ProtocolManager;
blockchain *core.BlockChain;
lock *sync.Mutex; // protects running
packager *dpos.Packager;
t1 *time.Timer; // global synchronized timer.
}
// NewDPoSProtocolManager returns a new DPoS sub-protocol manager. The JuchainService sub-protocol manages peers capable
// of interacting with the obod network.
func NewDPoSProtocolManager(eth *JuchainService, ethManager *ProtocolManager, config *config.ChainConfig, config2 *node.Config,
mode downloader.SyncMode, networkId uint64, blockchain *core.BlockChain, engine consensus.Engine) (*DPoSProtocolManager, error) {
// Set sender address or use a default if none specified
// Create the protocol manager with the base fields
manager := &DPoSProtocolManager{
networkId: networkId,
eth: eth,
ethManager: ethManager,
blockchain: blockchain,
lock: &sync.Mutex{},
packager: dpos.NewPackager(config, engine, DefaultConfig.Etherbase, eth, eth.EventMux()),
}
currNodeId = discover.PubkeyID(&config2.NodeKey().PublicKey).TerminalString();
currNodeIdHash = common.Hex2Bytes(currNodeId);
if TestMode {
VotingAccessor = &DelegatorAccessorTestImpl{currNodeId:currNodeId, currNodeIdHash:currNodeIdHash};
DelegatorsTable, DelegatorNodeInfo, _ = VotingAccessor.Refresh();
} else {
/**
var addr common.Address;
if wallets := eth.ApiBackend.AccountManager().Wallets(); len(wallets) > 0 {
if accounts := wallets[0].Accounts(); len(accounts) > 0 {
addr = accounts[0].Address
}
}
if addr == (common.Address{}) {
log.Error("We must have a default address to activate dpos delegator consensus")
return nil, errors.New("we must have a default address to activate dpos delegator consensus")
}*/
dappabi, err := abi.JSON(strings.NewReader(core.DPOSBallotABI))
if err != nil {
log.Error("Unable to load DPoS Ballot ABI object!")
return nil, errors.New("Unable to load DPoS Ballot ABI object!")
}
VotingAccessor = &DelegatorAccessorImpl{dappabi: dappabi, blockchain: eth.blockchain, b: eth.ApiBackend};
DelegatorsTable, DelegatorNodeInfo, err = VotingAccessor.Refresh();
}
return manager, nil;
}
func (pm *DPoSProtocolManager) Start() {
if pm.isDelegatedNode() {
log.Info("I am a delegator.")
pm.packager.Start();
go pm.schedule();
if !TestMode {
time.AfterFunc(time.Second*time.Duration(SmallPeriodInterval), pm.syncDelegatedNodeSafely) //initial attempt.
}
}
}
func (pm *DPoSProtocolManager) schedule() {
t2 := time.NewTimer(time.Second * time.Duration(1))
for {
select {
case <-t2.C:
go pm.roundRobinSafely();
t2 = time.NewTimer(time.Second * time.Duration(1))
}
}
}
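// schedule re-arms a fresh one-second timer on every iteration. A functionally
// equivalent sketch using time.Ticker, shown only to illustrate the polling
// behaviour (this is not the author's implementation):
//
//	func (pm *DPoSProtocolManager) scheduleWithTicker() {
//		ticker := time.NewTicker(time.Second)
//		defer ticker.Stop()
//		for range ticker.C {
//			go pm.roundRobinSafely()
//		}
//	}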
// syncDelegatedNodeSafely prepares and synchronizes the next electing round.
func (pm *DPoSProtocolManager) syncDelegatedNodeSafely() {
if !pm.isDelegatedNode() {
// only a candidate node is able to participate in this process.
return;
}
pm.lock.Lock()
defer pm.lock.Unlock()
log.Info("Preparing for next big period...");
// pull the newest delegators from voting contract.
a, b, err0 := VotingAccessor.Refresh()
if err0 != nil {
log.Error(err0.Error())
return;
}
DelegatorsTable = a
DelegatorNodeInfo = b
if uint8(len(GigPeriodHistory)) >= BigPeriodHistorySize {
GigPeriodHistory = GigPeriodHistory[1:] //remove the first old one.
}
if len(DelegatorsTable) == 0 || pm.ethManager.peers.Len() == 0 {
log.Info("Sorry, could not detect any delegator!");
return;
}
round := uint64(1)
activeTime := uint64(time.Now().Unix() + int64(GigPeriodInterval))
if NextGigPeriodInstance != nil {
if !TestMode {
gap := int64(NextGigPeriodInstance.activeTime) - time.Now().Unix()
if gap > 2 || gap < -2 {
log.Warn(fmt.Sprintf("Scheduling of the new electing round is improper! current gap: %v seconds", gap))
//restart the scheduler
NextElectionInfo = nil;
go pm.syncDelegatedNodeSafely();
return;
}
}
round = NextGigPeriodInstance.round + 1
activeTime = GigPeriodInstance.activeTime + uint64(GigPeriodInterval)
// keep the big period history for block validation.
GigPeriodHistory[len(GigPeriodHistory)-1] = *NextGigPeriodInstance;
GigPeriodInstance = &GigPeriodTable{
NextGigPeriodInstance.round,
NextGigPeriodInstance.state,
NextGigPeriodInstance.delegatedNodes,
NextGigPeriodInstance.delegatedNodesSign,
NextGigPeriodInstance.confirmedTickets,
NextGigPeriodInstance.confirmedBestNode,
NextGigPeriodInstance.activeTime,
};
log.Info(fmt.Sprintf("Switched the new big period round. %d ", GigPeriodInstance.round));
}
// make sure all delegators are synced at this round.
NextGigPeriodInstance = &GigPeriodTable{
round,
STATE_LOOKING,
DelegatorsTable,
SignCandidates(DelegatorsTable),
make(map[string]uint32),
make(map[string]*GigPeriodTable),
activeTime,
};
pm.trySyncAllDelegators()
}
// handleMsg is invoked whenever an inbound message is received from a remote
// peer. The remote connection is torn down upon returning any error.
func (pm *DPoSProtocolManager) handleMsg(msg *p2p.Msg, p *peer) error {
pm.lock.Lock()
defer pm.lock.Unlock()
// Handle the message depending on its contents
switch {
case msg.Code == SYNC_BIGPERIOD_REQUEST:
var request SyncBigPeriodRequest;
if err := msg.Decode(&request); err != nil {
return errResp(DPOSErrDecode, "%v: %v", msg, err);
}
if SignCandidates(request.DelegatedTable) != request.DelegatedTableSign {
return errResp(DPOSErroDelegatorSign, "");
}
if DelegatorsTable == nil || len(DelegatorsTable) == 0 {
// this node is not ready yet.
log.Info("I am not ready!!!")
return nil;
}
if request.Round == NextGigPeriodInstance.round {
if NextGigPeriodInstance.state == STATE_CONFIRMED {
log.Debug(fmt.Sprintf("I am in the agreed round %v", NextGigPeriodInstance.round));
// if this round has already been confirmed, send it back to the peer.
if TestMode {
return nil;
}
return p.SendSyncBigPeriodResponse(&SyncBigPeriodResponse{
NextGigPeriodInstance.round,
NextGigPeriodInstance.activeTime,
NextGigPeriodInstance.delegatedNodes,
NextGigPeriodInstance.delegatedNodesSign,
STATE_CONFIRMED,
currNodeIdHash});
} else {
if !reflect.DeepEqual(DelegatorsTable, request.DelegatedTable) {
if len(DelegatorsTable) < len(request.DelegatedTable) {
// refresh table if mismatch.
DelegatorsTable, DelegatorNodeInfo, _ = VotingAccessor.Refresh()
}
if !reflect.DeepEqual(DelegatorsTable, request.DelegatedTable) {
log.Debug("Delegators are mismatched in two tables.");
if TestMode {
return nil;
}
// the delegator tables still do not match; both sides lose election power for this round.
return p.SendSyncBigPeriodResponse(&SyncBigPeriodResponse{
NextGigPeriodInstance.round,
NextGigPeriodInstance.activeTime,
NextGigPeriodInstance.delegatedNodes,
NextGigPeriodInstance.delegatedNodesSign,
STATE_MISMATCHED_DNUMBER,
currNodeIdHash});
}
}
NextGigPeriodInstance.state = STATE_CONFIRMED;
NextGigPeriodInstance.delegatedNodes = request.DelegatedTable;
NextGigPeriodInstance.delegatedNodesSign = request.DelegatedTableSign;
NextGigPeriodInstance.activeTime = request.ActiveTime;
pm.setNextRoundTimer();//sync the timer.
log.Debug(fmt.Sprintf("Agreed this table %v as %v round", NextGigPeriodInstance.delegatedNodes, NextGigPeriodInstance.round));
if TestMode {
return nil;
}
// broadcast it to all peers again.
for _, peer := range pm.ethManager.peers.peers {
err := peer.SendSyncBigPeriodResponse(&SyncBigPeriodResponse{
NextGigPeriodInstance.round,
NextGigPeriodInstance.activeTime,
NextGigPeriodInstance.delegatedNodes,
NextGigPeriodInstance.delegatedNodesSign,
STATE_CONFIRMED,
currNodeIdHash})
if (err != nil) {
log.Warn("Error occurred while sending VoteElectionRequest: " + err.Error())
}
}
}
} else if request.Round < NextGigPeriodInstance.round {
log.Debug(fmt.Sprintf("Mismatched request.round %v, CurrRound %v: ", request.Round, NextGigPeriodInstance.round))
if TestMode {
return nil;
}
return p.SendSyncBigPeriodResponse(&SyncBigPeriodResponse{
NextGigPeriodInstance.round,
NextGigPeriodInstance.activeTime,
NextGigPeriodInstance.delegatedNodes,
NextGigPeriodInstance.delegatedNodesSign,
STATE_MISMATCHED_ROUND,
currNodeIdHash});
} else if request.Round > NextGigPeriodInstance.round {
if (request.Round - NextElectionInfo.round) == 1 {
// the most likely reason is that our round switch fired later than this request,
// so we simply keep switching as usual.
} else {
// otherwise this could be a malicious or badly skewed peer.
}
}
case msg.Code == SYNC_BIGPERIOD_RESPONSE:
var response SyncBigPeriodResponse;
if err := msg.Decode(&response); err != nil {
return errResp(DPOSErrDecode, "%v: %v", msg, err);
}
if response.Round != NextGigPeriodInstance.round {
return nil;
}
if SignCandidates(response.DelegatedTable) != response.DelegatedTableSign {
return errResp(DPOSErroDelegatorSign, "");
}
nodeId := common.Bytes2Hex(response.NodeId)
log.Debug("Received SYNC Big Period response: " + nodeId);
NextGigPeriodInstance.confirmedTickets[nodeId] ++;
NextGigPeriodInstance.confirmedBestNode[nodeId] = &GigPeriodTable{
response.Round,
STATE_CONFIRMED,
response.DelegatedTable,
response.DelegatedTableSign,
nil,
nil,
response.ActiveTime,
};
maxTickets, bestNodeId := uint32(0), "";
for key, value := range NextGigPeriodInstance.confirmedTickets {
if maxTickets < value {
maxTickets = value;
bestNodeId = key;
}
}
if NextGigPeriodInstance.state == STATE_CONFIRMED {
// set the best node as the final state.
bestNode := NextGigPeriodInstance.confirmedBestNode[bestNodeId];
NextGigPeriodInstance.delegatedNodes = bestNode.delegatedNodes;
NextGigPeriodInstance.delegatedNodesSign = bestNode.delegatedNodesSign;
NextGigPeriodInstance.activeTime = bestNode.activeTime;
log.Debug(fmt.Sprintf("Updated the best table: %v", bestNode.delegatedNodes));
pm.setNextRoundTimer();
} else if NextGigPeriodInstance.state == STATE_LOOKING && uint32(NextGigPeriodInstance.confirmedTickets[bestNodeId]) > uint32(len(NextGigPeriodInstance.delegatedNodes)) {
NextGigPeriodInstance.state = STATE_CONFIRMED;
NextGigPeriodInstance.delegatedNodes = response.DelegatedTable;
NextGigPeriodInstance.delegatedNodesSign = response.DelegatedTableSign;
NextGigPeriodInstance.activeTime = response.ActiveTime;
pm.setNextRoundTimer();
} else if response.State == STATE_MISMATCHED_ROUND {
// force the creation of a new round.
NextGigPeriodInstance = &GigPeriodTable{
response.Round,
STATE_LOOKING,
response.DelegatedTable,
response.DelegatedTableSign,
make(map[string]uint32),
make(map[string]*GigPeriodTable),
response.ActiveTime,
};
pm.trySyncAllDelegators()
} else if response.State == STATE_MISMATCHED_DNUMBER {
// refresh the table only; this node loses election power for this round.
DelegatorsTable, DelegatorNodeInfo, _ = VotingAccessor.Refresh()
}
return nil;
default:
return errResp(ErrInvalidMsgCode, "%v", msg.Code)
}
return nil
}
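// The SYNC_BIGPERIOD_RESPONSE branch above counts one ticket per responding node
// and adopts the table proposed by the node holding the most tickets once that
// count exceeds the number of delegated nodes. A standalone sketch of that tally
// rule (illustrative only, not part of the protocol code):
//
//	func bestConfirmedNode(tickets map[string]uint32) (bestId string, best uint32) {
//		for id, n := range tickets {
//			if n > best {
//				bestId, best = id, n
//			}
//		}
//		return bestId, best
//	}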
func (pm *DPoSProtocolManager) setNextRoundTimer() {
leftTime := int64(NextGigPeriodInstance.activeTime) - time.Now().Unix()
if leftTime < 1 {
log.Warn("Discard this round due to the expiration of the active time.")
go pm.syncDelegatedNodeSafely()
return;
}
if pm.t1 != nil {
// this could be a problem if the running timer cannot be cancelled in time.
pm.t1.Stop()
}
pm.t1 = time.AfterFunc(time.Second*time.Duration(leftTime), pm.syncDelegatedNodeSafely)
log.Debug(fmt.Sprintf("scheduled for next round in %v seconds", leftTime))
}
// the node would not be a candidate if it is not qualified.
func (pm *DPoSProtocolManager) isDelegatedNode() bool {
if DelegatorsTable == nil {
return false;
}
for i := 0; i < len(DelegatorsTable); i++ {
if DelegatorsTable[i] == currNodeId {
return true;
}
}
return false;
}
func (pm *DPoSProtocolManager) isDelegatedNode2(nodeId string) bool {
if DelegatorsTable == nil {
return false;
}
for i := 0; i < len(DelegatorsTable); i++ {
if DelegatorsTable[i] == nodeId {
return true;
}
}
return false;
}
staging.py
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code to provide a hook for staging.
Some App Engine runtimes require an additional staging step before deployment
(e.g. when deploying compiled artifacts, or vendoring code that normally lives
outside of the app directory). This module contains (1) a registry mapping
runtime/environment combinations to staging commands, and (2) code to run said
commands.
The interface is defined as follows:
- A staging command is an executable (binary or script) that takes two
positional parameters: the path of the `<service>.yaml` in the directory
containing the unstaged application code, and the path of an empty directory
in which to stage the application code.
- On success, the STDOUT and STDERR of the staging command are logged at the
INFO level. On failure, a StagingCommandFailedError is raised containing the
STDOUT and STDERR of the staging command (which are surfaced to the user as an
ERROR message).
"""
import cStringIO
import os
import tempfile
from googlecloudsdk.api_lib.app import util
from googlecloudsdk.command_lib.util import java
from googlecloudsdk.core import config
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import execution_utils
from googlecloudsdk.core import log
from googlecloudsdk.core.updater import update_manager
from googlecloudsdk.core.util import files
from googlecloudsdk.core.util import platforms
_JAVA_APPCFG_ENTRY_POINT = 'com.google.appengine.tools.admin.AppCfg'
_JAVA_APPCFG_STAGE_FLAGS = [
'--enable_jar_splitting',
'--enable_jar_classes']
_STAGING_COMMAND_OUTPUT_TEMPLATE = """\
------------------------------------ STDOUT ------------------------------------
{out}\
------------------------------------ STDERR ------------------------------------
{err}\
--------------------------------------------------------------------------------
"""
class NoSdkRootError(exceptions.Error):
def __init__(self):
super(NoSdkRootError, self).__init__(
'No SDK root could be found. Please check your installation.')
class StagingCommandFailedError(exceptions.Error):
def __init__(self, args, return_code, output_message):
super(StagingCommandFailedError, self).__init__(
'Staging command [{0}] failed with return code [{1}].\n\n{2}'.format(
' '.join(args), return_code, output_message))
def _StagingProtocolMapper(command_path, descriptor, app_dir, staging_dir):
return [command_path, descriptor, app_dir, staging_dir]
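# An illustrative call of the default mapper above; the paths are placeholders,
# not real SDK locations:
#
#   _StagingProtocolMapper('/sdk/go-app-stager', 'app.yaml', '/src/app', '/tmp/stage')
#   # -> ['/sdk/go-app-stager', 'app.yaml', '/src/app', '/tmp/stage']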
def _JavaStagingMapper(command_path, descriptor, app_dir, staging_dir):
"""Map a java staging request to the right args.
Args:
command_path: str, path to the jar tool file.
descriptor: str, path to the `appengine-web.xml`
app_dir: str, path to the unstaged app directory
staging_dir: str, path to the empty staging dir
Raises:
java.JavaError, if Java is not installed.
Returns:
[str], args for executable invocation.
"""
del descriptor # Unused, app_dir is sufficient
java.CheckIfJavaIsInstalled('local staging for java')
java_bin = files.FindExecutableOnPath('java')
args = ([java_bin, '-classpath', command_path, _JAVA_APPCFG_ENTRY_POINT] +
_JAVA_APPCFG_STAGE_FLAGS + ['stage', app_dir, staging_dir])
return args
class _Command(object):
"""Represents a cross-platform command.
Paths are relative to the Cloud SDK Root directory.
Attributes:
nix_path: str, the path to the executable on Linux and OS X
windows_path: str, the path to the executable on Windows
component: str or None, the name of the Cloud SDK component which contains
the executable
mapper: fn or None, function that maps a staging invocation to a command.
"""
def __init__(self, nix_path, windows_path, component=None, mapper=None):
self.nix_path = nix_path
self.windows_path = windows_path
self.component = component
self.mapper = mapper or _StagingProtocolMapper
@property
def name(self):
if platforms.OperatingSystem.Current() is platforms.OperatingSystem.WINDOWS:
return self.windows_path
else:
return self.nix_path
def GetPath(self):
"""Returns the path to the command.
Returns:
str, the path to the command
Raises:
NoSdkRootError: if no Cloud SDK root could be found (and therefore the
command is not installed).
"""
sdk_root = config.Paths().sdk_root
if not sdk_root:
raise NoSdkRootError()
return os.path.join(sdk_root, self.name)
def EnsureInstalled(self):
if self.component is None:
return
msg = ('The component [{component}] is required for staging this '
'application.').format(component=self.component)
update_manager.UpdateManager.EnsureInstalledAndRestart([self.component],
msg=msg)
def Run(self, staging_area, descriptor, app_dir):
"""Invokes a staging command with a given <service>.yaml and temp dir.
Args:
staging_area: str, path to the staging area.
descriptor: str, path to the unstaged <service>.yaml or appengine-web.xml
app_dir: str, path to the unstaged app directory
Returns:
str, the path to the staged directory.
Raises:
StagingCommandFailedError: if the staging command process exited non-zero.
"""
staging_dir = tempfile.mkdtemp(dir=staging_area)
args = self.mapper(self.GetPath(), descriptor, app_dir, staging_dir)
log.info('Executing staging command: [{0}]\n\n'.format(' '.join(args)))
out = cStringIO.StringIO()
err = cStringIO.StringIO()
return_code = execution_utils.Exec(args, no_exit=True, out_func=out.write,
err_func=err.write)
message = _STAGING_COMMAND_OUTPUT_TEMPLATE.format(out=out.getvalue(),
err=err.getvalue())
log.info(message)
if return_code:
raise StagingCommandFailedError(args, return_code, message)
return staging_dir
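# Illustrative use of _Command (the paths and component name here are
# placeholders, not entries from the registries defined below):
#
#   cmd = _Command('bin/stager', 'bin/stager.exe', component='app-engine-go')
#   cmd.EnsureInstalled()
#   staged_dir = cmd.Run(staging_area, 'app.yaml', app_dir)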
# Path to the go-app-stager binary
_GO_APP_STAGER_DIR = os.path.join('platform', 'google_appengine')
# Path to the jar which contains the staging command
_APPENGINE_TOOLS_JAR = os.path.join(
'platform', 'google_appengine', 'google', 'appengine', 'tools', 'java',
'lib', 'appengine-tools-api.jar')
# STAGING_REGISTRY is a map of (runtime, app-engine-environment) to executable
# path relative to Cloud SDK Root; it should look something like the following:
#
# from googlecloudsdk.api_lib.app import util
# STAGING_REGISTRY = {
# ('intercal', util.Environment.FLEX):
# _Command(
# os.path.join('command_dir', 'stage-intercal-flex.sh'),
# os.path.join('command_dir', 'stage-intercal-flex.exe'),
# component='app-engine-intercal'),
# ('x86-asm', util.Environment.STANDARD):
# _Command(
# os.path.join('command_dir', 'stage-x86-asm-standard'),
# os.path.join('command_dir', 'stage-x86-asm-standard.exe'),
# component='app-engine-intercal'),
# }
_STAGING_REGISTRY = {
('go', util.Environment.STANDARD):
_Command(
os.path.join(_GO_APP_STAGER_DIR, 'go-app-stager'),
os.path.join(_GO_APP_STAGER_DIR, 'go-app-stager.exe'),
component='app-engine-go'),
('go', util.Environment.MANAGED_VMS):
_Command(
os.path.join(_GO_APP_STAGER_DIR, 'go-app-stager'),
os.path.join(_GO_APP_STAGER_DIR, 'go-app-stager.exe'),
component='app-engine-go'),
('go', util.Environment.FLEX):
_Command(
os.path.join(_GO_APP_STAGER_DIR, 'go-app-stager'),
os.path.join(_GO_APP_STAGER_DIR, 'go-app-stager.exe'),
component='app-engine-go'),
}
# _STAGING_REGISTRY_BETA extends _STAGING_REGISTRY, overriding entries if the
# same key is used.
_STAGING_REGISTRY_BETA = {
('java-xml', util.Environment.STANDARD):
_Command(
_APPENGINE_TOOLS_JAR,
_APPENGINE_TOOLS_JAR,
component='app-engine-java',
mapper=_JavaStagingMapper)
}
class Stager(object):
def __init__(self, registry, staging_area):
self.registry = registry
self.staging_area = staging_area
def Stage(self, descriptor, app_dir, runtime, environment):
"""Stage the given deployable or do nothing if N/A.
Args:
descriptor: str, path to the unstaged <service>.yaml or appengine-web.xml
app_dir: str, path to the unstaged app directory
runtime: str, the name of the runtime for the application to stage
environment: api_lib.app.util.Environment, the environment for the
application to stage
Returns:
str, the path to the staged directory or None if no corresponding staging
command was found.
Raises:
NoSdkRootError: if no Cloud SDK installation root could be found.
StagingCommandFailedError: if the staging command process exited non-zero.
"""
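# The remainder of Stage() is cut off in this excerpt. A minimal sketch of the
# likely flow, based only on the registry and the _Command methods defined above
# (an assumption, not the original implementation):
#
#   command = self.registry.get((runtime, environment))
#   if not command:
#     return None
#   command.EnsureInstalled()
#   return command.Run(self.staging_area, descriptor, app_dir)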
rozetka_webscrapper.py
#sellers
#parse vars_list
parsed_comment_vars_list = []
if comment_vars_list:
comment_vars_lists = comment_vars_list.find_all(class_="comment__vars-item")
if comment_vars_lists:
for item in comment_vars_lists:
res = {}
label = item.find(class_="comment__vars-label")
value = item.find(class_="comment__vars-value")
if label:
res["label"] = decode_str(label.get_text())
if value:
res["value"] = decode_str(value.get_text())
if res:
parsed_comment_vars_list.append(res)
parsed_comment.vars_list = parsed_comment_vars_list
#parse rating
comment_rating = comment.find("rz-comment-rating") #may be Empty
#has 5 items
#each star has fill(#0) or fill(#1)
# svg path (tag) fill
if comment_rating:
stars = comment_rating.find_all("svg")
stars_count = 0
for star in stars:
path = star.find("path")
if path:
fill = path.get("fill")
if fill == "url(#1)":
stars_count += 1
parsed_comment.rating = stars_count
#parse essentials
comment_text = comment.find(class_="comment__text")
if comment_text:
parsed_comment.text = decode_str(comment_text.get_text())
comment_essentials_list = comment.find_all(class_="comment__essentials-item") #has label and optional <dd> with text
parsed_essentials_list = []
if comment_essentials_list:
for essential in comment_essentials_list:
res = {}
essential_label = essential.find("dt", class_="comment__essentials-label")
essential_data = essential.find("dd")
if essential_label:
res["label"] = decode_str(essential_label.get_text())
if essential_data:
res["data"] = decode_str(essential_data.get_text())
parsed_essentials_list.append(res)
parsed_comment.essentials_list = parsed_essentials_list
#parse attached photos
parsed_photos_urls = []
comment_attached_photos_urls = comment.find(class_="product-comments__photos-list")
if comment_attached_photos_urls:
photos_list = comment_attached_photos_urls.find_all(class_="product-comments__photos-item")
if photos_list:
for photo in photos_list:
img = photo.find("img")
if img:
url = img.get("src")
parsed_photos_urls.append(url)
parsed_comment.attached_photos_urls = parsed_photos_urls
return parsed_comment
def parse_item_page_for_comments(page):
soup = BeautifulSoup(page.text, 'html.parser')
# get the comments
comments = soup.find('comment-list')
#print(comments)
parsed_comments = []
# find all comment list items of that class
if comments:
comments_list = comments.find_all("li", class_="product-comments__list-item")
comments_count = 0
if comments_list:
for comment in comments_list:
parsed_comments.append(parse_comment(comment))
comments_count += 1
if comments_count >= settings.COMMENTS_PER_PAGE_LIMIT:
break
return parsed_comments
def parse_item_page_for_description(url):
page = requests.get(url)
soup = BeautifulSoup(page.text, 'html.parser')
description = soup.find(class_="product-about__description-content")
return decode_str(description.get_text()) if description else "" #runtime generated
def parse_item_page(url):
parsed_item = Item()
parsed_item.description = parse_item_page_for_description(url)
page = requests.get(url+'comments/')
parsed_item.url = url
soup = BeautifulSoup(page.text, 'html.parser')
title = soup.find(class_="product__title")
if title:
parsed_item.name = decode_str(title.get_text())
if page.reason == 'OK':
print('parse item:', parsed_item.name)
parsed_item.comments = parse_item_page_for_comments(page)
else:
parsed_item.error = page.reason
return parsed_item
def parse_specific_items_group(url):
driver = Driver.get()
driver.get(url)
html = driver.page_source
parsed_group = Group()
parsed_group.url = url
soup = BeautifulSoup(html, 'html.parser')
title = soup.find("h1", class_="catalog-heading")
if title:
parsed_group.name = decode_str(title.get_text())
print('parse group:', parsed_group.name)
parsed_items = []
if html != "":
#parse items in group
item_wrappers = soup.find_all("div", class_="goods-tile")
if item_wrappers:
items_count = 0
for item_wrapper in item_wrappers:
item_link_holder = item_wrapper.find("a", class_="goods-tile__picture")
item_href = item_link_holder.get("href")
if item_href:
parsed_item = parse_item_page(item_href)
parsed_items.append(parsed_item)
items_count += 1
if items_count >= settings.ITEMS_PER_GROUP_LIMIT:
break
else:
print("PARSED ITEMS:", str(items_count) , "/", str(min(len(item_wrappers), settings.ITEMS_PER_GROUP_LIMIT)))
parsed_group.items = parsed_items
else:
parsed_group.error = "error"
return parsed_group
def
|
(url):
driver = Driver.get()
driver.get(url)
html = driver.page_source
parsed_category = Category()
parsed_category.url = url
soup = BeautifulSoup(html, 'html.parser')
title = soup.find("h1", class_="portal__heading")
if title:
parsed_category.name = decode_str(title.get_text())
print('parse category:', parsed_category.name)
parsed_groups = []
if html != "":
#parse groups in category
group_wrappers = soup.find_all("div", class_="tile-cats")
if group_wrappers:
groups_count = 0
for group_wrapper in group_wrappers:
group_link_holder = group_wrapper.find("a", class_="tile-cats__picture")
group_href = group_link_holder.get("href")
if group_href:
parsed_group = parse_specific_items_group(group_href)
parsed_groups.append(parsed_group)
groups_count += 1
if groups_count >= settings.GROUPS_PER_CATEGORY_LIMIT:
break
else:
print("PARSED GROUPS:", str(groups_count) , "/", str(min(len(group_wrappers), settings.GROUPS_PER_CATEGORY_LIMIT)))
parsed_category.groups = parsed_groups
else:
parsed_category.error = "error"
return parsed_category
def parse_root():
url = 'https://rozetka.com.ua/'
driver = Driver.get()
driver.get(url)
html = driver.page_source
parsed_categories = []
categories_count = 0
soup = BeautifulSoup(html, 'html.parser')
link_holders = soup.find_all("a", class_="menu-categories__link")
if link_holders:
for link_holder in link_holders:
link = link_holder.get("href")
category = parse_category(link)
if category:
parsed_categories.append(category)
categories_count += 1
if categories_count >= settings.CATEGORIES_LIMIT:
break
else:
print("PARSED CATEGORIES:", str(categories_count) , "/", str(min(len(link_holders), settings.CATEGORIES_LIMIT)))
return parsed_categories
def scrap_rozetka_web_site():
time_start = datetime.datetime.now()
print("Parsing started at:", time_start)
parsed_site_data = parse_root()
time_end = datetime.datetime.now()
print("Parsing ended at:", time_end)
print("Parsing took:", time_end - time_start)
#### testing of correct json parsing
# for parsed_category in parsed_site_data :
# reparsed = model.category.Category.fromJson(parsed_category.toJson())
# #print(reparsed)
# #print(isinstance(reparsed, model.category.Category))
# for g in reparsed.groups:
# #print(g)
# #print(isinstance(g, model.group.Group))
# for i in g.items:
# #print(i)
# #print(isinstance(i, model.item.Item))
# for c in i.comments:
# #print(c)
# #print(isinstance(c, model.item.Comment))
Driver.close()
print("End of parsing!")
Driver.quit()
#filter empty categories
filtered_parsed_site_data = []
for c in parsed_site_data:
if len(c.groups) > 0:
#groups = []
#for g in c.groups:
# if len(g.items) > 0:
# groups.append(g)
#c.groups = groups
filtered_parsed_site_data.append(c)
print("Saving to file!")
fw.write_plain_iterable(
settings.SITE_SCRAP_RELATIVE_FILE_PATH_STRING.format(str(datetime.datetime.now()).replace(" ", "_").replace(":","").replace(".", "")).replace("/+", "/"),
filtered_parsed_site_data,
lambda o : o.toJson(),
encoding='utf-8'
)
return parsed_site_data
#top category 'https://rozetka.com.ua/computers-notebooks/c80253/'
#parsed_category = parse_category('https://rozetka.com.ua/computers-notebooks/c80253/')
#print(parsed_category)
#specific category 'https://rozetka.com.ua/notebooks/c80004/'
#parsed_group = parse_specific_items
|
parse_category
|
identifier_name
|
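A minimal, self-contained sketch of the star-counting approach used in the rating parsing above: count the <path> elements whose fill attribute is "url(#1)". The HTML snippet is invented for illustration and is not taken from the live site.

# Hedged sketch of the rating logic above: a star counts as filled when
# its <svg><path> carries fill="url(#1)". The sample markup is made up.
from bs4 import BeautifulSoup

sample = """
<rz-comment-rating>
  <svg><path fill="url(#1)"></path></svg>
  <svg><path fill="url(#1)"></path></svg>
  <svg><path fill="url(#1)"></path></svg>
  <svg><path fill="url(#0)"></path></svg>
  <svg><path fill="url(#0)"></path></svg>
</rz-comment-rating>
"""

def count_filled_stars(rating_tag):
    stars_count = 0
    for star in rating_tag.find_all("svg"):
        path = star.find("path")
        if path and path.get("fill") == "url(#1)":
            stars_count += 1
    return stars_count

soup = BeautifulSoup(sample, "html.parser")
print(count_filled_stars(soup.find("rz-comment-rating")))  # -> 3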
rozetka_webscrapper.py
|
") #sellers
#parse vars_list
parsed_comment_vars_list = []
if comment_vars_list:
comment_vars_lists = comment_vars_list.find_all(class_="comment__vars-item")
if comment_vars_lists:
for item in comment_vars_lists:
res = {}
label = item.find(class_="comment__vars-label")
value = item.find(class_="comment__vars-value")
if label:
res["label"] = decode_str(label.get_text())
if value:
res["value"] = decode_str(value.get_text())
if res:
parsed_comment_vars_list.append(res)
parsed_comment.vars_list = parsed_comment_vars_list
#parse rating
comment_rating = comment.find("rz-comment-rating") #may be Empty
#has 5 items
#each star has fill(#0) or fill(#1)
# svg path (tag) fill
if comment_rating:
stars = comment_rating.find_all("svg")
stars_count = 0
for star in stars:
path = star.find("path")
if path:
fill = path.get("fill")
|
#parse essentials
comment_text = comment.find(class_="comment__text")
if comment_text:
parsed_comment.text = decode_str(comment_text.get_text())
comment_essentials_list = comment.find_all(class_="comment__essentials-item") #has label and optional <dd> with text
parsed_essentials_list = []
if comment_essentials_list:
for essential in comment_essentials_list:
res = {}
essential_label = essential.find("dt", class_="comment__essentials-label")
essential_data = essential.find("dd")
if essential_label:
res["label"] = decode_str(essential_label.get_text())
if essential_data:
res["data"] = decode_str(essential_data.get_text())
parsed_essentials_list.append(res)
parsed_comment.essentials_list = parsed_essentials_list
#parse attached photos
parsed_photos_urls = []
comment_attached_photos_urls = comment.find(class_="product-comments__photos-list")
if comment_attached_photos_urls:
photos_list = comment_attached_photos_urls.find_all(class_="product-comments__photos-item")
if photos_list:
for photo in photos_list:
img = photo.find("img")
if img:
url = img.get("src")
parsed_photos_urls.append(url)
parsed_comment.attached_photos_urls = parsed_photos_urls
return parsed_comment
def parse_item_page_for_comments(page):
soup = BeautifulSoup(page.text, 'html.parser')
# get the comments
comments = soup.find('comment-list')
#print(comments)
parsed_comments = []
# find all comment list items of that class
if comments:
comments_list = comments.find_all("li", class_="product-comments__list-item")
comments_count = 0
if comments_list:
for comment in comments_list:
parsed_comments.append(parse_comment(comment))
comments_count += 1
if comments_count >= settings.COMMENTS_PER_PAGE_LIMIT:
break
return parsed_comments
def parse_item_page_for_description(url):
page = requests.get(url)
soup = BeautifulSoup(page.text, 'html.parser')
description = soup.find(class_="product-about__description-content")
return decode_str(description.get_text()) if description else "" #runtime generated
def parse_item_page(url):
parsed_item = Item()
parsed_item.description = parse_item_page_for_description(url)
page = requests.get(url+'comments/')
parsed_item.url = url
soup = BeautifulSoup(page.text, 'html.parser')
title = soup.find(class_="product__title")
if title:
parsed_item.name = decode_str(title.get_text())
if page.reason == 'OK':
print('parse item:', parsed_item.name)
parsed_item.comments = parse_item_page_for_comments(page)
else:
parsed_item.error = page.reason
return parsed_item
def parse_specific_items_group(url):
driver = Driver.get()
driver.get(url)
html = driver.page_source
parsed_group = Group()
parsed_group.url = url
soup = BeautifulSoup(html, 'html.parser')
title = soup.find("h1", class_="catalog-heading")
if title:
parsed_group.name = decode_str(title.get_text())
print('parse group:', parsed_group.name)
parsed_items = []
if html != "":
#parse items in group
item_wrappers = soup.find_all("div", class_="goods-tile")
if item_wrappers:
items_count = 0
for item_wrapper in item_wrappers:
item_link_holder = item_wrapper.find("a", class_="goods-tile__picture")
item_href = item_link_holder.get("href")
if item_href:
parsed_item = parse_item_page(item_href)
parsed_items.append(parsed_item)
items_count += 1
if items_count >= settings.ITEMS_PER_GROUP_LIMIT:
break
else:
print("PARSED ITEMS:", str(items_count) , "/", str(min(len(item_wrappers), settings.ITEMS_PER_GROUP_LIMIT)))
parsed_group.items = parsed_items
else:
parsed_group.error = "error"
return parsed_group
def parse_category(url):
driver = Driver.get()
driver.get(url)
html = driver.page_source
parsed_category = Category()
parsed_category.url = url
soup = BeautifulSoup(html, 'html.parser')
title = soup.find("h1", class_="portal__heading")
if title:
parsed_category.name = decode_str(title.get_text())
print('parse category:', parsed_category.name)
parsed_groups = []
if html != "":
#parse groups in category
group_wrappers = soup.find_all("div", class_="tile-cats")
if group_wrappers:
groups_count = 0
for group_wrapper in group_wrappers:
group_link_holder = group_wrapper.find("a", class_="tile-cats__picture")
group_href = group_link_holder.get("href")
if group_href:
parsed_group = parse_specific_items_group(group_href)
parsed_groups.append(parsed_group)
groups_count += 1
if groups_count >= settings.GROUPS_PER_CATEGORY_LIMIT:
break
else:
print("PARSED GROUPS:", str(groups_count) , "/", str(min(len(group_wrappers), settings.GROUPS_PER_CATEGORY_LIMIT)))
parsed_category.groups = parsed_groups
else:
parsed_category.error = "error"
return parsed_category
def parse_root():
url = 'https://rozetka.com.ua/'
driver = Driver.get()
driver.get(url)
html = driver.page_source
parsed_categories = []
categories_count = 0
soup = BeautifulSoup(html, 'html.parser')
link_holders = soup.find_all("a", class_="menu-categories__link")
if link_holders:
for link_holder in link_holders:
link = link_holder.get("href")
category = parse_category(link)
if category:
parsed_categories.append(category)
categories_count += 1
if categories_count >= settings.CATEGORIES_LIMIT:
break
else:
print("PARSED CATEGORIES:", str(categories_count) , "/", str(min(len(link_holders), settings.CATEGORIES_LIMIT)))
return parsed_categories
def scrap_rozetka_web_site():
time_start = datetime.datetime.now()
print("Parsing started at:", time_start)
parsed_site_data = parse_root()
time_end = datetime.datetime.now()
print("Parsing ended at:", time_end)
print("Parsing took:", time_end - time_start)
#### testing of correct json parsing
# for parsed_category in parsed_site_data :
# reparsed = model.category.Category.fromJson(parsed_category.toJson())
# #print(reparsed)
# #print(isinstance(reparsed, model.category.Category))
# for g in reparsed.groups:
# #print(g)
# #print(isinstance(g, model.group.Group))
# for i in g.items:
# #print(i)
# #print(isinstance(i, model.item.Item))
# for c in i.comments:
# #print(c)
# #print(isinstance(c, model.item.Comment))
Driver.close()
print("End of parsing!")
Driver.quit()
#filter empty categories
filtered_parsed_site_data = []
for c in parsed_site_data:
if len(c.groups) > 0:
#groups = []
#for g in c.groups:
# if len(g.items) > 0:
# groups.append(g)
#c.groups = groups
filtered_parsed_site_data.append(c)
print("Saving to file!")
fw.write_plain_iterable(
settings.SITE_SCRAP_RELATIVE_FILE_PATH_STRING.format(str(datetime.datetime.now()).replace(" ", "_").replace(":","").replace(".", "")).replace("/+", "/"),
filtered_parsed_site_data,
lambda o : o.toJson(),
encoding='utf-8'
)
return parsed_site_data
#top category 'https://rozetka.com.ua/computers-notebooks/c80253/'
#parsed_category = parse_category('https://rozetka.com.ua/computers-notebooks/c80253/')
#print(parsed_category)
#specific category 'https://rozetka.com.ua/notebooks/c80004/'
#parsed_group = parse_specific_items
|
if fill == "url(#1)":
stars_count += 1
parsed_comment.rating = stars_count
|
random_line_split
|
rozetka_webscrapper.py
|
def parse_comment(comment):
parsed_comment = Comment()
comment_author = comment.find(class_="comment__author")
if comment_author:
comment_date = comment.find(class_="comment__date")
if comment_date:
parsed_comment.date = decode_str(comment_date.get_text())
comment_date.decompose()
parsed_comment.author = decode_str(comment_author.get_text())
comment_link = comment.find(class_="comment__link")
if comment_link:
parsed_comment.url = comment_link.get("href")
comment_vars_list = comment.find(class_="comment__vars-list") #sellers
#parse vars_list
parsed_comment_vars_list = []
if comment_vars_list:
comment_vars_lists = comment_vars_list.find_all(class_="comment__vars-item")
if comment_vars_lists:
for item in comment_vars_lists:
res = {}
label = item.find(class_="comment__vars-label")
value = item.find(class_="comment__vars-value")
if label:
res["label"] = decode_str(label.get_text())
if value:
res["value"] = decode_str(value.get_text())
if res:
parsed_comment_vars_list.append(res)
parsed_comment.vars_list = parsed_comment_vars_list
#parse rating
comment_rating = comment.find("rz-comment-rating") #may be Empty
#has 5 items
#each star has fill(#0) or fill(#1)
# svg path (tag) fill
if comment_rating:
stars = comment_rating.find_all("svg")
stars_count = 0
for star in stars:
path = star.find("path")
if path:
fill = path.get("fill")
if fill == "url(#1)":
stars_count += 1
parsed_comment.rating = stars_count
#parse essentials
comment_text = comment.find(class_="comment__text")
if comment_text:
parsed_comment.text = decode_str(comment_text.get_text())
comment_essentials_list = comment.find_all(class_="comment__essentials-item") #has label and optional <dd> with text
parsed_essentials_list = []
if comment_essentials_list:
for essential in comment_essentials_list:
res = {}
essential_label = essential.find("dt", class_="comment__essentials-label")
essential_data = essential.find("dd")
if essential_label:
res["label"] = decode_str(essential_label.get_text())
if essential_data:
res["data"] = decode_str(essential_data.get_text())
parsed_essentials_list.append(res)
parsed_comment.essentials_list = parsed_essentials_list
#parse attached photos
parsed_photos_urls = []
comment_attached_photos_urls = comment.find(class_="product-comments__photos-list")
if comment_attached_photos_urls:
photos_list = comment_attached_photos_urls.find_all(class_="product-comments__photos-item")
if photos_list:
for photo in photos_list:
img = photo.find("img")
if img:
url = img.get("src")
parsed_photos_urls.append(url)
parsed_comment.attached_photos_urls = parsed_photos_urls
return parsed_comment
def parse_item_page_for_comments(page):
soup = BeautifulSoup(page.text, 'html.parser')
# get the comments
comments = soup.find('comment-list')
#print(comments)
parsed_comments = []
# find all comment list items of that class
if comments:
comments_list = comments.find_all("li", class_="product-comments__list-item")
comments_count = 0
if comments_list:
for comment in comments_list:
parsed_comments.append(parse_comment(comment))
comments_count += 1
if comments_count >= settings.COMMENTS_PER_PAGE_LIMIT:
break
return parsed_comments
def parse_item_page_for_description(url):
page = requests.get(url)
soup = BeautifulSoup(page.text, 'html.parser')
description = soup.find(class_="product-about__description-content")
return decode_str(description.get_text()) if description else "" #runtime generated
def parse_item_page(url):
parsed_item = Item()
parsed_item.description = parse_item_page_for_description(url)
page = requests.get(url+'comments/')
parsed_item.url = url
soup = BeautifulSoup(page.text, 'html.parser')
title = soup.find(class_="product__title")
if title:
parsed_item.name = decode_str(title.get_text())
if page.reason == 'OK':
print('parse item:', parsed_item.name)
parsed_item.comments = parse_item_page_for_comments(page)
else:
parsed_item.error = page.reason
return parsed_item
def parse_specific_items_group(url):
driver = Driver.get()
driver.get(url)
html = driver.page_source
parsed_group = Group()
parsed_group.url = url
soup = BeautifulSoup(html, 'html.parser')
title = soup.find("h1", class_="catalog-heading")
if title:
parsed_group.name = decode_str(title.get_text())
print('parse group:', parsed_group.name)
parsed_items = []
if html != "":
#parse items in group
item_wrappers = soup.find_all("div", class_="goods-tile")
if item_wrappers:
items_count = 0
for item_wrapper in item_wrappers:
item_link_holder = item_wrapper.find("a", class_="goods-tile__picture")
item_href = item_link_holder.get("href")
if item_href:
parsed_item = parse_item_page(item_href)
parsed_items.append(parsed_item)
items_count += 1
if items_count >= settings.ITEMS_PER_GROUP_LIMIT:
break
else:
print("PARSED ITEMS:", str(items_count) , "/", str(min(len(item_wrappers), settings.ITEMS_PER_GROUP_LIMIT)))
parsed_group.items = parsed_items
else:
parsed_group.error = "error"
return parsed_group
def parse_category(url):
driver = Driver.get()
driver.get(url)
html = driver.page_source
parsed_category = Category()
parsed_category.url = url
soup = BeautifulSoup(html, 'html.parser')
title = soup.find("h1", class_="portal__heading")
if title:
parsed_category.name = decode_str(title.get_text())
print('parse category:', parsed_category.name)
parsed_groups = []
if html != "":
#parse groups in category
group_wrappers = soup.find_all("div", class_="tile-cats")
if group_wrappers:
groups_count = 0
for group_wrapper in group_wrappers:
group_link_holder = group_wrapper.find("a", class_="tile-cats__picture")
group_href = group_link_holder.get("href")
if group_href:
parsed_group = parse_specific_items_group(group_href)
parsed_groups.append(parsed_group)
groups_count += 1
if groups_count >= settings.GROUPS_PER_CATEGORY_LIMIT:
break
else:
print("PARSED GROUPS:", str(groups_count) , "/", str(min(len(group_wrappers), settings.GROUPS_PER_CATEGORY_LIMIT)))
parsed_category.groups = parsed_groups
else:
parsed_category.error = "error"
return parsed_category
def parse_root():
url = 'https://rozetka.com.ua/'
driver = Driver.get()
driver.get(url)
html = driver.page_source
parsed_categories = []
categories_count = 0
soup = BeautifulSoup(html, 'html.parser')
link_holders = soup.find_all("a", class_="menu-categories__link")
if link_holders:
for link_holder in link_holders:
link = link_holder.get("href")
category = parse_category(link)
if category:
parsed_categories.append(category)
categories_count += 1
if categories_count >= settings.CATEGORIES_LIMIT:
break
else:
print("PARSED CATEGORIES:", str(categories_count) , "/", str(min(len(link_holders), settings.CATEGORIES_LIMIT)))
return parsed_categories
def scrap_rozetka_web_site():
time_start = datetime.datetime.now()
print("Parsing started at:", time_start)
parsed_site_data = parse_root()
time_end = datetime.datetime.now()
print("Parsing ended at:", time_end)
print("Parsing took:", time_end - time_start)
#### testing of correct json parsing
# for parsed_category in parsed_site_data :
# reparsed = model.category.Category.fromJson(parsed_category.toJson())
# #print(reparsed)
# #print(isinstance(reparsed, model.category.Category))
# for g in reparsed.groups:
# #print(g)
# #print(isinstance(g, model.group.Group))
# for i in g.items:
# #print(i)
# #print(isinstance(i, model.item.Item))
# for c in i.comments:
# #print(c)
# #print(isinstance(c, model.item.Comment))
Driver.close()
print("End of parsing!")
Driver.quit()
#filter empty categories
filtered_parsed_site_data = []
for c in parsed_site_data:
if len(c.groups) > 0:
#groups = []
#for g in c.groups:
# if len(g.items) > 0:
# groups.append(g)
#c.groups = groups
filtered_parsed_site_data.append(c)
print("Saving to file!")
fw.write_plain_iterable(
settings.SITE_SCRAP_RELATIVE_FILE_PATH_STRING.format(str(datetime.datetime.now()).replace
|
return unicodestr
|
identifier_body
|
|
rozetka_webscrapper.py
|
") #sellers
#parse vars_list
parsed_comment_vars_list = []
if comment_vars_list:
comment_vars_lists = comment_vars_list.find_all(class_="comment__vars-item")
if comment_vars_lists:
for item in comment_vars_lists:
res = {}
label = item.find(class_="comment__vars-label")
value = item.find(class_="comment__vars-value")
if label:
res["label"] = decode_str(label.get_text())
if value:
res["value"] = decode_str(value.get_text())
if res:
parsed_comment_vars_list.append(res)
parsed_comment.vars_list = parsed_comment_vars_list
#parse rating
comment_rating = comment.find("rz-comment-rating") #may be Empty
#has 5 items
#each star has fill(#0) or fill(#1)
# svg path (tag) fill
if comment_rating:
stars = comment_rating.find_all("svg")
stars_count = 0
for star in stars:
path = star.find("path")
if path:
fill = path.get("fill")
if fill == "url(#1)":
stars_count += 1
parsed_comment.rating = stars_count
#parse essentials
comment_text = comment.find(class_="comment__text")
if comment_text:
parsed_comment.text = decode_str(comment_text.get_text())
comment_essentials_list = comment.find_all(class_="comment__essentials-item") #has label and optional <dd> with text
parsed_essentials_list = []
if comment_essentials_list:
for essential in comment_essentials_list:
res = {}
essential_label = essential.find("dt", class_="comment__essentials-label")
essential_data = essential.find("dd")
if essential_label:
res["label"] = decode_str(essential_label.get_text())
if essential_data:
res["data"] = decode_str(essential_data.get_text())
parsed_essentials_list.append(res)
parsed_comment.essentials_list = parsed_essentials_list
#parse attached photos
parsed_photos_urls = []
comment_attached_photos_urls = comment.find(class_="product-comments__photos-list")
if comment_attached_photos_urls:
photos_list = comment_attached_photos_urls.find_all(class_="product-comments__photos-item")
if photos_list:
for photo in photos_list:
img = photo.find("img")
if img:
url = img.get("src")
parsed_photos_urls.append(url)
parsed_comment.attached_photos_urls = parsed_photos_urls
return parsed_comment
def parse_item_page_for_comments(page):
soup = BeautifulSoup(page.text, 'html.parser')
# get the comments
comments = soup.find('comment-list')
#print(comments)
parsed_comments = []
# find all comment list items of that class
if comments:
|
return parsed_comments
def parse_item_page_for_description(url):
page = requests.get(url)
soup = BeautifulSoup(page.text, 'html.parser')
description = soup.find(class_="product-about__description-content")
return decode_str(description.get_text()) if description else "" #runtime generated
def parse_item_page(url):
parsed_item = Item()
parsed_item.description = parse_item_page_for_description(url)
page = requests.get(url+'comments/')
parsed_item.url = url
soup = BeautifulSoup(page.text, 'html.parser')
title = soup.find(class_="product__title")
if title:
parsed_item.name = decode_str(title.get_text())
if page.reason == 'OK':
print('parse item:', parsed_item.name)
parsed_item.comments = parse_item_page_for_comments(page)
else:
parsed_item.error = page.reason
return parsed_item
def parse_specific_items_group(url):
driver = Driver.get()
driver.get(url)
html = driver.page_source
parsed_group = Group()
parsed_group.url = url
soup = BeautifulSoup(html, 'html.parser')
title = soup.find("h1", class_="catalog-heading")
if title:
parsed_group.name = decode_str(title.get_text())
print('parse group:', parsed_group.name)
parsed_items = []
if html != "":
#parse items in group
item_wrappers = soup.find_all("div", class_="goods-tile")
if item_wrappers:
items_count = 0
for item_wrapper in item_wrappers:
item_link_holder = item_wrapper.find("a", class_="goods-tile__picture")
item_href = item_link_holder.get("href")
if item_href:
parsed_item = parse_item_page(item_href)
parsed_items.append(parsed_item)
items_count += 1
if items_count >= settings.ITEMS_PER_GROUP_LIMIT:
break
else:
print("PARSED ITEMS:", str(items_count) , "/", str(min(len(item_wrappers), settings.ITEMS_PER_GROUP_LIMIT)))
parsed_group.items = parsed_items
else:
parsed_group.error = "error"
return parsed_group
def parse_category(url):
driver = Driver.get()
driver.get(url)
html = driver.page_source
parsed_category = Category()
parsed_category.url = url
soup = BeautifulSoup(html, 'html.parser')
title = soup.find("h1", class_="portal__heading")
if title:
parsed_category.name = decode_str(title.get_text())
print('parse category:', parsed_category.name)
parsed_groups = []
if html != "":
#parse groups in category
group_wrappers = soup.find_all("div", class_="tile-cats")
if group_wrappers:
groups_count = 0
for group_wrapper in group_wrappers:
group_link_holder = group_wrapper.find("a", class_="tile-cats__picture")
group_href = group_link_holder.get("href")
if group_href:
parsed_group = parse_specific_items_group(group_href)
parsed_groups.append(parsed_group)
groups_count += 1
if groups_count >= settings.GROUPS_PER_CATEGORY_LIMIT:
break
else:
print("PARSED GROUPS:", str(groups_count) , "/", str(min(len(group_wrappers), settings.GROUPS_PER_CATEGORY_LIMIT)))
parsed_category.groups = parsed_groups
else:
parsed_category.error = "error"
return parsed_category
def parse_root():
url = 'https://rozetka.com.ua/'
driver = Driver.get()
driver.get(url)
html = driver.page_source
parsed_categories = []
categories_count = 0
soup = BeautifulSoup(html, 'html.parser')
link_holders = soup.find_all("a", class_="menu-categories__link")
if link_holders:
for link_holder in link_holders:
link = link_holder.get("href")
category = parse_category(link)
if category:
parsed_categories.append(category)
categories_count += 1
if categories_count >= settings.CATEGORIES_LIMIT:
break
else:
print("PARSED CATEGORIES:", str(categories_count) , "/", str(min(len(link_holders), settings.CATEGORIES_LIMIT)))
return parsed_categories
def scrap_rozetka_web_site():
time_start = datetime.datetime.now()
print("Parsing started at:", time_start)
parsed_site_data = parse_root()
time_end = datetime.datetime.now()
print("Parsing ended at:", time_end)
print("Parsing took:", time_end - time_start)
#### testing of correct json parsing
# for parsed_category in parsed_site_data :
# reparsed = model.category.Category.fromJson(parsed_category.toJson())
# #print(reparsed)
# #print(isinstance(reparsed, model.category.Category))
# for g in reparsed.groups:
# #print(g)
# #print(isinstance(g, model.group.Group))
# for i in g.items:
# #print(i)
# #print(isinstance(i, model.item.Item))
# for c in i.comments:
# #print(c)
# #print(isinstance(c, model.item.Comment))
Driver.close()
print("End of parsing!")
Driver.quit()
#filter empty categories
filtered_parsed_site_data = []
for c in parsed_site_data:
if len(c.groups) > 0:
#groups = []
#for g in c.groups:
# if len(g.items) > 0:
# groups.append(g)
#c.groups = groups
filtered_parsed_site_data.append(c)
print("Saving to file!")
fw.write_plain_iterable(
settings.SITE_SCRAP_RELATIVE_FILE_PATH_STRING.format(str(datetime.datetime.now()).replace(" ", "_").replace(":","").replace(".", "")).replace("/+", "/"),
filtered_parsed_site_data,
lambda o : o.toJson(),
encoding='utf-8'
)
return parsed_site_data
#top category 'https://rozetka.com.ua/computers-notebooks/c80253/'
#parsed_category = parse_category('https://rozetka.com.ua/computers-notebooks/c80253/')
#print(parsed_category)
#specific category 'https://rozetka.com.ua/notebooks/c80004/'
#parsed_group = parse_specific_items
|
comments_list = comments.find_all("li", class_="product-comments__list-item")
comments_count = 0
if comments_list:
for comment in comments_list:
parsed_comments.append(parse_comment(comment))
comments_count += 1
if comments_count >= settings.COMMENTS_PER_PAGE_LIMIT:
break
|
conditional_block
|
value.rs
|
use serde::de::{Deserialize, Deserializer, Error as DeError, Visitor, SeqVisitor, MapVisitor};
use serde::de::impls::VecVisitor;
use serde_json;
use error::Error;
/// The type which represents the key for maps used throughout the Ardite
/// codebase.
///
/// Functions similarly to an object key in JavaScript.
pub type Key = String;
/// Represents a [JSON pointer][1] to a document property. Examples of a
/// pointer in this context include `/hello/world` or `/a/b/c/d`.
///
/// These pointers are represented as a list of keys.
///
/// [1]: https://duckduckgo.com/?q=json+pointer&atb=v1&ia=about
pub type Pointer = Vec<Key>;
/// Ordered representation of a map of key/value pairs, like a JSON object.
/// Backed by a linear map to maintain order and have high performance for
/// small objects.
// TODO: newtype pattern?
pub type Object = LinearMap<Key, Value>;
/// Ordered array of values, like a JSON array.
// TODO: newtype pattern?
pub type Array = Vec<Value>;
/// Various value types. Based on types in the [JSON standard][1] (see section
/// 5).
///
/// [1]: http://ecma-international.org/publications/files/ECMA-ST/ECMA-404.pdf
#[derive(PartialEq, Clone, Debug)]
pub enum Value {
/// The absence of any value.
Null,
/// True or false.
Boolean(bool),
/// An integer numeric value.
I64(i64),
/// A floating point numeric value.
F64(f64),
/// A list of characters.
String(String),
/// A map of key/value pairs.
Object(Object),
/// A list of values.
Array(Array)
}
impl Value {
/// Gets a value at a specific point. Helpful for retrieving nested values.
pub fn get(&self, mut pointer: Pointer) -> Option<&Value> {
match *self {
Value::Object(ref map) => {
if pointer.is_empty() {
Some(self)
} else if let Some(value) = map.get(&pointer.remove(0)) {
value.get(pointer)
} else {
None
}
},
Value::Array(ref vec) => {
if pointer.is_empty() {
Some(self)
} else if let Some(value) = pointer.remove(0).parse::<usize>().ok().map_or(None, |i| vec.get(i)) {
value.get(pointer)
} else {
None
}
},
_ => if pointer.is_empty() { Some(self) } else { None }
}
}
/// Creates a `Value` from a JSON string.
pub fn from_json(json: &str) -> Result<Value, Error> {
serde_json::from_str(json).map_err(Error::from)
}
/// Converts a `Value` into a JSON string.
pub fn to_json(&self) -> Result<String, Error> {
serde_json::to_string(self).map_err(Error::from)
}
/// Converts a `Value` into a nice and indented JSON string.
pub fn to_json_pretty(&self) -> Result<String, Error> {
serde_json::to_string_pretty(self).map_err(Error::from)
}
}
impl Serialize for Value {
#[inline]
fn serialize<S>(&self, serializer: &mut S) -> Result<(), S::Error> where S: Serializer {
match *self {
Value::Null => serializer.serialize_unit(),
Value::Boolean(value) => serializer.serialize_bool(value),
Value::I64(value) => serializer.serialize_i64(value),
Value::F64(value) => serializer.serialize_f64(value),
Value::String(ref value) => serializer.serialize_str(&value),
Value::Array(ref value) => value.serialize(serializer),
Value::Object(ref value) => value.serialize(serializer)
}
}
}
impl Deserialize for Value {
#[inline]
fn deserialize<D>(deserializer: &mut D) -> Result<Value, D::Error> where D: Deserializer {
struct ValueVisitor;
impl Visitor for ValueVisitor {
type Value = Value;
#[inline] fn visit_bool<E>(&mut self, value: bool) -> Result<Value, E> { Ok(Value::Boolean(value)) }
#[inline] fn visit_u64<E>(&mut self, value: u64) -> Result<Value, E> { Ok(Value::I64(value as i64)) }
#[inline] fn visit_i64<E>(&mut self, value: i64) -> Result<Value, E> { Ok(Value::I64(value)) }
#[inline] fn visit_f64<E>(&mut self, value: f64) -> Result<Value, E> { Ok(Value::F64(value)) }
#[inline] fn visit_str<E>(&mut self, value: &str) -> Result<Value, E> where E: DeError { self.visit_string(value.to_owned()) }
#[inline] fn visit_string<E>(&mut self, value: String) -> Result<Value, E> { Ok(Value::String(value)) }
#[inline] fn visit_none<E>(&mut self) -> Result<Value, E> { Ok(Value::Null) }
#[inline] fn visit_some<D>(&mut self, deserializer: &mut D) -> Result<Value, D::Error> where D: Deserializer { Deserialize::deserialize(deserializer) }
#[inline] fn visit_unit<E>(&mut self) -> Result<Value, E> { Ok(Value::Null) }
#[inline] fn visit_seq<V>(&mut self, visitor: V) -> Result<Value, V::Error> where V: SeqVisitor { let values = try!(VecVisitor::new().visit_seq(visitor)); Ok(Value::Array(values)) }
#[inline]
fn visit_map<V>(&mut self, mut visitor: V) -> Result<Value, V::Error> where V: MapVisitor {
let mut object = LinearMap::with_capacity(visitor.size_hint().0);
while let Some((key, value)) = try!(visitor.visit()) {
object.insert(key, value);
}
try!(visitor.end());
Ok(Value::Object(object))
}
}
deserializer.deserialize(ValueVisitor)
}
}
impl<V> From<Option<V>> for Value where V: Into<Value> {
fn from(option: Option<V>) -> Self {
match option {
None => Value::Null,
Some(value) => value.into()
}
}
}
impl From<bool> for Value {
fn from(boolean: bool) -> Self {
Value::Boolean(boolean)
}
}
impl From<i64> for Value {
fn from(number: i64) -> Self {
Value::I64(number)
}
}
impl From<f64> for Value {
fn from(number: f64) -> Self {
Value::F64(number)
}
}
impl From<String> for Value {
fn from(string: String) -> Self {
Value::String(string)
}
}
impl<'a> From<&'a str> for Value {
fn from(string: &'a str) -> Self {
Value::from(string.to_owned())
}
}
/// An iterator of values. Used by drivers to convert their own iterator
/// implementations into a single type.
pub struct ValueIter<'a> {
iter: Box<Iterator<Item=Value> + 'a>
}
impl<'a> ValueIter<'a> {
/// Create a new value iterator.
pub fn new<I>(iter: I) -> Self where I: Iterator<Item=Value> + 'a {
ValueIter {
iter: Box::new(iter)
}
}
}
impl<'a> Iterator for ValueIter<'a> {
type Item = Value;
#[inline]
fn next(&mut self) -> Option<Value> {
self.iter.next()
}
}
#[cfg(test)]
mod tests {
use value::Value;
#[test]
fn test_get_primitive() {
assert_eq!(value!().get(point![]).cloned(), Some(value!()));
assert_eq!(value!().get(point!["hello"]).cloned(), None);
assert_eq!(value!().get(point!["a", "b", "c", "d", "e"]).cloned(), None);
assert_eq!(value!(true).get(point![]).cloned(), Some(value!(true)));
assert_eq!(value!(true).get(point!["hello"]).cloned(), None);
assert_eq!(value!(36).get(point![]).cloned(), Some(value!(36)));
assert_eq!(value!(36).get(point!["hello"]).cloned(), None);
assert_eq!(value!("world").get(point![]).cloned(), Some(value!("world")));
assert_eq!(value!("world").get(point!["hello"]).cloned(), None);
}
#[test]
fn test_get_object() {
let object = value!({
"hello" => true,
"world" => 8,
"yolo" => "swag",
"5" => (),
"moon" => {
"hello" => "yoyo"
}
});
assert_eq!(object.get(point![]).cloned(), Some(object.clone()));
assert_eq!(object.get(point!["hello"]).cloned(), Some(value!(true)));
assert
|
//! the driver to these types.
use linear_map::LinearMap;
use serde::ser::{Serialize, Serializer};
|
random_line_split
|
|
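For readers skimming the Value::get implementation above, here is a conceptual Python sketch of the same pointer-as-list-of-keys traversal: an empty pointer returns the value itself, object keys and stringified array indices are consumed left to right, and anything else yields None. The function name is illustrative and not part of the crate.

# Conceptual sketch of Value::get: walk a nested structure with a pointer
# given as a list of keys; array indices arrive as strings, mirroring the
# Rust code above. This is an illustration, not part of the crate.
def get_by_pointer(value, pointer):
    if not pointer:
        return value
    head, rest = pointer[0], pointer[1:]
    if isinstance(value, dict):
        return get_by_pointer(value[head], rest) if head in value else None
    if isinstance(value, list):
        try:
            index = int(head)
        except ValueError:
            return None
        if 0 <= index < len(value):
            return get_by_pointer(value[index], rest)
        return None
    return None

doc = {"moon": {"hello": "yoyo"}, "items": [1, [2, 3]]}
assert get_by_pointer(doc, []) == doc
assert get_by_pointer(doc, ["moon", "hello"]) == "yoyo"
assert get_by_pointer(doc, ["items", "1", "0"]) == 2
assert get_by_pointer(doc, ["nope"]) is None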
value.rs
|
Error> where D: Deserializer {
struct ValueVisitor;
impl Visitor for ValueVisitor {
type Value = Value;
#[inline] fn visit_bool<E>(&mut self, value: bool) -> Result<Value, E> { Ok(Value::Boolean(value)) }
#[inline] fn visit_u64<E>(&mut self, value: u64) -> Result<Value, E> { Ok(Value::I64(value as i64)) }
#[inline] fn visit_i64<E>(&mut self, value: i64) -> Result<Value, E> { Ok(Value::I64(value)) }
#[inline] fn visit_f64<E>(&mut self, value: f64) -> Result<Value, E> { Ok(Value::F64(value)) }
#[inline] fn visit_str<E>(&mut self, value: &str) -> Result<Value, E> where E: DeError { self.visit_string(value.to_owned()) }
#[inline] fn visit_string<E>(&mut self, value: String) -> Result<Value, E> { Ok(Value::String(value)) }
#[inline] fn visit_none<E>(&mut self) -> Result<Value, E> { Ok(Value::Null) }
#[inline] fn visit_some<D>(&mut self, deserializer: &mut D) -> Result<Value, D::Error> where D: Deserializer { Deserialize::deserialize(deserializer) }
#[inline] fn visit_unit<E>(&mut self) -> Result<Value, E> { Ok(Value::Null) }
#[inline] fn visit_seq<V>(&mut self, visitor: V) -> Result<Value, V::Error> where V: SeqVisitor { let values = try!(VecVisitor::new().visit_seq(visitor)); Ok(Value::Array(values)) }
#[inline]
fn visit_map<V>(&mut self, mut visitor: V) -> Result<Value, V::Error> where V: MapVisitor {
let mut object = LinearMap::with_capacity(visitor.size_hint().0);
while let Some((key, value)) = try!(visitor.visit()) {
object.insert(key, value);
}
try!(visitor.end());
Ok(Value::Object(object))
}
}
deserializer.deserialize(ValueVisitor)
}
}
impl<V> From<Option<V>> for Value where V: Into<Value> {
fn from(option: Option<V>) -> Self {
match option {
None => Value::Null,
Some(value) => value.into()
}
}
}
impl From<bool> for Value {
fn from(boolean: bool) -> Self {
Value::Boolean(boolean)
}
}
impl From<i64> for Value {
fn from(number: i64) -> Self {
Value::I64(number)
}
}
impl From<f64> for Value {
fn from(number: f64) -> Self {
Value::F64(number)
}
}
impl From<String> for Value {
fn from(string: String) -> Self {
Value::String(string)
}
}
impl<'a> From<&'a str> for Value {
fn from(string: &'a str) -> Self {
Value::from(string.to_owned())
}
}
/// An iterator of values. Used by drivers to convert their own iterator
/// implementations into a single type.
pub struct ValueIter<'a> {
iter: Box<Iterator<Item=Value> + 'a>
}
impl<'a> ValueIter<'a> {
/// Create a new value iterator.
pub fn new<I>(iter: I) -> Self where I: Iterator<Item=Value> + 'a {
ValueIter {
iter: Box::new(iter)
}
}
}
impl<'a> Iterator for ValueIter<'a> {
type Item = Value;
#[inline]
fn next(&mut self) -> Option<Value> {
self.iter.next()
}
}
#[cfg(test)]
mod tests {
use value::Value;
#[test]
fn test_get_primitive() {
assert_eq!(value!().get(point![]).cloned(), Some(value!()));
assert_eq!(value!().get(point!["hello"]).cloned(), None);
assert_eq!(value!().get(point!["a", "b", "c", "d", "e"]).cloned(), None);
assert_eq!(value!(true).get(point![]).cloned(), Some(value!(true)));
assert_eq!(value!(true).get(point!["hello"]).cloned(), None);
assert_eq!(value!(36).get(point![]).cloned(), Some(value!(36)));
assert_eq!(value!(36).get(point!["hello"]).cloned(), None);
assert_eq!(value!("world").get(point![]).cloned(), Some(value!("world")));
assert_eq!(value!("world").get(point!["hello"]).cloned(), None);
}
#[test]
fn test_get_object() {
let object = value!({
"hello" => true,
"world" => 8,
"yolo" => "swag",
"5" => (),
"moon" => {
"hello" => "yoyo"
}
});
assert_eq!(object.get(point![]).cloned(), Some(object.clone()));
assert_eq!(object.get(point!["hello"]).cloned(), Some(value!(true)));
assert_eq!(object.get(point!["yolo"]).cloned(), Some(value!("swag")));
assert_eq!(object.get(point!["5"]).cloned(), Some(value!()));
assert_eq!(object.get(point!["world", "hello"]).cloned(), None);
assert_eq!(object.get(point!["moon", "hello"]).cloned(), Some(value!("yoyo")));
assert_eq!(object.get(point!["moon", "nope"]).cloned(), None);
}
#[test]
fn test_get_array() {
let array = value!([
false,
64,
{
"hello" => true,
"world" => false,
"moon" => {
"goodbye" => "yoyo"
}
},
[[1, 2, 3], 4, 5 ]
]);
assert_eq!(array.get(point![]).cloned(), Some(array.clone()));
assert_eq!(array.get(point!["0"]).cloned(), Some(value!(false)));
assert_eq!(array.get(point!["1"]).cloned(), Some(value!(64)));
assert_eq!(array.get(point!["2", "hello"]).cloned(), Some(value!(true)));
assert_eq!(array.get(point!["2", "moon", "goodbye"]).cloned(), Some(value!("yoyo")));
assert_eq!(array.get(point!["length"]).cloned(), None);
assert_eq!(array.get(point!["3", "0", "1"]).cloned(), Some(value!(2)));
}
#[test]
fn test_from_json() {
assert_eq!(Value::from_json("null").unwrap(), value!());
assert_eq!(Value::from_json("true").unwrap(), value!(true));
assert_eq!(Value::from_json("false").unwrap(), value!(false));
assert_eq!(Value::from_json("7").unwrap(), value!(7));
assert_eq!(Value::from_json("3.3").unwrap(), value!(3.3));
assert_eq!(Value::from_json(r#""Hello,\n\"world\"!""#).unwrap(), value!("Hello,\n\"world\"!"));
assert_eq!(Value::from_json(r#"{"hello":"world","foo":true,"null":null,"goodbye":{"moon":2}}"#).unwrap(), value!({
"hello" => "world",
"foo" => true,
"null" => (),
"goodbye" => {
"moon" => 2
}
}));
assert_eq!(
Value::from_json(r#"["world",3.3,{"hello":"world"},null,null,[1,2,3],null]"#).unwrap(),
value!(["world", 3.3, { "hello" => "world" }, (), (), [1, 2, 3], ()])
);
}
#[test]
fn test_to_json()
|
{
assert_eq!(&value!().to_json().unwrap(), "null");
assert_eq!(&value!(true).to_json().unwrap(), "true");
assert_eq!(&value!(false).to_json().unwrap(), "false");
assert_eq!(&value!(7).to_json().unwrap(), "7");
assert_eq!(&value!(6.667).to_json().unwrap(), "6.667");
assert_eq!(&value!("Hello,\n\"world\"!").to_json().unwrap(), r#""Hello,\n\"world\"!""#);
assert_eq!(&value!({
"hello" => "world",
"foo" => true,
"null" => (),
"goodbye" => {
"moon" => 2
}
}).to_json().unwrap(), r#"{"hello":"world","foo":true,"null":null,"goodbye":{"moon":2}}"#);
assert_eq!(
&value!(["world", 3.333, { "hello" => "world" }, (), (), [1, 2, 3], ()]).to_json().unwrap(),
r#"["world",3.333,{"hello":"world"},null,null,[1,2,3],null]"#
);
}
|
identifier_body
|
|
value.rs
|
section
/// 5).
///
/// [1]: http://ecma-international.org/publications/files/ECMA-ST/ECMA-404.pdf
#[derive(PartialEq, Clone, Debug)]
pub enum Value {
/// The absence of any value.
Null,
/// True or false.
Boolean(bool),
/// An integer numeric value.
I64(i64),
/// A floating point numeric value.
F64(f64),
/// A list of characters.
String(String),
/// A map of key/value pairs.
Object(Object),
/// A list of values.
Array(Array)
}
impl Value {
/// Gets a value at a specific point. Helpful for retrieving nested values.
pub fn get(&self, mut pointer: Pointer) -> Option<&Value> {
match *self {
Value::Object(ref map) => {
if pointer.is_empty() {
Some(self)
} else if let Some(value) = map.get(&pointer.remove(0)) {
value.get(pointer)
} else {
None
}
},
Value::Array(ref vec) => {
if pointer.is_empty() {
Some(self)
} else if let Some(value) = pointer.remove(0).parse::<usize>().ok().map_or(None, |i| vec.get(i)) {
value.get(pointer)
} else {
None
}
},
_ => if pointer.is_empty() { Some(self) } else { None }
}
}
/// Creates a `Value` from a JSON string.
pub fn from_json(json: &str) -> Result<Value, Error> {
serde_json::from_str(json).map_err(Error::from)
}
/// Converts a `Value` into a JSON string.
pub fn to_json(&self) -> Result<String, Error> {
serde_json::to_string(self).map_err(Error::from)
}
/// Converts a `Value` into a nice and indented JSON string.
pub fn to_json_pretty(&self) -> Result<String, Error> {
serde_json::to_string_pretty(self).map_err(Error::from)
}
}
impl Serialize for Value {
#[inline]
fn serialize<S>(&self, serializer: &mut S) -> Result<(), S::Error> where S: Serializer {
match *self {
Value::Null => serializer.serialize_unit(),
Value::Boolean(value) => serializer.serialize_bool(value),
Value::I64(value) => serializer.serialize_i64(value),
Value::F64(value) => serializer.serialize_f64(value),
Value::String(ref value) => serializer.serialize_str(&value),
Value::Array(ref value) => value.serialize(serializer),
Value::Object(ref value) => value.serialize(serializer)
}
}
}
impl Deserialize for Value {
#[inline]
fn deserialize<D>(deserializer: &mut D) -> Result<Value, D::Error> where D: Deserializer {
struct ValueVisitor;
impl Visitor for ValueVisitor {
type Value = Value;
#[inline] fn visit_bool<E>(&mut self, value: bool) -> Result<Value, E> { Ok(Value::Boolean(value)) }
#[inline] fn visit_u64<E>(&mut self, value: u64) -> Result<Value, E> { Ok(Value::I64(value as i64)) }
#[inline] fn visit_i64<E>(&mut self, value: i64) -> Result<Value, E> { Ok(Value::I64(value)) }
#[inline] fn visit_f64<E>(&mut self, value: f64) -> Result<Value, E> { Ok(Value::F64(value)) }
#[inline] fn visit_str<E>(&mut self, value: &str) -> Result<Value, E> where E: DeError { self.visit_string(value.to_owned()) }
#[inline] fn visit_string<E>(&mut self, value: String) -> Result<Value, E> { Ok(Value::String(value)) }
#[inline] fn visit_none<E>(&mut self) -> Result<Value, E> { Ok(Value::Null) }
#[inline] fn visit_some<D>(&mut self, deserializer: &mut D) -> Result<Value, D::Error> where D: Deserializer { Deserialize::deserialize(deserializer) }
#[inline] fn visit_unit<E>(&mut self) -> Result<Value, E> { Ok(Value::Null) }
#[inline] fn visit_seq<V>(&mut self, visitor: V) -> Result<Value, V::Error> where V: SeqVisitor { let values = try!(VecVisitor::new().visit_seq(visitor)); Ok(Value::Array(values)) }
#[inline]
fn visit_map<V>(&mut self, mut visitor: V) -> Result<Value, V::Error> where V: MapVisitor {
let mut object = LinearMap::with_capacity(visitor.size_hint().0);
while let Some((key, value)) = try!(visitor.visit()) {
object.insert(key, value);
}
try!(visitor.end());
Ok(Value::Object(object))
}
}
deserializer.deserialize(ValueVisitor)
}
}
impl<V> From<Option<V>> for Value where V: Into<Value> {
fn from(option: Option<V>) -> Self {
match option {
None => Value::Null,
Some(value) => value.into()
}
}
}
impl From<bool> for Value {
fn from(boolean: bool) -> Self {
Value::Boolean(boolean)
}
}
impl From<i64> for Value {
fn from(number: i64) -> Self {
Value::I64(number)
}
}
impl From<f64> for Value {
fn from(number: f64) -> Self {
Value::F64(number)
}
}
impl From<String> for Value {
fn from(string: String) -> Self {
Value::String(string)
}
}
impl<'a> From<&'a str> for Value {
fn from(string: &'a str) -> Self {
Value::from(string.to_owned())
}
}
/// An iterator of values. Used by drivers to convert their own iterator
/// implementations into a single type.
pub struct ValueIter<'a> {
iter: Box<Iterator<Item=Value> + 'a>
}
impl<'a> ValueIter<'a> {
/// Create a new value iterator.
pub fn new<I>(iter: I) -> Self where I: Iterator<Item=Value> + 'a {
ValueIter {
iter: Box::new(iter)
}
}
}
impl<'a> Iterator for ValueIter<'a> {
type Item = Value;
#[inline]
fn next(&mut self) -> Option<Value> {
self.iter.next()
}
}
#[cfg(test)]
mod tests {
use value::Value;
#[test]
fn
|
() {
assert_eq!(value!().get(point![]).cloned(), Some(value!()));
assert_eq!(value!().get(point!["hello"]).cloned(), None);
assert_eq!(value!().get(point!["a", "b", "c", "d", "e"]).cloned(), None);
assert_eq!(value!(true).get(point![]).cloned(), Some(value!(true)));
assert_eq!(value!(true).get(point!["hello"]).cloned(), None);
assert_eq!(value!(36).get(point![]).cloned(), Some(value!(36)));
assert_eq!(value!(36).get(point!["hello"]).cloned(), None);
assert_eq!(value!("world").get(point![]).cloned(), Some(value!("world")));
assert_eq!(value!("world").get(point!["hello"]).cloned(), None);
}
#[test]
fn test_get_object() {
let object = value!({
"hello" => true,
"world" => 8,
"yolo" => "swag",
"5" => (),
"moon" => {
"hello" => "yoyo"
}
});
assert_eq!(object.get(point![]).cloned(), Some(object.clone()));
assert_eq!(object.get(point!["hello"]).cloned(), Some(value!(true)));
assert_eq!(object.get(point!["yolo"]).cloned(), Some(value!("swag")));
assert_eq!(object.get(point!["5"]).cloned(), Some(value!()));
assert_eq!(object.get(point!["world", "hello"]).cloned(), None);
assert_eq!(object.get(point!["moon", "hello"]).cloned(), Some(value!("yoyo")));
assert_eq!(object.get(point!["moon", "nope"]).cloned(), None);
}
#[test]
fn test_get_array() {
let array = value!([
false,
64,
{
"hello" => true,
"world" => false,
"moon" => {
"goodbye" => "yoyo"
}
},
[[1, 2, 3], 4, 5 ]
]);
assert_eq!(array.get(point![]).cloned(), Some(array.clone()));
assert_eq!(array.get(point!["0"]).cloned(), Some(value!(false)));
assert_eq!(array.get(point!["1"]).cloned(), Some(value!(64)));
assert_eq!(array.get(point!["2", "hello"]).cloned(), Some(value!(true)));
assert_eq!(array.get(point!["2", "moon", "goodbye"]).
|
test_get_primitive
|
identifier_name
|
balloon.rs
|
PartiallyBalloonedPage {
fn new() -> Self {
let page_size = get_page_size();
let len = ((page_size >> VIRTIO_BALLOON_PFN_SHIFT) + 63) / 64;
// Initialize each padding bit to 1 in the bitmap.
let mut bitmap = vec![0_u64; len as usize];
let pad_num = len * 64 - (page_size >> VIRTIO_BALLOON_PFN_SHIFT);
bitmap[(len - 1) as usize] = !((1 << (64 - pad_num)) - 1);
Self {
addr: 0,
bitmap,
page_size,
}
}
fn pfn_match(&self, addr: u64) -> bool {
self.addr == addr & !(self.page_size - 1)
}
fn bitmap_full(&self) -> bool {
self.bitmap.iter().all(|b| *b == u64::MAX)
}
fn
|
(&mut self, addr: u64) {
let addr_offset = (addr % self.page_size) >> VIRTIO_BALLOON_PFN_SHIFT;
self.bitmap[(addr_offset / 64) as usize] |= 1 << (addr_offset % 64);
}
fn reset(&mut self) {
let len = ((self.page_size >> VIRTIO_BALLOON_PFN_SHIFT) + 63) / 64;
self.addr = 0;
self.bitmap = vec![0; len as usize];
let pad_num = len * 64 - (self.page_size >> VIRTIO_BALLOON_PFN_SHIFT);
self.bitmap[(len - 1) as usize] = !((1 << (64 - pad_num)) - 1);
}
}
const CONFIG_ACTUAL_OFFSET: u64 = 4;
const CONFIG_ACTUAL_SIZE: usize = 4;
// SAFETY: it only has data and has no implicit padding.
unsafe impl ByteValued for VirtioBalloonConfig {}
struct BalloonEpollHandler {
mem: GuestMemoryAtomic<GuestMemoryMmap>,
queues: Vec<Queue>,
interrupt_cb: Arc<dyn VirtioInterrupt>,
inflate_queue_evt: EventFd,
deflate_queue_evt: EventFd,
reporting_queue_evt: Option<EventFd>,
kill_evt: EventFd,
pause_evt: EventFd,
pbp: Option<PartiallyBalloonedPage>,
}
impl BalloonEpollHandler {
fn signal(&self, int_type: VirtioInterruptType) -> result::Result<(), Error> {
self.interrupt_cb.trigger(int_type).map_err(|e| {
error!("Failed to signal used queue: {:?}", e);
Error::FailedSignal(e)
})
}
fn advise_memory_range(
memory: &GuestMemoryMmap,
range_base: GuestAddress,
range_len: usize,
advice: libc::c_int,
) -> result::Result<(), Error> {
let hva = memory
.get_host_address(range_base)
.map_err(Error::GuestMemory)?;
let res =
// SAFETY: Need unsafe to do syscall madvise
unsafe { libc::madvise(hva as *mut libc::c_void, range_len as libc::size_t, advice) };
if res != 0 {
return Err(Error::MadviseFail(io::Error::last_os_error()));
}
Ok(())
}
fn release_memory_range(
memory: &GuestMemoryMmap,
range_base: GuestAddress,
range_len: usize,
) -> result::Result<(), Error> {
let region = memory.find_region(range_base).ok_or(Error::GuestMemory(
GuestMemoryError::InvalidGuestAddress(range_base),
))?;
if let Some(f_off) = region.file_offset() {
let offset = range_base.0 - region.start_addr().0;
// SAFETY: FFI call with valid arguments
let res = unsafe {
libc::fallocate64(
f_off.file().as_raw_fd(),
libc::FALLOC_FL_PUNCH_HOLE | libc::FALLOC_FL_KEEP_SIZE,
(offset + f_off.start()) as libc::off64_t,
range_len as libc::off64_t,
)
};
if res != 0 {
return Err(Error::FallocateFail(io::Error::last_os_error()));
}
}
Self::advise_memory_range(memory, range_base, range_len, libc::MADV_DONTNEED)
}
fn release_memory_range_4k(
pbp: &mut Option<PartiallyBalloonedPage>,
memory: &GuestMemoryMmap,
pfn: u32,
) -> result::Result<(), Error> {
let range_base = GuestAddress((pfn as u64) << VIRTIO_BALLOON_PFN_SHIFT);
let range_len = 1 << VIRTIO_BALLOON_PFN_SHIFT;
let page_size: u64 = get_page_size();
if page_size == 1 << VIRTIO_BALLOON_PFN_SHIFT {
return Self::release_memory_range(memory, range_base, range_len);
}
if pbp.is_none() {
*pbp = Some(PartiallyBalloonedPage::new());
}
if !pbp.as_ref().unwrap().pfn_match(range_base.0) {
// We are trying to free a memory range in a different host page than the current pbp. Flush the pbp first.
pbp.as_mut().unwrap().reset();
pbp.as_mut().unwrap().addr = align_page_size_down(range_base.0);
}
pbp.as_mut().unwrap().set_bit(range_base.0);
if pbp.as_ref().unwrap().bitmap_full() {
Self::release_memory_range(
memory,
vm_memory::GuestAddress(pbp.as_ref().unwrap().addr),
page_size as usize,
)?;
pbp.as_mut().unwrap().reset();
}
Ok(())
}
fn process_queue(&mut self, queue_index: usize) -> result::Result<(), Error> {
let mut used_descs = false;
while let Some(mut desc_chain) =
self.queues[queue_index].pop_descriptor_chain(self.mem.memory())
{
let desc = desc_chain.next().ok_or(Error::DescriptorChainTooShort)?;
let data_chunk_size = size_of::<u32>();
// The head contains the request type which MUST be readable.
if desc.is_write_only() {
error!("The head contains the request type is not right");
return Err(Error::UnexpectedWriteOnlyDescriptor);
}
if desc.len() as usize % data_chunk_size != 0 {
error!("the request size {} is not right", desc.len());
return Err(Error::InvalidRequest);
}
let mut offset = 0u64;
while offset < desc.len() as u64 {
let addr = desc.addr().checked_add(offset).unwrap();
let pfn: u32 = desc_chain
.memory()
.read_obj(addr)
.map_err(Error::GuestMemory)?;
offset += data_chunk_size as u64;
match queue_index {
0 => {
Self::release_memory_range_4k(&mut self.pbp, desc_chain.memory(), pfn)?;
}
1 => {
let page_size = get_page_size() as usize;
let rbase = align_page_size_down((pfn as u64) << VIRTIO_BALLOON_PFN_SHIFT);
Self::advise_memory_range(
desc_chain.memory(),
vm_memory::GuestAddress(rbase),
page_size,
libc::MADV_WILLNEED,
)?;
}
_ => return Err(Error::InvalidQueueIndex(queue_index)),
}
}
self.queues[queue_index]
.add_used(desc_chain.memory(), desc_chain.head_index(), desc.len())
.map_err(Error::QueueAddUsed)?;
used_descs = true;
}
if used_descs {
self.signal(VirtioInterruptType::Queue(queue_index as u16))
} else {
Ok(())
}
}
fn process_reporting_queue(&mut self, queue_index: usize) -> result::Result<(), Error> {
let mut used_descs = false;
while let Some(mut desc_chain) =
self.queues[queue_index].pop_descriptor_chain(self.mem.memory())
{
let mut descs_len = 0;
while let Some(desc) = desc_chain.next() {
descs_len += desc.len();
Self::release_memory_range(desc_chain.memory(), desc.addr(), desc.len() as usize)?;
}
self.queues[queue_index]
.add_used(desc_chain.memory(), desc_chain.head_index(), descs_len)
.map_err(Error::QueueAddUsed)?;
used_descs = true;
}
if used_descs {
self.signal(VirtioInterruptType::Queue(queue_index as u16))
} else {
Ok(())
}
}
fn run(
&mut self,
paused: Arc<AtomicBool>,
paused_sync: Arc<Barrier>,
) -> result::Result<(), EpollHelperError> {
let mut helper = EpollHelper::new(&self.kill_evt, &self.pause_evt)?;
helper.add_event(self.inflate_queue_evt.as_raw_fd(), INFLATE_QUEUE_EVENT)?;
helper.add_event(self.deflate_queue_evt.as_raw_fd(), DEFLATE_QUEUE_EVENT)?;
|
set_bit
|
identifier_name
|
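A small Python sketch of the PartiallyBalloonedPage bitmap arithmetic above, assuming a 64 KiB host page and 4 KiB (1 << 12) balloon pages: padding bits beyond the real sub-page count are pre-set to 1 so that bitmap_full() depends only on the bits that correspond to actual 4 KiB sub-pages. Names here are illustrative, not part of the crate.

# Sketch of the PartiallyBalloonedPage bitmap math, with padding bits
# pre-set so bitmap_full() only tracks real 4 KiB sub-pages.
VIRTIO_BALLOON_PFN_SHIFT = 12
U64_MAX = (1 << 64) - 1

def make_bitmap(page_size):
    subpages = page_size >> VIRTIO_BALLOON_PFN_SHIFT
    length = (subpages + 63) // 64
    bitmap = [0] * length
    pad_num = length * 64 - subpages
    # Mark the pad_num high bits of the last word as already "ballooned".
    bitmap[length - 1] = ~((1 << (64 - pad_num)) - 1) & U64_MAX
    return bitmap, subpages

def set_bit(bitmap, addr, page_size):
    offset = (addr % page_size) >> VIRTIO_BALLOON_PFN_SHIFT
    bitmap[offset // 64] |= 1 << (offset % 64)

def bitmap_full(bitmap):
    return all(word == U64_MAX for word in bitmap)

page_size = 64 * 1024
bitmap, subpages = make_bitmap(page_size)
for i in range(subpages):
    set_bit(bitmap, i << VIRTIO_BALLOON_PFN_SHIFT, page_size)
print(bitmap_full(bitmap))  # True once every 4 KiB sub-page is ballooned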
balloon.rs
|
lyBalloonedPage {
fn new() -> Self {
let page_size = get_page_size();
let len = ((page_size >> VIRTIO_BALLOON_PFN_SHIFT) + 63) / 64;
// Initialize each padding bit to 1 in the bitmap.
let mut bitmap = vec![0_u64; len as usize];
let pad_num = len * 64 - (page_size >> VIRTIO_BALLOON_PFN_SHIFT);
bitmap[(len - 1) as usize] = !((1 << (64 - pad_num)) - 1);
Self {
addr: 0,
bitmap,
page_size,
}
}
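// Worked example (assuming VIRTIO_BALLOON_PFN_SHIFT == 12, i.e. 4 KiB balloon pages):
// with a 64 KiB host page, page_size >> 12 == 16 sub-pages, so len == 1,
// pad_num == 48 and bitmap[0] starts as 0xFFFF_FFFF_FFFF_0000; bitmap_full()
// then becomes true once the 16 low bits for the real sub-pages are set.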
fn pfn_match(&self, addr: u64) -> bool {
self.addr == addr & !(self.page_size - 1)
}
fn bitmap_full(&self) -> bool {
self.bitmap.iter().all(|b| *b == u64::MAX)
}
fn set_bit(&mut self, addr: u64) {
let addr_offset = (addr % self.page_size) >> VIRTIO_BALLOON_PFN_SHIFT;
self.bitmap[(addr_offset / 64) as usize] |= 1 << (addr_offset % 64);
}
fn reset(&mut self) {
let len = ((self.page_size >> VIRTIO_BALLOON_PFN_SHIFT) + 63) / 64;
self.addr = 0;
self.bitmap = vec![0; len as usize];
let pad_num = len * 64 - (self.page_size >> VIRTIO_BALLOON_PFN_SHIFT);
self.bitmap[(len - 1) as usize] = !((1 << (64 - pad_num)) - 1);
}
}
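// In the virtio-balloon config space the 32-bit `actual` field follows the
// 32-bit `num_pages` field, hence offset 4 and size 4 below.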
const CONFIG_ACTUAL_OFFSET: u64 = 4;
const CONFIG_ACTUAL_SIZE: usize = 4;
// SAFETY: it only has data and has no implicit padding.
unsafe impl ByteValued for VirtioBalloonConfig {}
struct BalloonEpollHandler {
mem: GuestMemoryAtomic<GuestMemoryMmap>,
queues: Vec<Queue>,
interrupt_cb: Arc<dyn VirtioInterrupt>,
inflate_queue_evt: EventFd,
deflate_queue_evt: EventFd,
reporting_queue_evt: Option<EventFd>,
kill_evt: EventFd,
pause_evt: EventFd,
pbp: Option<PartiallyBalloonedPage>,
}
impl BalloonEpollHandler {
fn signal(&self, int_type: VirtioInterruptType) -> result::Result<(), Error> {
self.interrupt_cb.trigger(int_type).map_err(|e| {
error!("Failed to signal used queue: {:?}", e);
Error::FailedSignal(e)
})
}
fn advise_memory_range(
memory: &GuestMemoryMmap,
range_base: GuestAddress,
range_len: usize,
advice: libc::c_int,
) -> result::Result<(), Error> {
let hva = memory
.get_host_address(range_base)
.map_err(Error::GuestMemory)?;
let res =
// SAFETY: Need unsafe to do syscall madvise
unsafe { libc::madvise(hva as *mut libc::c_void, range_len as libc::size_t, advice) };
if res != 0 {
return Err(Error::MadviseFail(io::Error::last_os_error()));
}
Ok(())
}
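// Release a guest memory range back to the host: file-backed regions get their
// backing blocks punched out with fallocate(FALLOC_FL_PUNCH_HOLE), and the
// mapping is then advised with MADV_DONTNEED so the pages can be reclaimed.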
fn release_memory_range(
memory: &GuestMemoryMmap,
range_base: GuestAddress,
range_len: usize,
) -> result::Result<(), Error> {
let region = memory.find_region(range_base).ok_or(Error::GuestMemory(
GuestMemoryError::InvalidGuestAddress(range_base),
))?;
if let Some(f_off) = region.file_offset() {
let offset = range_base.0 - region.start_addr().0;
// SAFETY: FFI call with valid arguments
let res = unsafe {
libc::fallocate64(
f_off.file().as_raw_fd(),
libc::FALLOC_FL_PUNCH_HOLE | libc::FALLOC_FL_KEEP_SIZE,
(offset + f_off.start()) as libc::off64_t,
range_len as libc::off64_t,
)
};
if res != 0 {
return Err(Error::FallocateFail(io::Error::last_os_error()));
}
}
Self::advise_memory_range(memory, range_base, range_len, libc::MADV_DONTNEED)
}
fn release_memory_range_4k(
pbp: &mut Option<PartiallyBalloonedPage>,
memory: &GuestMemoryMmap,
pfn: u32,
) -> result::Result<(), Error> {
let range_base = GuestAddress((pfn as u64) << VIRTIO_BALLOON_PFN_SHIFT);
let range_len = 1 << VIRTIO_BALLOON_PFN_SHIFT;
let page_size: u64 = get_page_size();
if page_size == 1 << VIRTIO_BALLOON_PFN_SHIFT {
return Self::release_memory_range(memory, range_base, range_len);
}
if pbp.is_none() {
*pbp = Some(PartiallyBalloonedPage::new());
}
if !pbp.as_ref().unwrap().pfn_match(range_base.0) {
// We are trying to free a memory region on a different host page than the one the current pbp tracks. Flush the pbp.
pbp.as_mut().unwrap().reset();
pbp.as_mut().unwrap().addr = align_page_size_down(range_base.0);
}
pbp.as_mut().unwrap().set_bit(range_base.0);
if pbp.as_ref().unwrap().bitmap_full() {
Self::release_memory_range(
memory,
vm_memory::GuestAddress(pbp.as_ref().unwrap().addr),
page_size as usize,
)?;
pbp.as_mut().unwrap().reset();
}
Ok(())
}
fn process_queue(&mut self, queue_index: usize) -> result::Result<(), Error> {
let mut used_descs = false;
while let Some(mut desc_chain) =
self.queues[queue_index].pop_descriptor_chain(self.mem.memory())
{
let desc = desc_chain.next().ok_or(Error::DescriptorChainTooShort)?;
let data_chunk_size = size_of::<u32>();
// The head contains the request type which MUST be readable.
if desc.is_write_only() {
error!("The head contains the request type is not right");
return Err(Error::UnexpectedWriteOnlyDescriptor);
}
if desc.len() as usize % data_chunk_size != 0 {
error!("the request size {} is not right", desc.len());
return Err(Error::InvalidRequest);
}
let mut offset = 0u64;
while offset < desc.len() as u64 {
let addr = desc.addr().checked_add(offset).unwrap();
let pfn: u32 = desc_chain
.memory()
.read_obj(addr)
.map_err(Error::GuestMemory)?;
offset += data_chunk_size as u64;
match queue_index {
0 =>
|
1 => {
let page_size = get_page_size() as usize;
let rbase = align_page_size_down((pfn as u64) << VIRTIO_BALLOON_PFN_SHIFT);
Self::advise_memory_range(
desc_chain.memory(),
vm_memory::GuestAddress(rbase),
page_size,
libc::MADV_WILLNEED,
)?;
}
_ => return Err(Error::InvalidQueueIndex(queue_index)),
}
}
self.queues[queue_index]
.add_used(desc_chain.memory(), desc_chain.head_index(), desc.len())
.map_err(Error::QueueAddUsed)?;
used_descs = true;
}
if used_descs {
self.signal(VirtioInterruptType::Queue(queue_index as u16))
} else {
Ok(())
}
}
fn process_reporting_queue(&mut self, queue_index: usize) -> result::Result<(), Error> {
let mut used_descs = false;
while let Some(mut desc_chain) =
self.queues[queue_index].pop_descriptor_chain(self.mem.memory())
{
let mut descs_len = 0;
while let Some(desc) = desc_chain.next() {
descs_len += desc.len();
Self::release_memory_range(desc_chain.memory(), desc.addr(), desc.len() as usize)?;
}
self.queues[queue_index]
.add_used(desc_chain.memory(), desc_chain.head_index(), descs_len)
.map_err(Error::QueueAddUsed)?;
used_descs = true;
}
if used_descs {
self.signal(VirtioInterruptType::Queue(queue_index as u16))
} else {
Ok(())
}
}
fn run(
&mut self,
paused: Arc<AtomicBool>,
paused_sync: Arc<Barrier>,
) -> result::Result<(), EpollHelperError> {
let mut helper = EpollHelper::new(&self.kill_evt, &self.pause_evt)?;
helper.add_event(self.inflate_queue_evt.as_raw_fd(), INFLATE_QUEUE_EVENT)?;
helper.add_event(self.deflate_queue_evt.as_raw_fd(), DEFLATE_QUEUE_EVENT)?;
|
{
Self::release_memory_range_4k(&mut self.pbp, desc_chain.memory(), pfn)?;
}
|
conditional_block
|
balloon.rs
|
).map_err(|e| {
EpollHelperError::HandleEvent(anyhow!(
"Failed to signal used inflate queue: {:?}",
e
))
})?;
}
DEFLATE_QUEUE_EVENT => {
self.deflate_queue_evt.read().map_err(|e| {
EpollHelperError::HandleEvent(anyhow!(
"Failed to get deflate queue event: {:?}",
e
))
})?;
self.process_queue(1).map_err(|e| {
EpollHelperError::HandleEvent(anyhow!(
"Failed to signal used deflate queue: {:?}",
e
))
})?;
}
REPORTING_QUEUE_EVENT => {
if let Some(reporting_queue_evt) = self.reporting_queue_evt.as_ref() {
reporting_queue_evt.read().map_err(|e| {
EpollHelperError::HandleEvent(anyhow!(
"Failed to get reporting queue event: {:?}",
e
))
})?;
self.process_reporting_queue(2).map_err(|e| {
EpollHelperError::HandleEvent(anyhow!(
"Failed to signal used inflate queue: {:?}",
e
))
})?;
} else {
return Err(EpollHelperError::HandleEvent(anyhow!(
"Invalid reporting queue event as no eventfd registered"
)));
}
}
_ => {
return Err(EpollHelperError::HandleEvent(anyhow!(
"Unknown event for virtio-balloon"
)));
}
}
Ok(())
}
}
#[derive(Versionize)]
pub struct BalloonState {
pub avail_features: u64,
pub acked_features: u64,
pub config: VirtioBalloonConfig,
}
impl VersionMapped for BalloonState {}
// Virtio device for managing guest memory through ballooning.
pub struct Balloon {
common: VirtioCommon,
id: String,
config: VirtioBalloonConfig,
seccomp_action: SeccompAction,
exit_evt: EventFd,
interrupt_cb: Option<Arc<dyn VirtioInterrupt>>,
}
impl Balloon {
// Create a new virtio-balloon.
pub fn new(
id: String,
size: u64,
deflate_on_oom: bool,
free_page_reporting: bool,
seccomp_action: SeccompAction,
exit_evt: EventFd,
state: Option<BalloonState>,
) -> io::Result<Self> {
let mut queue_sizes = vec![QUEUE_SIZE; MIN_NUM_QUEUES];
let (avail_features, acked_features, config, paused) = if let Some(state) = state {
info!("Restoring virtio-balloon {}", id);
(
state.avail_features,
state.acked_features,
state.config,
true,
)
} else {
let mut avail_features = 1u64 << VIRTIO_F_VERSION_1;
if deflate_on_oom {
avail_features |= 1u64 << VIRTIO_BALLOON_F_DEFLATE_ON_OOM;
}
if free_page_reporting {
avail_features |= 1u64 << VIRTIO_BALLOON_F_REPORTING;
}
let config = VirtioBalloonConfig {
num_pages: (size >> VIRTIO_BALLOON_PFN_SHIFT) as u32,
..Default::default()
};
(avail_features, 0, config, false)
};
if free_page_reporting {
queue_sizes.push(REPORTING_QUEUE_SIZE);
}
Ok(Balloon {
common: VirtioCommon {
device_type: VirtioDeviceType::Balloon as u32,
avail_features,
acked_features,
paused_sync: Some(Arc::new(Barrier::new(2))),
queue_sizes,
min_queues: MIN_NUM_QUEUES as u16,
paused: Arc::new(AtomicBool::new(paused)),
..Default::default()
},
id,
config,
seccomp_action,
exit_evt,
interrupt_cb: None,
})
}
pub fn resize(&mut self, size: u64) -> Result<(), Error> {
self.config.num_pages = (size >> VIRTIO_BALLOON_PFN_SHIFT) as u32;
if let Some(interrupt_cb) = &self.interrupt_cb {
interrupt_cb
.trigger(VirtioInterruptType::Config)
.map_err(Error::FailedSignal)
} else {
Ok(())
}
}
// Get the actual size of the virtio-balloon.
pub fn get_actual(&self) -> u64 {
(self.config.actual as u64) << VIRTIO_BALLOON_PFN_SHIFT
}
fn state(&self) -> BalloonState {
BalloonState {
avail_features: self.common.avail_features,
acked_features: self.common.acked_features,
config: self.config,
}
}
#[cfg(fuzzing)]
pub fn wait_for_epoll_threads(&mut self) {
self.common.wait_for_epoll_threads();
}
}
impl Drop for Balloon {
fn drop(&mut self) {
if let Some(kill_evt) = self.common.kill_evt.take() {
// Ignore the result because there is nothing we can do about it.
let _ = kill_evt.write(1);
}
self.common.wait_for_epoll_threads();
}
}
impl VirtioDevice for Balloon {
fn device_type(&self) -> u32 {
self.common.device_type
}
fn queue_max_sizes(&self) -> &[u16] {
&self.common.queue_sizes
}
fn features(&self) -> u64 {
self.common.avail_features
}
fn ack_features(&mut self, value: u64) {
self.common.ack_features(value)
}
fn read_config(&self, offset: u64, data: &mut [u8]) {
self.read_config_from_slice(self.config.as_slice(), offset, data);
}
fn write_config(&mut self, offset: u64, data: &[u8]) {
// The "actual" field is the only mutable field
if offset != CONFIG_ACTUAL_OFFSET || data.len() != CONFIG_ACTUAL_SIZE {
error!(
"Attempt to write to read-only field: offset {:x} length {}",
offset,
data.len()
);
return;
}
let config = self.config.as_mut_slice();
let config_len = config.len() as u64;
let data_len = data.len() as u64;
if offset + data_len > config_len {
error!(
"Out-of-bound access to configuration: config_len = {} offset = {:x} length = {} for {}",
config_len,
offset,
data_len,
self.device_type()
);
return;
}
if let Some(end) = offset.checked_add(config.len() as u64) {
let mut offset_config =
&mut config[offset as usize..std::cmp::min(end, config_len) as usize];
offset_config.write_all(data).unwrap();
}
}
fn activate(
&mut self,
mem: GuestMemoryAtomic<GuestMemoryMmap>,
interrupt_cb: Arc<dyn VirtioInterrupt>,
mut queues: Vec<(usize, Queue, EventFd)>,
) -> ActivateResult {
self.common.activate(&queues, &interrupt_cb)?;
let (kill_evt, pause_evt) = self.common.dup_eventfds();
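// Queues arrive in device order: inflate first, deflate second, and the
// optional free-page-reporting queue last (only if the feature was acked).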
let mut virtqueues = Vec::new();
let (_, queue, queue_evt) = queues.remove(0);
virtqueues.push(queue);
let inflate_queue_evt = queue_evt;
let (_, queue, queue_evt) = queues.remove(0);
virtqueues.push(queue);
let deflate_queue_evt = queue_evt;
let reporting_queue_evt =
if self.common.feature_acked(VIRTIO_BALLOON_F_REPORTING) && !queues.is_empty() {
let (_, queue, queue_evt) = queues.remove(0);
virtqueues.push(queue);
Some(queue_evt)
} else {
None
};
self.interrupt_cb = Some(interrupt_cb.clone());
let mut handler = BalloonEpollHandler {
mem,
queues: virtqueues,
interrupt_cb,
inflate_queue_evt,
deflate_queue_evt,
reporting_queue_evt,
kill_evt,
pause_evt,
pbp: None,
};
let paused = self.common.paused.clone();
let paused_sync = self.common.paused_sync.clone();
let mut epoll_threads = Vec::new();
spawn_virtio_thread(
&self.id,
&self.seccomp_action,
Thread::VirtioBalloon,
&mut epoll_threads,
&self.exit_evt,
move || handler.run(paused, paused_sync.unwrap()),
)?;
self.common.epoll_threads = Some(epoll_threads);
event!("virtio-device", "activated", "id", &self.id);
Ok(())
}
fn reset(&mut self) -> Option<Arc<dyn VirtioInterrupt>> {
let result = self.common.reset();
event!("virtio-device", "reset", "id", &self.id);
result
}
}
impl Pausable for Balloon {
fn pause(&mut self) -> result::Result<(), MigratableError> {
self.common.pause()
}
|
fn resume(&mut self) -> result::Result<(), MigratableError> {
|
random_line_split
|
|
authenticate-helper.ts
|
|| null;
const now = getNow();
const deviceName = getUA(req.headers['user-agent']);
if (!token) {
if (!!req.cookies.refreshToken) {
const ignoredOperationList = ['getProducts', 'getCategories', 'getNewspapers'];
if (ignoredOperationList.includes(req.body.operationName)) {
// console.log(`Ignore ` + req.body.operationName );
return {};
}
return getNewJwtToken(req.cookies.refreshToken);
} else {
return {};
}
} else {
if (token.startsWith('Bearer')) {
token = token.replace('Bearer ', '');
let customerJWT = verifyJWT(token);
if (customerJWT.data) {
customerJWT = customerJWT.data;
if (req.body.operationName == 'getCustomer') {
//get main customer
let customer = await findAndProcessCustomer({ _id: customerJWT._id });
//reset refresh token
const newRefreshToken = encryptAES(`${customerJWT._id}-${deviceName}-${now}`);
await setNewRefreshToken(customer, req.cookies.refreshToken, newRefreshToken, res);
return {
customer
}
} else if (req.body.operationName == 'getChildCustomer') {
if (!!req.body.variables.customer_id) {
checkCustomerBelong(req.body.variables.customer_id, customerJWT)
customerJWT.affectedCustomerId = req.body.variables.customer_id;
const newRefreshToken = encryptAES(`${customerJWT._id}-${deviceName}-${now}-${customerJWT.affectedCustomerId}`);
await setNewRefreshToken(customerJWT, req.cookies.refreshToken, newRefreshToken, res);
} else {
throw new AuthenticationError("Customer Id is missing");
}
let jwtToken = jwt.sign(
customerJWT,
config.JWT_SECRET
);
return {
customer: {
...customerJWT,
},
accessToken: jwtToken,
}
} else {
return {
customer: customerJWT
}
}
} else if (customerJWT.err.name == 'TokenExpiredError' && !!req.cookies.refreshToken) {
return getNewJwtToken(req.cookies.refreshToken);
} else {
throw new AuthenticationError(YOU_MUST_BE_LOGGED_IN);
}
}
if (token.startsWith('Basic')) {
if (!config.REFRESH_TOKEN_EXPIRE_LIMIT || !config.JWT_EXPIRE_LIMIT) {
throw new AuthenticationError(MSG_SYSTEM_ERROR);
}
//login
token = token.replace('Basic ', '');
let query = Buffer.from(token, 'base64').toString('binary').split(':');
let customer = await findAndProcessCustomer({
'$or': [
{
'email': query[0],
},
{
'username': query[0],
},
]
}, true);
if (customer) {
//match password
const match = await bcrypt.compare(query[1], customer.password);
if (match) {
let sessionId = now;
let customerJWT = {
_id: customer._id,
username: customer.username,
name: customer.name,
customer_list: customer.customer_list,
store_id: customer.store_id,
};
let jwtToken = jwt.sign(
{
...customerJWT,
exp: now + config.JWT_EXPIRE_LIMIT,
_id: customer._id,
username: customer.username,
name: customer.name,
customer_list: customer.customer_list,
affectedCustomerId: customer._id,
sessionId,
type: customer.type
},
config.JWT_SECRET
);
const refreshToken = encryptAES(`${customer._id}-${deviceName}-${now}`);
setRefreshTokenCookie(refreshToken, res);
//update customer
let setObject: any = {};
if (!!customer.session) {
setObject[`session.${refreshToken}`] = now + config.REFRESH_TOKEN_EXPIRE_LIMIT;
} else {
setObject = {
session: {
[refreshToken]: now + config.REFRESH_TOKEN_EXPIRE_LIMIT
}
}
}
let db = await getDb();
await db.collection('customer').updateOne({
_id: customer._id,
}, {
'$set': setObject
});
return {
customer,
accessToken: jwtToken,
}
} else {
throw new AuthenticationError(PASSWORD_IS_WRONG);
}
} else {
throw new AuthenticationError(YOU_MUST_BE_LOGGED_IN);
}
}
return {};
}
}
export const checkCustomerBelong = (customerId: any, parentCustomer: any) => {
if (!parentCustomer.customer_list || !parentCustomer.customer_list.map((x: any) => x._id).includes(customerId)) {
throw new AuthenticationError('This customer does not belong to you');
}
return true;
}
const encryptAES = (text: string) => {
let encrypted = crypto.AES.encrypt(text, config.JWT_SECRET).toString();
return encrypted;
}
const decryptAES = (text: string) => {
let bytesDecrypted = crypto.AES.decrypt(text, config.JWT_SECRET);
return bytesDecrypted.toString(crypto.enc.Utf8);
}
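// Note: refresh tokens are AES-encrypted strings of the form
// `${customerId}-${deviceName}-${timestamp}`, optionally suffixed with
// `-${affectedCustomerId}`; getNewJwtToken() below decrypts the token and
// splits on '-' to recover the affected child customer for GROUP_ADMIN sessions.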
export const getUA = (userAgent: string) => {
let device = "Unknown";
const ua: any = {
"Generic Linux": /Linux/i,
"Android": /Android/i,
"BlackBerry": /BlackBerry/i,
"Bluebird": /EF500/i,
"Chrome OS": /CrOS/i,
"Datalogic": /DL-AXIS/i,
"Honeywell": /CT50/i,
"iPad": /iPad/i,
"iPhone": /iPhone/i,
"iPod": /iPod/i,
"macOS": /Macintosh/i,
"Windows": /IEMobile|Windows/i,
"Zebra": /TC70|TC55/i,
}
Object.keys(ua).forEach(v => userAgent.match(ua[v]) && (device = v));
return device;
}
const getNewJwtToken = async (refreshToken: string) => {
const now = getNow();
let sessionId = now;
let condition: any = {};
condition[`session.${refreshToken}`] = {
'$gte': now
}
let customer = await findAndProcessCustomer(condition);
if (customer) {
let customerJWT = {
_id: customer._id,
username: customer.username,
name: customer.name,
customer_list: customer.customer_list,
type: customer.type,
store_id: customer.store_id,
};
let customerChild = null;
//Must return child customer info when customer is GROUP_ADMIN
if (customer.type === CUSTOMER_TYPE.GROUP_ADMIN) {
let decryptResult = decryptAES(refreshToken);
//const newRefreshToken = encryptAES(`${customerJWT._id}-${deviceName}-${now}-${customerJWT.affectedCustomerId}`);
let affectedCustomerId = decryptResult.split('-')[3] ?? null;
affectedCustomerId = parseInt(affectedCustomerId);
if (affectedCustomerId) {
customerChild = await findAndProcessCustomer({ _id: affectedCustomerId });
}
}
let jwtToken = jwt.sign(
{
...customerJWT,
exp: now + config.JWT_EXPIRE_LIMIT,
sessionId,
affectedCustomerId: customerChild ? customerChild._id : customer._id,
store_id: customerChild ? customerChild.store_id : customer.store_id,
type: customer.type
},
config.JWT_SECRET
);
return {
customer: customerChild ?? customer,
accessToken: jwtToken
}
} else {
//remove refresh token
let db = await getDb();
condition[`session.${refreshToken}`] = {
'$ne': null
};
let unsetObject: any = {};
unsetObject[`session.${refreshToken}`] = 1;
await db.collection('user').updateOne(condition, {
'$unset': unsetObject
});
throw new ApolloError('Invalid/Expired Refresh Token', 'INVALID/EXPIRED_REFRESH_TOKEN');
}
}
export const findAndProcessCustomer = async (condition: any, projectPassword: boolean = false) => {
let db = await getDb();
let projection: any = {
projection: 0
}
if (!projectPassword)
|
let customer = await db.collection('customer').aggregate([
{ '$match': condition },
{ '$project': projection },
{
'$lookup': {
from: 'store',
localField: 'store_id',
foreignField: '_id',
as: 'store'
}
},
{
'$lookup': {
from: 'zip_code',
localField: 'zip_code_id',
foreignField: '_id',
as: 'zip_code'
}
},
{
'$lookup': {
from: 'municipality',
localField: 'zip_code.municipality_id',
foreignField: '_id',
as: 'municipality'
}
},
{
'$set': {
municipality: {
$arrayElemAt: ['$municipality', 0]
},
store: {
'$arrayElemAt': ['$store', 0]
},
zip_code: {
'$arrayElemAt': ['$zip_code', 0]
},
}
},
{
$addFields: {
'municipality.overweight_price': {
$toDouble: '$municipality.overweight_price',
}
}
},
|
{
projection = {
'password': 0
}
}
|
conditional_block
|
authenticate-helper.ts
|
.authorization || null;
const now = getNow();
const deviceName = getUA(req.headers['user-agent']);
if (!token) {
if (!!req.cookies.refreshToken) {
const ignoredOperationList = ['getProducts', 'getCategories', 'getNewspapers'];
if (ignoredOperationList.includes(req.body.operationName)) {
// console.log(`Ignore ` + req.body.operationName );
return {};
}
return getNewJwtToken(req.cookies.refreshToken);
} else {
return {};
}
} else {
if (token.startsWith('Bearer')) {
token = token.replace('Bearer ', '');
let customerJWT = verifyJWT(token);
if (customerJWT.data) {
customerJWT = customerJWT.data;
if (req.body.operationName == 'getCustomer') {
//get main customer
let customer = await findAndProcessCustomer({ _id: customerJWT._id });
//reset refresh token
const newRefreshToken = encryptAES(`${customerJWT._id}-${deviceName}-${now}`);
await setNewRefreshToken(customer, req.cookies.refreshToken, newRefreshToken, res);
return {
customer
}
} else if (req.body.operationName == 'getChildCustomer') {
if (!!req.body.variables.customer_id) {
checkCustomerBelong(req.body.variables.customer_id, customerJWT)
customerJWT.affectedCustomerId = req.body.variables.customer_id;
const newRefreshToken = encryptAES(`${customerJWT._id}-${deviceName}-${now}-${customerJWT.affectedCustomerId}`);
await setNewRefreshToken(customerJWT, req.cookies.refreshToken, newRefreshToken, res);
} else {
throw new AuthenticationError("Customer Id is missing");
}
let jwtToken = jwt.sign(
customerJWT,
config.JWT_SECRET
);
return {
customer: {
...customerJWT,
},
accessToken: jwtToken,
}
} else {
return {
customer: customerJWT
}
}
} else if (customerJWT.err.name == 'TokenExpiredError' && !!req.cookies.refreshToken) {
return getNewJwtToken(req.cookies.refreshToken);
} else {
throw new AuthenticationError(YOU_MUST_BE_LOGGED_IN);
}
}
|
//login
token = token.replace('Basic ', '');
let query = Buffer.from(token, 'base64').toString('binary').split(':');
let customer = await findAndProcessCustomer({
'$or': [
{
'email': query[0],
},
{
'username': query[0],
},
]
}, true);
if (customer) {
//match password
const match = await bcrypt.compare(query[1], customer.password);
if (match) {
let sessionId = now;
let customerJWT = {
_id: customer._id,
username: customer.username,
name: customer.name,
customer_list: customer.customer_list,
store_id: customer.store_id,
};
let jwtToken = jwt.sign(
{
...customerJWT,
exp: now + config.JWT_EXPIRE_LIMIT,
_id: customer._id,
username: customer.username,
name: customer.name,
customer_list: customer.customer_list,
affectedCustomerId: customer._id,
sessionId,
type: customer.type
},
config.JWT_SECRET
);
const refreshToken = encryptAES(`${customer._id}-${deviceName}-${now}`);
setRefreshTokenCookie(refreshToken, res);
//update customer
let setObject: any = {};
if (!!customer.session) {
setObject[`session.${refreshToken}`] = now + config.REFRESH_TOKEN_EXPIRE_LIMIT;
} else {
setObject = {
session: {
[refreshToken]: now + config.REFRESH_TOKEN_EXPIRE_LIMIT
}
}
}
let db = await getDb();
await db.collection('customer').updateOne({
_id: customer._id,
}, {
'$set': setObject
});
return {
customer,
accessToken: jwtToken,
}
} else {
throw new AuthenticationError(PASSWORD_IS_WRONG);
}
} else {
throw new AuthenticationError(YOU_MUST_BE_LOGGED_IN);
}
}
return {};
}
}
export const checkCustomerBelong = (customerId: any, parentCustomer: any) => {
if (!parentCustomer.customer_list || !parentCustomer.customer_list.map((x: any) => x._id).includes(customerId)) {
throw new AuthenticationError('This customer does not belong to you');
}
return true;
}
const encryptAES = (text: string) => {
let encrypted = crypto.AES.encrypt(text, config.JWT_SECRET).toString();
return encrypted;
}
const decryptAES = (text: string) => {
let bytesDecrypted = crypto.AES.decrypt(text, config.JWT_SECRET);
return bytesDecrypted.toString(crypto.enc.Utf8);
}
export const getUA = (userAgent: string) => {
let device = "Unknown";
const ua: any = {
"Generic Linux": /Linux/i,
"Android": /Android/i,
"BlackBerry": /BlackBerry/i,
"Bluebird": /EF500/i,
"Chrome OS": /CrOS/i,
"Datalogic": /DL-AXIS/i,
"Honeywell": /CT50/i,
"iPad": /iPad/i,
"iPhone": /iPhone/i,
"iPod": /iPod/i,
"macOS": /Macintosh/i,
"Windows": /IEMobile|Windows/i,
"Zebra": /TC70|TC55/i,
}
Object.keys(ua).forEach(v => userAgent.match(ua[v]) && (device = v));
return device;
}
const getNewJwtToken = async (refreshToken: string) => {
const now = getNow();
let sessionId = now;
let condition: any = {};
condition[`session.${refreshToken}`] = {
'$gte': now
}
let customer = await findAndProcessCustomer(condition);
if (customer) {
let customerJWT = {
_id: customer._id,
username: customer.username,
name: customer.name,
customer_list: customer.customer_list,
type: customer.type,
store_id: customer.store_id,
};
let customerChild = null;
//Must return child customer info when customer is GROUP_ADMIN
if (customer.type === CUSTOMER_TYPE.GROUP_ADMIN) {
let decryptResult = decryptAES(refreshToken);
//const newRefreshToken = encryptAES(`${customerJWT._id}-${deviceName}-${now}-${customerJWT.affectedCustomerId}`);
let affectedCustomerId = decryptResult.split('-')[3] ?? null;
affectedCustomerId = parseInt(affectedCustomerId);
if (affectedCustomerId) {
customerChild = await findAndProcessCustomer({ _id: affectedCustomerId });
}
}
let jwtToken = jwt.sign(
{
...customerJWT,
exp: now + config.JWT_EXPIRE_LIMIT,
sessionId,
affectedCustomerId: customerChild ? customerChild._id : customer._id,
store_id: customerChild ? customerChild.store_id : customer.store_id,
type: customer.type
},
config.JWT_SECRET
);
return {
customer: customerChild ?? customer,
accessToken: jwtToken
}
} else {
//remove refresh token
let db = await getDb();
condition[`session.${refreshToken}`] = {
'$ne': null
};
let unsetObject: any = {};
unsetObject[`session.${refreshToken}`] = 1;
await db.collection('user').updateOne(condition, {
'$unset': unsetObject
});
throw new ApolloError('Invalid/Expired Refresh Token', 'INVALID/EXPIRED_REFRESH_TOKEN');
}
}
export const findAndProcessCustomer = async (condition: any, projectPassword: boolean = false) => {
let db = await getDb();
let projection: any = {
projection: 0
}
if (!projectPassword) {
projection = {
'password': 0
}
}
let customer = await db.collection('customer').aggregate([
{ '$match': condition },
{ '$project': projection },
{
'$lookup': {
from: 'store',
localField: 'store_id',
foreignField: '_id',
as: 'store'
}
},
{
'$lookup': {
from: 'zip_code',
localField: 'zip_code_id',
foreignField: '_id',
as: 'zip_code'
}
},
{
'$lookup': {
from: 'municipality',
localField: 'zip_code.municipality_id',
foreignField: '_id',
as: 'municipality'
}
},
{
'$set': {
municipality: {
$arrayElemAt: ['$municipality', 0]
},
store: {
'$arrayElemAt': ['$store', 0]
},
zip_code: {
'$arrayElemAt': ['$zip_code', 0]
},
}
},
{
$addFields: {
'municipality.overweight_price': {
$toDouble: '$municipality.overweight_price',
}
}
},
|
if (token.startsWith('Basic')) {
if (!config.REFRESH_TOKEN_EXPIRE_LIMIT || !config.JWT_EXPIRE_LIMIT) {
throw new AuthenticationError(MSG_SYSTEM_ERROR);
}
|
random_line_split
|
disk_location.go
|
.IdxDirectory + "/" + volumeName)
return false
}
// parse out collection, volume id
vid, collection, err := volumeIdFromFileName(basename)
if err != nil {
glog.Warningf("get volume id failed, %s, err : %s", volumeName, err)
return false
}
// avoid loading one volume more than once
l.volumesLock.RLock()
_, found := l.volumes[vid]
l.volumesLock.RUnlock()
if found {
glog.V(1).Infof("loaded volume, %v", vid)
return true
}
// load the volume
v, e := NewVolume(l.Directory, l.IdxDirectory, collection, vid, needleMapKind, nil, nil, 0, 0, ldbTimeout)
if e != nil {
glog.V(0).Infof("new volume %s error %s", volumeName, e)
return false
}
l.SetVolume(vid, v)
size, _, _ := v.FileStat()
glog.V(0).Infof("data file %s, replication=%s v=%d size=%d ttl=%s",
l.Directory+"/"+volumeName+".dat", v.ReplicaPlacement, v.Version(), size, v.Ttl.String())
return true
}
func (l *DiskLocation) concurrentLoadingVolumes(needleMapKind NeedleMapKind, concurrency int, ldbTimeout int64) {
task_queue := make(chan os.DirEntry, 10*concurrency)
go func() {
foundVolumeNames := make(map[string]bool)
if dirEntries, err := os.ReadDir(l.Directory); err == nil {
for _, entry := range dirEntries {
volumeName := getValidVolumeName(entry.Name())
if volumeName == "" {
continue
}
if _, found := foundVolumeNames[volumeName]; !found {
foundVolumeNames[volumeName] = true
task_queue <- entry
}
}
}
close(task_queue)
}()
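// Drain the queue with a fixed-size pool of `concurrency` workers; each worker
// keeps loading volumes until the producer goroutine above closes task_queue.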
var wg sync.WaitGroup
for workerNum := 0; workerNum < concurrency; workerNum++ {
wg.Add(1)
go func() {
defer wg.Done()
for fi := range task_queue {
_ = l.loadExistingVolume(fi, needleMapKind, true, ldbTimeout)
}
}()
}
wg.Wait()
}
func (l *DiskLocation) loadExistingVolumes(needleMapKind NeedleMapKind, ldbTimeout int64) {
workerNum := runtime.NumCPU()
val, ok := os.LookupEnv("GOMAXPROCS")
if ok {
num, err := strconv.Atoi(val)
if err != nil || num < 1 {
num = 10
glog.Warningf("failed to set worker number from GOMAXPROCS , set to default:10")
}
workerNum = num
} else {
if workerNum <= 10 {
workerNum = 10
}
}
l.concurrentLoadingVolumes(needleMapKind, workerNum, ldbTimeout)
glog.V(0).Infof("Store started on dir: %s with %d volumes max %d", l.Directory, len(l.volumes), l.MaxVolumeCount)
l.loadAllEcShards()
glog.V(0).Infof("Store started on dir: %s with %d ec shards", l.Directory, len(l.ecVolumes))
}
func (l *DiskLocation) DeleteCollectionFromDiskLocation(collection string) (e error) {
l.volumesLock.Lock()
delVolsMap := l.unmountVolumeByCollection(collection)
l.volumesLock.Unlock()
l.ecVolumesLock.Lock()
delEcVolsMap := l.unmountEcVolumeByCollection(collection)
l.ecVolumesLock.Unlock()
errChain := make(chan error, 2)
var wg sync.WaitGroup
wg.Add(2)
go func() {
for _, v := range delVolsMap {
if err := v.Destroy(false); err != nil {
errChain <- err
}
}
wg.Done()
}()
go func() {
for _, v := range delEcVolsMap {
v.Destroy()
}
wg.Done()
}()
go func() {
wg.Wait()
close(errChain)
}()
errBuilder := strings.Builder{}
for err := range errChain {
errBuilder.WriteString(err.Error())
errBuilder.WriteString("; ")
}
if errBuilder.Len() > 0 {
e = fmt.Errorf("%s", errBuilder.String())
}
return
}
func (l *DiskLocation) deleteVolumeById(vid needle.VolumeId, onlyEmpty bool) (found bool, e error) {
v, ok := l.volumes[vid]
if !ok {
return
}
e = v.Destroy(onlyEmpty)
if e != nil {
return
}
found = true
delete(l.volumes, vid)
return
}
func (l *DiskLocation) LoadVolume(vid needle.VolumeId, needleMapKind NeedleMapKind) bool {
if fileInfo, found := l.LocateVolume(vid); found {
return l.loadExistingVolume(fileInfo, needleMapKind, false, 0)
}
return false
}
var ErrVolumeNotFound = fmt.Errorf("volume not found")
func (l *DiskLocation) DeleteVolume(vid needle.VolumeId, onlyEmpty bool) error {
l.volumesLock.Lock()
defer l.volumesLock.Unlock()
_, ok := l.volumes[vid]
if !ok {
return ErrVolumeNotFound
}
_, err := l.deleteVolumeById(vid, onlyEmpty)
return err
}
func (l *DiskLocation) UnloadVolume(vid needle.VolumeId) error {
l.volumesLock.Lock()
defer l.volumesLock.Unlock()
v, ok := l.volumes[vid]
if !ok {
return ErrVolumeNotFound
}
v.Close()
delete(l.volumes, vid)
return nil
}
func (l *DiskLocation) unmountVolumeByCollection(collectionName string) map[needle.VolumeId]*Volume {
deltaVols := make(map[needle.VolumeId]*Volume, 0)
for k, v := range l.volumes {
if v.Collection == collectionName && !v.isCompacting && !v.isCommitCompacting {
deltaVols[k] = v
}
}
for k := range deltaVols {
delete(l.volumes, k)
}
return deltaVols
}
func (l *DiskLocation) SetVolume(vid needle.VolumeId, volume *Volume) {
l.volumesLock.Lock()
defer l.volumesLock.Unlock()
l.volumes[vid] = volume
volume.location = l
}
func (l *DiskLocation) FindVolume(vid needle.VolumeId) (*Volume, bool) {
l.volumesLock.RLock()
defer l.volumesLock.RUnlock()
v, ok := l.volumes[vid]
return v, ok
}
func (l *DiskLocation) VolumesLen() int {
l.volumesLock.RLock()
defer l.volumesLock.RUnlock()
return len(l.volumes)
}
func (l *DiskLocation) SetStopping() {
l.volumesLock.Lock()
for _, v := range l.volumes {
v.SyncToDisk()
}
l.volumesLock.Unlock()
return
}
func (l *DiskLocation) Close() {
l.volumesLock.Lock()
for _, v := range l.volumes {
v.Close()
}
l.volumesLock.Unlock()
l.ecVolumesLock.Lock()
for _, ecVolume := range l.ecVolumes {
ecVolume.Close()
}
l.ecVolumesLock.Unlock()
close(l.closeCh)
return
}
func (l *DiskLocation) LocateVolume(vid needle.VolumeId) (os.DirEntry, bool) {
// println("LocateVolume", vid, "on", l.Directory)
if dirEntries, err := os.ReadDir(l.Directory); err == nil {
for _, entry := range dirEntries {
// println("checking", entry.Name(), "...")
volId, _, err := volumeIdFromFileName(entry.Name())
// println("volId", volId, "err", err)
if vid == volId && err == nil {
return entry, true
}
}
}
return nil, false
}
func (l *DiskLocation) UnUsedSpace(volumeSizeLimit uint64) (unUsedSpace uint64) {
l.volumesLock.RLock()
defer l.volumesLock.RUnlock()
for _, vol := range l.volumes {
if vol.IsReadOnly() {
continue
}
datSize, idxSize, _ := vol.FileStat()
unUsedSpace += volumeSizeLimit - (datSize + idxSize)
}
return
}
func (l *DiskLocation) CheckDiskSpace() {
if dir, e := filepath.Abs(l.Directory); e == nil {
s := stats.NewDiskStatus(dir)
stats.VolumeServerResourceGauge.WithLabelValues(l.Directory, "all").Set(float64(s.All))
stats.VolumeServerResourceGauge.WithLabelValues(l.Directory, "used").Set(float64(s.Used))
stats.VolumeServerResourceGauge.WithLabelValues(l.Directory, "free").Set(float64(s.Free))
isLow, desc := l.MinFreeSpace.IsLow(s.Free, s.PercentFree)
if isLow != l.isDiskSpaceLow {
l.isDiskSpaceLow = !l.isDiskSpaceLow
}
|
logLevel := glog.Level(4)
if l.isDiskSpaceLow {
logLevel = glog.Level(0)
}
|
random_line_split
|
|
disk_location.go
|
func NewDiskLocation(dir string, maxVolumeCount int32, minFreeSpace util.MinFreeSpace, idxDir string, diskType types.DiskType) *DiskLocation {
dir = util.ResolvePath(dir)
if idxDir == "" {
idxDir = dir
} else {
idxDir = util.ResolvePath(idxDir)
}
dirUuid, err := GenerateDirUuid(dir)
if err != nil {
glog.Fatalf("cannot generate uuid of dir %s: %v", dir, err)
}
location := &DiskLocation{
Directory: dir,
DirectoryUuid: dirUuid,
IdxDirectory: idxDir,
DiskType: diskType,
MaxVolumeCount: maxVolumeCount,
OriginalMaxVolumeCount: maxVolumeCount,
MinFreeSpace: minFreeSpace,
}
location.volumes = make(map[needle.VolumeId]*Volume)
location.ecVolumes = make(map[needle.VolumeId]*erasure_coding.EcVolume)
location.closeCh = make(chan struct{})
go func() {
location.CheckDiskSpace()
for {
select {
case <-location.closeCh:
return
case <-time.After(time.Minute):
location.CheckDiskSpace()
}
}
}()
return location
}
func volumeIdFromFileName(filename string) (needle.VolumeId, string, error) {
if isValidVolume(filename) {
base := filename[:len(filename)-4]
collection, volumeId, err := parseCollectionVolumeId(base)
return volumeId, collection, err
}
return 0, "", fmt.Errorf("file is not a volume: %s", filename)
}
func parseCollectionVolumeId(base string) (collection string, vid needle.VolumeId, err error) {
i := strings.LastIndex(base, "_")
if i > 0 {
collection, base = base[0:i], base[i+1:]
}
vol, err := needle.NewVolumeId(base)
return collection, vol, err
}
func isValidVolume(basename string) bool {
return strings.HasSuffix(basename, ".idx") || strings.HasSuffix(basename, ".vif")
}
func getValidVolumeName(basename string) string {
if isValidVolume(basename) {
return basename[:len(basename)-4]
}
return ""
}
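// For example: volumeIdFromFileName("c1_42.idx") yields volume id 42 in
// collection "c1", while getValidVolumeName("42.vif") yields "42" and any
// name without a .idx/.vif suffix yields "".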
func (l *DiskLocation) loadExistingVolume(dirEntry os.DirEntry, needleMapKind NeedleMapKind, skipIfEcVolumesExists bool, ldbTimeout int64) bool {
basename := dirEntry.Name()
if dirEntry.IsDir() {
return false
}
volumeName := getValidVolumeName(basename)
if volumeName == "" {
return false
}
// skip if ec volumes exists
if skipIfEcVolumesExists {
if util.FileExists(l.Directory + "/" + volumeName + ".ecx") {
return false
}
}
// check for incomplete volume
noteFile := l.Directory + "/" + volumeName + ".note"
if util.FileExists(noteFile) {
note, _ := os.ReadFile(noteFile)
glog.Warningf("volume %s was not completed: %s", volumeName, string(note))
removeVolumeFiles(l.Directory + "/" + volumeName)
removeVolumeFiles(l.IdxDirectory + "/" + volumeName)
return false
}
// parse out collection, volume id
vid, collection, err := volumeIdFromFileName(basename)
if err != nil {
glog.Warningf("get volume id failed, %s, err : %s", volumeName, err)
return false
}
// avoid loading one volume more than once
l.volumesLock.RLock()
_, found := l.volumes[vid]
l.volumesLock.RUnlock()
if found {
glog.V(1).Infof("loaded volume, %v", vid)
return true
}
// load the volume
v, e := NewVolume(l.Directory, l.IdxDirectory, collection, vid, needleMapKind, nil, nil, 0, 0, ldbTimeout)
if e != nil {
glog.V(0).Infof("new volume %s error %s", volumeName, e)
return false
}
l.SetVolume(vid, v)
size, _, _ := v.FileStat()
glog.V(0).Infof("data file %s, replication=%s v=%d size=%d ttl=%s",
l.Directory+"/"+volumeName+".dat", v.ReplicaPlacement, v.Version(), size, v.Ttl.String())
return true
}
func (l *DiskLocation) concurrentLoadingVolumes(needleMapKind NeedleMapKind, concurrency int, ldbTimeout int64) {
task_queue := make(chan os.DirEntry, 10*concurrency)
go func() {
foundVolumeNames := make(map[string]bool)
if dirEntries, err := os.ReadDir(l.Directory); err == nil {
for _, entry := range dirEntries {
volumeName := getValidVolumeName(entry.Name())
if volumeName == "" {
continue
}
if _, found := foundVolumeNames[volumeName]; !found {
foundVolumeNames[volumeName] = true
task_queue <- entry
}
}
}
close(task_queue)
}()
var wg sync.WaitGroup
for workerNum := 0; workerNum < concurrency; workerNum++ {
wg.Add(1)
go func() {
defer wg.Done()
for fi := range task_queue {
_ = l.loadExistingVolume(fi, needleMapKind, true, ldbTimeout)
}
}()
}
wg.Wait()
}
func (l *DiskLocation) loadExistingVolumes(needleMapKind NeedleMapKind, ldbTimeout int64) {
workerNum := runtime.NumCPU()
val, ok := os.LookupEnv("GOMAXPROCS")
if ok {
num, err := strconv.Atoi(val)
if err != nil || num < 1 {
num = 10
glog.Warningf("failed to set worker number from GOMAXPROCS , set to default:10")
}
workerNum = num
} else {
if workerNum <= 10 {
workerNum = 10
}
}
l.concurrentLoadingVolumes(needleMapKind, workerNum, ldbTimeout)
glog.V(0).Infof("Store started on dir: %s with %d volumes max %d", l.Directory, len(l.volumes), l.MaxVolumeCount)
l.loadAllEcShards()
glog.V(0).Infof("Store started on dir: %s with %d ec shards", l.Directory, len(l.ecVolumes))
}
func (l *DiskLocation) DeleteCollectionFromDiskLocation(collection string) (e error) {
l.volumesLock.Lock()
delVolsMap := l.unmountVolumeByCollection(collection)
l.volumesLock.Unlock()
l.ecVolumesLock.Lock()
delEcVolsMap := l.unmountEcVolumeByCollection(collection)
l.ecVolumesLock.Unlock()
errChain := make(chan error, 2)
var wg sync.WaitGroup
wg.Add(2)
go func() {
for _, v := range delVolsMap {
if err := v.Destroy(false); err != nil {
errChain <- err
}
}
wg.Done()
}()
go func() {
for _, v := range delEcVolsMap {
v.Destroy()
}
wg.Done()
}()
go func() {
wg.Wait()
close(errChain)
}()
errBuilder := strings.Builder{}
for err := range errChain {
errBuilder.WriteString(err.Error())
errBuilder.WriteString("; ")
}
if errBuilder.Len() > 0 {
e = fmt.Errorf("%s", errBuilder.String())
}
return
}
func (l *DiskLocation) deleteVolumeById(vid needle.VolumeId, onlyEmpty bool) (found bool, e error) {
v, ok := l.volumes[vid]
if !ok {
return
}
e = v.Destroy(onlyEmpty)
if e != nil {
return
}
found = true
delete(l.volumes, vid)
return
}
func (l *DiskLocation) LoadVolume(vid needle.VolumeId, needleMapKind NeedleMapKind) bool {
if fileInfo, found := l.LocateVolume(vid); found {
return l.loadExistingVolume(fileInfo, needleMapKind, false, 0)
}
return false
}
var ErrVolumeNotFound = fmt.Errorf("volume not found")
func (l *DiskLocation) DeleteVolume(vid needle.VolumeId, onlyEmpty bool) error {
l.volumesLock.Lock()
defer l.volumesLock.Unlock()
_, ok := l.volumes[vid]
if !ok {
|
{
glog.V(1).Infof("Getting uuid of volume directory:%s", dir)
dirUuidString = ""
fileName := dir + "/vol_dir.uuid"
if !util.FileExists(fileName) {
dirUuid, _ := uuid.NewRandom()
dirUuidString = dirUuid.String()
writeErr := util.WriteFile(fileName, []byte(dirUuidString), 0644)
if writeErr != nil {
return "", fmt.Errorf("failed to write uuid to %s : %v", fileName, writeErr)
}
} else {
uuidData, readErr := os.ReadFile(fileName)
if readErr != nil {
return "", fmt.Errorf("failed to read uuid from %s : %v", fileName, readErr)
}
dirUuidString = string(uuidData)
}
return dirUuidString, nil
}
|
identifier_body
|
|
disk_location.go
|
UuidString), 0644)
if writeErr != nil {
return "", fmt.Errorf("failed to write uuid to %s : %v", fileName, writeErr)
}
} else {
uuidData, readErr := os.ReadFile(fileName)
if readErr != nil {
return "", fmt.Errorf("failed to read uuid from %s : %v", fileName, readErr)
}
dirUuidString = string(uuidData)
}
return dirUuidString, nil
}
func NewDiskLocation(dir string, maxVolumeCount int32, minFreeSpace util.MinFreeSpace, idxDir string, diskType types.DiskType) *DiskLocation {
dir = util.ResolvePath(dir)
if idxDir == "" {
idxDir = dir
} else {
idxDir = util.ResolvePath(idxDir)
}
dirUuid, err := GenerateDirUuid(dir)
if err != nil {
glog.Fatalf("cannot generate uuid of dir %s: %v", dir, err)
}
location := &DiskLocation{
Directory: dir,
DirectoryUuid: dirUuid,
IdxDirectory: idxDir,
DiskType: diskType,
MaxVolumeCount: maxVolumeCount,
OriginalMaxVolumeCount: maxVolumeCount,
MinFreeSpace: minFreeSpace,
}
location.volumes = make(map[needle.VolumeId]*Volume)
location.ecVolumes = make(map[needle.VolumeId]*erasure_coding.EcVolume)
location.closeCh = make(chan struct{})
go func() {
location.CheckDiskSpace()
for {
select {
case <-location.closeCh:
return
case <-time.After(time.Minute):
location.CheckDiskSpace()
}
}
}()
return location
}
func volumeIdFromFileName(filename string) (needle.VolumeId, string, error) {
if isValidVolume(filename) {
base := filename[:len(filename)-4]
collection, volumeId, err := parseCollectionVolumeId(base)
return volumeId, collection, err
}
return 0, "", fmt.Errorf("file is not a volume: %s", filename)
}
func parseCollectionVolumeId(base string) (collection string, vid needle.VolumeId, err error) {
i := strings.LastIndex(base, "_")
if i > 0 {
collection, base = base[0:i], base[i+1:]
}
vol, err := needle.NewVolumeId(base)
return collection, vol, err
}
func isValidVolume(basename string) bool {
return strings.HasSuffix(basename, ".idx") || strings.HasSuffix(basename, ".vif")
}
func getValidVolumeName(basename string) string {
if isValidVolume(basename) {
return basename[:len(basename)-4]
}
return ""
}
func (l *DiskLocation) loadExistingVolume(dirEntry os.DirEntry, needleMapKind NeedleMapKind, skipIfEcVolumesExists bool, ldbTimeout int64) bool {
basename := dirEntry.Name()
if dirEntry.IsDir() {
return false
}
volumeName := getValidVolumeName(basename)
if volumeName == "" {
return false
}
// skip if ec volumes exists
if skipIfEcVolumesExists {
if util.FileExists(l.Directory + "/" + volumeName + ".ecx") {
return false
}
}
// check for incomplete volume
noteFile := l.Directory + "/" + volumeName + ".note"
if util.FileExists(noteFile) {
note, _ := os.ReadFile(noteFile)
glog.Warningf("volume %s was not completed: %s", volumeName, string(note))
removeVolumeFiles(l.Directory + "/" + volumeName)
removeVolumeFiles(l.IdxDirectory + "/" + volumeName)
return false
}
// parse out collection, volume id
vid, collection, err := volumeIdFromFileName(basename)
if err != nil {
glog.Warningf("get volume id failed, %s, err : %s", volumeName, err)
return false
}
// avoid loading one volume more than once
l.volumesLock.RLock()
_, found := l.volumes[vid]
l.volumesLock.RUnlock()
if found {
glog.V(1).Infof("loaded volume, %v", vid)
return true
}
// load the volume
v, e := NewVolume(l.Directory, l.IdxDirectory, collection, vid, needleMapKind, nil, nil, 0, 0, ldbTimeout)
if e != nil {
glog.V(0).Infof("new volume %s error %s", volumeName, e)
return false
}
l.SetVolume(vid, v)
size, _, _ := v.FileStat()
glog.V(0).Infof("data file %s, replication=%s v=%d size=%d ttl=%s",
l.Directory+"/"+volumeName+".dat", v.ReplicaPlacement, v.Version(), size, v.Ttl.String())
return true
}
func (l *DiskLocation) concurrentLoadingVolumes(needleMapKind NeedleMapKind, concurrency int, ldbTimeout int64) {
task_queue := make(chan os.DirEntry, 10*concurrency)
go func() {
foundVolumeNames := make(map[string]bool)
if dirEntries, err := os.ReadDir(l.Directory); err == nil {
for _, entry := range dirEntries {
volumeName := getValidVolumeName(entry.Name())
if volumeName == "" {
continue
}
if _, found := foundVolumeNames[volumeName]; !found {
foundVolumeNames[volumeName] = true
task_queue <- entry
}
}
}
close(task_queue)
}()
var wg sync.WaitGroup
for workerNum := 0; workerNum < concurrency; workerNum++ {
wg.Add(1)
go func() {
defer wg.Done()
for fi := range task_queue {
_ = l.loadExistingVolume(fi, needleMapKind, true, ldbTimeout)
}
}()
}
wg.Wait()
}
func (l *DiskLocation) loadExistingVolumes(needleMapKind NeedleMapKind, ldbTimeout int64) {
workerNum := runtime.NumCPU()
val, ok := os.LookupEnv("GOMAXPROCS")
if ok {
num, err := strconv.Atoi(val)
if err != nil || num < 1 {
num = 10
glog.Warningf("failed to set worker number from GOMAXPROCS , set to default:10")
}
workerNum = num
} else {
if workerNum <= 10
|
}
l.concurrentLoadingVolumes(needleMapKind, workerNum, ldbTimeout)
glog.V(0).Infof("Store started on dir: %s with %d volumes max %d", l.Directory, len(l.volumes), l.MaxVolumeCount)
l.loadAllEcShards()
glog.V(0).Infof("Store started on dir: %s with %d ec shards", l.Directory, len(l.ecVolumes))
}
func (l *DiskLocation) DeleteCollectionFromDiskLocation(collection string) (e error) {
l.volumesLock.Lock()
delVolsMap := l.unmountVolumeByCollection(collection)
l.volumesLock.Unlock()
l.ecVolumesLock.Lock()
delEcVolsMap := l.unmountEcVolumeByCollection(collection)
l.ecVolumesLock.Unlock()
errChain := make(chan error, 2)
var wg sync.WaitGroup
wg.Add(2)
go func() {
for _, v := range delVolsMap {
if err := v.Destroy(false); err != nil {
errChain <- err
}
}
wg.Done()
}()
go func() {
for _, v := range delEcVolsMap {
v.Destroy()
}
wg.Done()
}()
go func() {
wg.Wait()
close(errChain)
}()
errBuilder := strings.Builder{}
for err := range errChain {
errBuilder.WriteString(err.Error())
errBuilder.WriteString("; ")
}
if errBuilder.Len() > 0 {
e = fmt.Errorf("%s", errBuilder.String())
}
return
}
func (l *DiskLocation) deleteVolumeById(vid needle.VolumeId, onlyEmpty bool) (found bool, e error) {
v, ok := l.volumes[vid]
if !ok {
return
}
e = v.Destroy(onlyEmpty)
if e != nil {
return
}
found = true
delete(l.volumes, vid)
return
}
func (l *DiskLocation) LoadVolume(vid needle.VolumeId, needleMapKind NeedleMapKind) bool {
if fileInfo, found := l.LocateVolume(vid); found {
return l.loadExistingVolume(fileInfo, needleMapKind, false, 0)
}
return false
}
var ErrVolumeNotFound = fmt.Errorf("volume not found")
func (l *DiskLocation) DeleteVolume(vid needle.VolumeId, onlyEmpty bool) error {
l.volumesLock.Lock()
defer l.volumesLock.Unlock()
_, ok := l.volumes[vid]
if !ok {
return ErrVolumeNotFound
}
_, err := l.deleteVolumeById(vid, onlyEmpty)
return err
}
func (l *DiskLocation) UnloadVolume(vid needle.VolumeId) error {
l.volumesLock.Lock()
defer l.volumesLock.Unlock()
v, ok := l.volumes[vid]
if !ok {
return
|
{
workerNum = 10
}
|
conditional_block
|
disk_location.go
|
, vol, err
}
func isValidVolume(basename string) bool {
return strings.HasSuffix(basename, ".idx") || strings.HasSuffix(basename, ".vif")
}
func getValidVolumeName(basename string) string {
if isValidVolume(basename) {
return basename[:len(basename)-4]
}
return ""
}
func (l *DiskLocation) loadExistingVolume(dirEntry os.DirEntry, needleMapKind NeedleMapKind, skipIfEcVolumesExists bool, ldbTimeout int64) bool {
basename := dirEntry.Name()
if dirEntry.IsDir() {
return false
}
volumeName := getValidVolumeName(basename)
if volumeName == "" {
return false
}
// skip if ec volumes exists
if skipIfEcVolumesExists {
if util.FileExists(l.Directory + "/" + volumeName + ".ecx") {
return false
}
}
// check for incomplete volume
noteFile := l.Directory + "/" + volumeName + ".note"
if util.FileExists(noteFile) {
note, _ := os.ReadFile(noteFile)
glog.Warningf("volume %s was not completed: %s", volumeName, string(note))
removeVolumeFiles(l.Directory + "/" + volumeName)
removeVolumeFiles(l.IdxDirectory + "/" + volumeName)
return false
}
// parse out collection, volume id
vid, collection, err := volumeIdFromFileName(basename)
if err != nil {
glog.Warningf("get volume id failed, %s, err : %s", volumeName, err)
return false
}
// avoid loading one volume more than once
l.volumesLock.RLock()
_, found := l.volumes[vid]
l.volumesLock.RUnlock()
if found {
glog.V(1).Infof("loaded volume, %v", vid)
return true
}
// load the volume
v, e := NewVolume(l.Directory, l.IdxDirectory, collection, vid, needleMapKind, nil, nil, 0, 0, ldbTimeout)
if e != nil {
glog.V(0).Infof("new volume %s error %s", volumeName, e)
return false
}
l.SetVolume(vid, v)
size, _, _ := v.FileStat()
glog.V(0).Infof("data file %s, replication=%s v=%d size=%d ttl=%s",
l.Directory+"/"+volumeName+".dat", v.ReplicaPlacement, v.Version(), size, v.Ttl.String())
return true
}
func (l *DiskLocation) concurrentLoadingVolumes(needleMapKind NeedleMapKind, concurrency int, ldbTimeout int64) {
task_queue := make(chan os.DirEntry, 10*concurrency)
go func() {
foundVolumeNames := make(map[string]bool)
if dirEntries, err := os.ReadDir(l.Directory); err == nil {
for _, entry := range dirEntries {
volumeName := getValidVolumeName(entry.Name())
if volumeName == "" {
continue
}
if _, found := foundVolumeNames[volumeName]; !found {
foundVolumeNames[volumeName] = true
task_queue <- entry
}
}
}
close(task_queue)
}()
var wg sync.WaitGroup
for workerNum := 0; workerNum < concurrency; workerNum++ {
wg.Add(1)
go func() {
defer wg.Done()
for fi := range task_queue {
_ = l.loadExistingVolume(fi, needleMapKind, true, ldbTimeout)
}
}()
}
wg.Wait()
}
func (l *DiskLocation) loadExistingVolumes(needleMapKind NeedleMapKind, ldbTimeout int64) {
workerNum := runtime.NumCPU()
val, ok := os.LookupEnv("GOMAXPROCS")
if ok {
num, err := strconv.Atoi(val)
if err != nil || num < 1 {
num = 10
glog.Warningf("failed to set worker number from GOMAXPROCS , set to default:10")
}
workerNum = num
} else {
if workerNum <= 10 {
workerNum = 10
}
}
l.concurrentLoadingVolumes(needleMapKind, workerNum, ldbTimeout)
glog.V(0).Infof("Store started on dir: %s with %d volumes max %d", l.Directory, len(l.volumes), l.MaxVolumeCount)
l.loadAllEcShards()
glog.V(0).Infof("Store started on dir: %s with %d ec shards", l.Directory, len(l.ecVolumes))
}
func (l *DiskLocation) DeleteCollectionFromDiskLocation(collection string) (e error) {
l.volumesLock.Lock()
delVolsMap := l.unmountVolumeByCollection(collection)
l.volumesLock.Unlock()
l.ecVolumesLock.Lock()
delEcVolsMap := l.unmountEcVolumeByCollection(collection)
l.ecVolumesLock.Unlock()
errChain := make(chan error, 2)
var wg sync.WaitGroup
wg.Add(2)
go func() {
for _, v := range delVolsMap {
if err := v.Destroy(false); err != nil {
errChain <- err
}
}
wg.Done()
}()
go func() {
for _, v := range delEcVolsMap {
v.Destroy()
}
wg.Done()
}()
go func() {
wg.Wait()
close(errChain)
}()
errBuilder := strings.Builder{}
for err := range errChain {
errBuilder.WriteString(err.Error())
errBuilder.WriteString("; ")
}
if errBuilder.Len() > 0 {
e = fmt.Errorf("%s", errBuilder.String())
}
return
}
func (l *DiskLocation) deleteVolumeById(vid needle.VolumeId, onlyEmpty bool) (found bool, e error) {
v, ok := l.volumes[vid]
if !ok {
return
}
e = v.Destroy(onlyEmpty)
if e != nil {
return
}
found = true
delete(l.volumes, vid)
return
}
func (l *DiskLocation) LoadVolume(vid needle.VolumeId, needleMapKind NeedleMapKind) bool {
if fileInfo, found := l.LocateVolume(vid); found {
return l.loadExistingVolume(fileInfo, needleMapKind, false, 0)
}
return false
}
var ErrVolumeNotFound = fmt.Errorf("volume not found")
func (l *DiskLocation) DeleteVolume(vid needle.VolumeId, onlyEmpty bool) error {
l.volumesLock.Lock()
defer l.volumesLock.Unlock()
_, ok := l.volumes[vid]
if !ok {
return ErrVolumeNotFound
}
_, err := l.deleteVolumeById(vid, onlyEmpty)
return err
}
func (l *DiskLocation) UnloadVolume(vid needle.VolumeId) error {
l.volumesLock.Lock()
defer l.volumesLock.Unlock()
v, ok := l.volumes[vid]
if !ok {
return ErrVolumeNotFound
}
v.Close()
delete(l.volumes, vid)
return nil
}
func (l *DiskLocation) unmountVolumeByCollection(collectionName string) map[needle.VolumeId]*Volume {
deltaVols := make(map[needle.VolumeId]*Volume, 0)
for k, v := range l.volumes {
if v.Collection == collectionName && !v.isCompacting && !v.isCommitCompacting {
deltaVols[k] = v
}
}
for k := range deltaVols {
delete(l.volumes, k)
}
return deltaVols
}
func (l *DiskLocation) SetVolume(vid needle.VolumeId, volume *Volume) {
l.volumesLock.Lock()
defer l.volumesLock.Unlock()
l.volumes[vid] = volume
volume.location = l
}
func (l *DiskLocation) FindVolume(vid needle.VolumeId) (*Volume, bool) {
l.volumesLock.RLock()
defer l.volumesLock.RUnlock()
v, ok := l.volumes[vid]
return v, ok
}
func (l *DiskLocation) VolumesLen() int {
l.volumesLock.RLock()
defer l.volumesLock.RUnlock()
return len(l.volumes)
}
func (l *DiskLocation) SetStopping() {
l.volumesLock.Lock()
for _, v := range l.volumes {
v.SyncToDisk()
}
l.volumesLock.Unlock()
return
}
func (l *DiskLocation) Close() {
l.volumesLock.Lock()
for _, v := range l.volumes {
v.Close()
}
l.volumesLock.Unlock()
l.ecVolumesLock.Lock()
for _, ecVolume := range l.ecVolumes {
ecVolume.Close()
}
l.ecVolumesLock.Unlock()
close(l.closeCh)
return
}
func (l *DiskLocation) LocateVolume(vid needle.VolumeId) (os.DirEntry, bool) {
// println("LocateVolume", vid, "on", l.Directory)
if dirEntries, err := os.ReadDir(l.Directory); err == nil {
for _, entry := range dirEntries {
// println("checking", entry.Name(), "...")
volId, _, err := volumeIdFromFileName(entry.Name())
// println("volId", volId, "err", err)
if vid == volId && err == nil {
return entry, true
}
}
}
return nil, false
}
func (l *DiskLocation)
|
UnUsedSpace
|
identifier_name
|
|
utility.rs
|
let Bar(field) = Bar("hello"); // more common syntax
/// # }
/// ```
///
/// This function helps with field access in contexts where you are working with either
/// a tuple struct or a struct with named fields. If a field has no name,
/// it must be accessed through its index.
pub(crate) fn ident_or_index(ident: Option<&Ident>, index: usize) -> Member {
ident.map_or_else(
|| Member::Unnamed(index.into()),
|ident| Member::Named(ident.clone()),
)
}
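// A minimal usage sketch (hypothetical surrounding code, not part of this module):
// for (index, field) in fields.iter().enumerate() {
//     let member = ident_or_index(field.ident.as_ref(), index);
//     accessors.push(quote!(self.#member)); // expands to `self.name` or `self.0`
// }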
/// Options defining how to extend the `where` clause in reflection with any additional bounds needed.
pub(crate) struct WhereClauseOptions {
/// Type parameters that need extra trait bounds.
parameter_types: Box<[Ident]>,
/// Trait bounds to add to the type parameters.
parameter_trait_bounds: Box<[proc_macro2::TokenStream]>,
/// Any types that will be reflected and need an extra trait bound
active_types: Box<[Type]>,
/// Trait bounds to add to the active types
active_trait_bounds: Box<[proc_macro2::TokenStream]>,
/// Any types that won't be reflected and need an extra trait bound
ignored_types: Box<[Type]>,
/// Trait bounds to add to the ignored types
ignored_trait_bounds: Box<[proc_macro2::TokenStream]>,
}
impl Default for WhereClauseOptions {
/// By default, don't add any additional bounds to the `where` clause
fn default() -> Self {
Self {
parameter_types: Box::new([]),
active_types: Box::new([]),
ignored_types: Box::new([]),
active_trait_bounds: Box::new([]),
ignored_trait_bounds: Box::new([]),
parameter_trait_bounds: Box::new([]),
}
}
}
impl WhereClauseOptions {
/// Create [`WhereClauseOptions`] for a struct or enum type.
pub fn new<'a: 'b, 'b>(
meta: &ReflectMeta,
active_fields: impl Iterator<Item = &'b StructField<'a>>,
ignored_fields: impl Iterator<Item = &'b StructField<'a>>,
) -> Self {
Self::new_with_bounds(meta, active_fields, ignored_fields, |_| None, |_| None)
}
/// Create [`WhereClauseOptions`] for a simple value type.
pub fn new_value(meta: &ReflectMeta) -> Self {
Self::new_with_bounds(
meta,
std::iter::empty(),
std::iter::empty(),
|_| None,
|_| None,
)
}
/// Create [`WhereClauseOptions`] for a struct or enum type.
///
/// Compared to [`WhereClauseOptions::new`], this version allows you to specify
/// custom trait bounds for each field.
pub fn new_with_bounds<'a: 'b, 'b>(
meta: &ReflectMeta,
active_fields: impl Iterator<Item = &'b StructField<'a>>,
ignored_fields: impl Iterator<Item = &'b StructField<'a>>,
active_bounds: impl Fn(&StructField<'a>) -> Option<proc_macro2::TokenStream>,
ignored_bounds: impl Fn(&StructField<'a>) -> Option<proc_macro2::TokenStream>,
) -> Self {
let bevy_reflect_path = meta.bevy_reflect_path();
let is_from_reflect = meta.from_reflect().should_auto_derive();
let (active_types, active_trait_bounds): (Vec<_>, Vec<_>) = active_fields
.map(|field| {
let ty = field.data.ty.clone();
let custom_bounds = active_bounds(field).map(|bounds| quote!(+ #bounds));
let bounds = if is_from_reflect {
quote!(#bevy_reflect_path::FromReflect #custom_bounds)
} else
|
;
(ty, bounds)
})
.unzip();
let (ignored_types, ignored_trait_bounds): (Vec<_>, Vec<_>) = ignored_fields
.map(|field| {
let ty = field.data.ty.clone();
let custom_bounds = ignored_bounds(field).map(|bounds| quote!(+ #bounds));
let bounds = quote!(#FQAny + #FQSend + #FQSync #custom_bounds);
(ty, bounds)
})
.unzip();
let (parameter_types, parameter_trait_bounds): (Vec<_>, Vec<_>) =
if meta.traits().type_path_attrs().should_auto_derive() {
meta.type_path()
.generics()
.type_params()
.map(|param| {
let ident = param.ident.clone();
let bounds = quote!(#bevy_reflect_path::TypePath);
(ident, bounds)
})
.unzip()
} else {
// If we don't need to derive `TypePath` for the type parameters,
// we can skip adding its bound to the `where` clause.
(Vec::new(), Vec::new())
};
Self {
active_types: active_types.into_boxed_slice(),
active_trait_bounds: active_trait_bounds.into_boxed_slice(),
ignored_types: ignored_types.into_boxed_slice(),
ignored_trait_bounds: ignored_trait_bounds.into_boxed_slice(),
parameter_types: parameter_types.into_boxed_slice(),
parameter_trait_bounds: parameter_trait_bounds.into_boxed_slice(),
}
}
}
/// Extends the `where` clause in reflection with any additional bounds needed.
///
/// This is mostly used to add additional bounds to reflected objects with generic types.
/// For reflection purposes, we usually have:
/// * `active_trait_bounds: Reflect`
/// * `ignored_trait_bounds: Any + Send + Sync`
///
/// # Arguments
///
/// * `where_clause`: existing `where` clause present on the object to be derived
/// * `where_clause_options`: additional parameters defining which trait bounds to add to the `where` clause
///
/// # Example
///
/// The struct:
/// ```ignore
/// #[derive(Reflect)]
/// struct Foo<T, U> {
/// a: T,
/// #[reflect(ignore)]
/// b: U
/// }
/// ```
/// will have active types: `[T]` and ignored types: `[U]`
///
/// The `extend_where_clause` function will yield the following `where` clause:
/// ```ignore
/// where
/// T: Reflect, // active_trait_bounds
/// U: Any + Send + Sync, // ignored_trait_bounds
/// ```
pub(crate) fn extend_where_clause(
where_clause: Option<&WhereClause>,
where_clause_options: &WhereClauseOptions,
) -> proc_macro2::TokenStream {
let parameter_types = &where_clause_options.parameter_types;
let active_types = &where_clause_options.active_types;
let ignored_types = &where_clause_options.ignored_types;
let parameter_trait_bounds = &where_clause_options.parameter_trait_bounds;
let active_trait_bounds = &where_clause_options.active_trait_bounds;
let ignored_trait_bounds = &where_clause_options.ignored_trait_bounds;
let mut generic_where_clause = if let Some(where_clause) = where_clause {
let predicates = where_clause.predicates.iter();
quote! {where #(#predicates,)*}
} else if !(parameter_types.is_empty() && active_types.is_empty() && ignored_types.is_empty()) {
quote! {where}
} else {
quote!()
};
// The nested parentheses here are required to properly scope HRTBs coming
// from field types to the type itself, as the compiler will scope them to
// the whole bound by default, resulting in a failure to prove trait
// adherence.
generic_where_clause.extend(quote! {
#((#active_types): #active_trait_bounds,)*
#((#ignored_types): #ignored_trait_bounds,)*
// Leave parameter bounds to the end for more sane error messages.
#((#parameter_types): #parameter_trait_bounds,)*
});
generic_where_clause
}
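// A hedged sketch of what `extend_where_clause` emits for a single active type
// `T` with one bound and nothing else. `MyReflect` stands in for the real
// reflection trait, and `parse_quote!` assumes syn's usual parsing features.
#[cfg(test)]
mod extend_where_clause_sketch {
    use super::*;
    use quote::quote;

    #[test]
    fn emits_parenthesized_bounds() {
        let active_type: syn::Type = syn::parse_quote!(T);
        let options = WhereClauseOptions {
            parameter_types: Box::new([]),
            parameter_trait_bounds: Box::new([]),
            active_types: Box::new([active_type]),
            active_trait_bounds: Box::new([quote!(MyReflect)]),
            ignored_types: Box::new([]),
            ignored_trait_bounds: Box::new([]),
        };
        let tokens = extend_where_clause(None, &options);
        // One parenthesized type, followed by its bound, after a bare `where`.
        assert_eq!(tokens.to_string(), quote!(where (T): MyReflect,).to_string());
    }
}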
impl<T> Default for ResultSifter<T> {
fn default() -> Self {
Self {
items: Vec::new(),
errors: None,
}
}
}
impl<T> ResultSifter<T> {
/// Sift the given result, combining errors if necessary.
pub fn sift(&mut self, result: Result<T, syn::Error>) {
match result {
Ok(data) => self.items.push(data),
Err(err) => {
if let Some(ref mut errors) = self.errors {
errors.combine(err);
} else {
self.errors = Some(err);
}
}
}
}
/// Associated method that provides a convenient implementation for [`Iterator::fold`].
pub fn fold(mut sifter: Self, result: Result<T, syn::Error>) -> Self {
sifter.sift(result);
sifter
}
/// Complete the sifting process and return the final result.
pub fn finish(self) -> Result<Vec<T>, syn::Error> {
if let Some(errors) = self.errors {
Err(errors)
} else {
Ok(self.items)
}
}
}
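// A hedged usage sketch for `ResultSifter`, driving it through
// `Iterator::fold` as the doc comment on `fold` suggests. The error messages
// and the `u32` item type are illustrative only.
#[cfg(test)]
mod result_sifter_sketch {
    use super::*;
    use proc_macro2::Span;

    #[test]
    fn combines_multiple_errors() {
        let results = vec![
            Ok(1_u32),
            Err(syn::Error::new(Span::call_site(), "first problem")),
            Err(syn::Error::new(Span::call_site(), "second problem")),
        ];
        let sifter: ResultSifter<u32> =
            results.into_iter().fold(ResultSifter::default(), ResultSifter::fold);
        // Both errors were merged into a single `syn::Error` by `sift`.
        assert!(sifter.finish().is_err());
    }
}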
/// Converts an iterator over ignore behavior of members to a bitset of ignored members.
///
/// Takes into account the fact that always ignored (non-reflected) members are skipped.
///
/// # Example
/// ```rust,ignore
/// pub struct HelloWorld {
/// reflected_field: u32 // index: 0
///
/// #[reflect(ignore)]
/// non_reflected_field: u32 // index: N/A (
|
{
quote!(#bevy_reflect_path::Reflect #custom_bounds)
}
|
conditional_block
|
utility.rs
|
std::iter::empty(),
|_| None,
|_| None,
)
}
/// Create [`WhereClauseOptions`] for a struct or enum type.
///
/// Compared to [`WhereClauseOptions::new`], this version allows you to specify
/// custom trait bounds for each field.
pub fn new_with_bounds<'a: 'b, 'b>(
meta: &ReflectMeta,
active_fields: impl Iterator<Item = &'b StructField<'a>>,
ignored_fields: impl Iterator<Item = &'b StructField<'a>>,
active_bounds: impl Fn(&StructField<'a>) -> Option<proc_macro2::TokenStream>,
ignored_bounds: impl Fn(&StructField<'a>) -> Option<proc_macro2::TokenStream>,
) -> Self {
let bevy_reflect_path = meta.bevy_reflect_path();
let is_from_reflect = meta.from_reflect().should_auto_derive();
let (active_types, active_trait_bounds): (Vec<_>, Vec<_>) = active_fields
.map(|field| {
let ty = field.data.ty.clone();
let custom_bounds = active_bounds(field).map(|bounds| quote!(+ #bounds));
let bounds = if is_from_reflect {
quote!(#bevy_reflect_path::FromReflect #custom_bounds)
} else {
quote!(#bevy_reflect_path::Reflect #custom_bounds)
};
(ty, bounds)
})
.unzip();
let (ignored_types, ignored_trait_bounds): (Vec<_>, Vec<_>) = ignored_fields
.map(|field| {
let ty = field.data.ty.clone();
let custom_bounds = ignored_bounds(field).map(|bounds| quote!(+ #bounds));
let bounds = quote!(#FQAny + #FQSend + #FQSync #custom_bounds);
(ty, bounds)
})
.unzip();
let (parameter_types, parameter_trait_bounds): (Vec<_>, Vec<_>) =
if meta.traits().type_path_attrs().should_auto_derive() {
meta.type_path()
.generics()
.type_params()
.map(|param| {
let ident = param.ident.clone();
let bounds = quote!(#bevy_reflect_path::TypePath);
(ident, bounds)
})
.unzip()
} else {
// If we don't need to derive `TypePath` for the type parameters,
// we can skip adding its bound to the `where` clause.
(Vec::new(), Vec::new())
};
Self {
active_types: active_types.into_boxed_slice(),
active_trait_bounds: active_trait_bounds.into_boxed_slice(),
ignored_types: ignored_types.into_boxed_slice(),
ignored_trait_bounds: ignored_trait_bounds.into_boxed_slice(),
parameter_types: parameter_types.into_boxed_slice(),
parameter_trait_bounds: parameter_trait_bounds.into_boxed_slice(),
}
}
}
/// Extends the `where` clause in reflection with any additional bounds needed.
///
/// This is mostly used to add additional bounds to reflected objects with generic types.
/// For reflection purposes, we usually have:
/// * `active_trait_bounds: Reflect`
/// * `ignored_trait_bounds: Any + Send + Sync`
///
/// # Arguments
///
/// * `where_clause`: existing `where` clause present on the object to be derived
/// * `where_clause_options`: additional parameters defining which trait bounds to add to the `where` clause
///
/// # Example
///
/// The struct:
/// ```ignore
/// #[derive(Reflect)]
/// struct Foo<T, U> {
/// a: T,
/// #[reflect(ignore)]
/// b: U
/// }
/// ```
/// will have active types: `[T]` and ignored types: `[U]`
///
/// The `extend_where_clause` function will yield the following `where` clause:
/// ```ignore
/// where
/// T: Reflect, // active_trait_bounds
/// U: Any + Send + Sync, // ignored_trait_bounds
/// ```
pub(crate) fn extend_where_clause(
where_clause: Option<&WhereClause>,
where_clause_options: &WhereClauseOptions,
) -> proc_macro2::TokenStream {
let parameter_types = &where_clause_options.parameter_types;
let active_types = &where_clause_options.active_types;
let ignored_types = &where_clause_options.ignored_types;
let parameter_trait_bounds = &where_clause_options.parameter_trait_bounds;
let active_trait_bounds = &where_clause_options.active_trait_bounds;
let ignored_trait_bounds = &where_clause_options.ignored_trait_bounds;
let mut generic_where_clause = if let Some(where_clause) = where_clause {
let predicates = where_clause.predicates.iter();
quote! {where #(#predicates,)*}
} else if !(parameter_types.is_empty() && active_types.is_empty() && ignored_types.is_empty()) {
quote! {where}
} else {
quote!()
};
// The nested parentheses here are required to properly scope HRTBs coming
// from field types to the type itself, as the compiler will scope them to
// the whole bound by default, resulting in a failure to prove trait
// adherence.
generic_where_clause.extend(quote! {
#((#active_types): #active_trait_bounds,)*
#((#ignored_types): #ignored_trait_bounds,)*
// Leave parameter bounds to the end for more sane error messages.
#((#parameter_types): #parameter_trait_bounds,)*
});
generic_where_clause
}
impl<T> Default for ResultSifter<T> {
fn default() -> Self {
Self {
items: Vec::new(),
errors: None,
}
}
}
impl<T> ResultSifter<T> {
/// Sift the given result, combining errors if necessary.
pub fn sift(&mut self, result: Result<T, syn::Error>) {
match result {
Ok(data) => self.items.push(data),
Err(err) => {
if let Some(ref mut errors) = self.errors {
errors.combine(err);
} else {
self.errors = Some(err);
}
}
}
}
/// Associated method that provides a convenient implementation for [`Iterator::fold`].
pub fn fold(mut sifter: Self, result: Result<T, syn::Error>) -> Self {
sifter.sift(result);
sifter
}
/// Complete the sifting process and return the final result.
pub fn finish(self) -> Result<Vec<T>, syn::Error> {
if let Some(errors) = self.errors {
Err(errors)
} else {
Ok(self.items)
}
}
}
/// Converts an iterator over ignore behavior of members to a bitset of ignored members.
///
/// Takes into account the fact that always ignored (non-reflected) members are skipped.
///
/// # Example
/// ```rust,ignore
/// pub struct HelloWorld {
/// reflected_field: u32 // index: 0
///
/// #[reflect(ignore)]
/// non_reflected_field: u32 // index: N/A (not 1!)
///
/// #[reflect(skip_serializing)]
/// non_serialized_field: u32 // index: 1
/// }
/// ```
/// Would convert to the `0b01` bitset (i.e. the second reflected field is NOT serialized).
///
pub(crate) fn members_to_serialization_denylist<T>(member_iter: T) -> BitSet<u32>
where
T: Iterator<Item = ReflectIgnoreBehavior>,
{
let mut bitset = BitSet::default();
member_iter.fold(0, |next_idx, member| match member {
ReflectIgnoreBehavior::IgnoreAlways => next_idx,
ReflectIgnoreBehavior::IgnoreSerialization => {
bitset.insert(next_idx);
next_idx + 1
}
ReflectIgnoreBehavior::None => next_idx + 1,
});
bitset
}
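// A hedged sketch mirroring the `HelloWorld` example above: the always-ignored
// member consumes no index, so the skip-serializing member is denied at index 1.
#[cfg(test)]
mod serialization_denylist_sketch {
    use super::*;

    #[test]
    fn always_ignored_members_are_skipped() {
        let members = vec![
            ReflectIgnoreBehavior::None,
            ReflectIgnoreBehavior::IgnoreAlways,
            ReflectIgnoreBehavior::IgnoreSerialization,
        ];
        let denylist = members_to_serialization_denylist(members.into_iter());
        assert!(denylist.contains(1));
        assert!(!denylist.contains(0));
    }
}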
/// Turns an `Option<TokenStream>` into a `TokenStream` for an `Option`.
pub(crate) fn wrap_in_option(tokens: Option<proc_macro2::TokenStream>) -> proc_macro2::TokenStream {
match tokens {
Some(tokens) => quote! {
#FQOption::Some(#tokens)
},
None => quote! {
#FQOption::None
},
}
}
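// A hedged sketch of `wrap_in_option`: the resulting tokens spell out a
// fully-qualified `Option` value via the crate's `FQOption` helper, so the
// assertions below only check for the variant names, not the exact path.
#[cfg(test)]
mod wrap_in_option_sketch {
    use super::*;
    use quote::quote;

    #[test]
    fn wraps_some_and_none() {
        let some = wrap_in_option(Some(quote!("docs"))).to_string();
        let none = wrap_in_option(None).to_string();
        assert!(some.contains("Some") && some.contains("\"docs\""));
        assert!(none.contains("None"));
    }
}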
/// Contains tokens representing different kinds of string.
#[derive(Clone)]
pub(crate) enum StringExpr {
/// A string that is valid at compile time.
///
/// This is either a string literal like `"mystring"`,
/// or a string created by a macro like [`module_path`]
/// or [`concat`].
Const(proc_macro2::TokenStream),
/// A [string slice](str) that is borrowed for a `'static` lifetime.
Borrowed(proc_macro2::TokenStream),
/// An [owned string](String).
Owned(proc_macro2::TokenStream),
}
impl<T: ToString + Spanned> From<T> for StringExpr {
fn from(value: T) -> Self {
Self::from_lit(&LitStr::new(&value.to_string(), value.span()))
}
}
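// A hedged sketch of the `From` impl above: any `ToString + Spanned` value
// (an `Ident` here) becomes a compile-time-constant `StringExpr`.
#[cfg(test)]
mod string_expr_sketch {
    use super::*;
    use proc_macro2::Span;
    use syn::Ident;

    #[test]
    fn ident_becomes_const_string_expr() {
        let ident = Ident::new("my_module", Span::call_site());
        let expr: StringExpr = ident.into();
        assert!(matches!(expr, StringExpr::Const(_)));
    }
}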
impl StringExpr {
/// Creates a [constant] [`StringExpr`] from a [`struct@LitStr`].
///
/// [constant]: StringExpr::Const
pub fn from_lit(lit: &LitStr) -> Self {
Self::Const(lit.to_token_stream())
|
}
/// Creates a [constant] [`StringExpr`] by interpreting a [string slice][str] as a [`struct@LitStr`].
///
|
random_line_split
|
|
utility.rs
|
.map(|field| {
let ty = field.data.ty.clone();
let custom_bounds = active_bounds(field).map(|bounds| quote!(+ #bounds));
let bounds = if is_from_reflect {
quote!(#bevy_reflect_path::FromReflect #custom_bounds)
} else {
quote!(#bevy_reflect_path::Reflect #custom_bounds)
};
(ty, bounds)
})
.unzip();
let (ignored_types, ignored_trait_bounds): (Vec<_>, Vec<_>) = ignored_fields
.map(|field| {
let ty = field.data.ty.clone();
let custom_bounds = ignored_bounds(field).map(|bounds| quote!(+ #bounds));
let bounds = quote!(#FQAny + #FQSend + #FQSync #custom_bounds);
(ty, bounds)
})
.unzip();
let (parameter_types, parameter_trait_bounds): (Vec<_>, Vec<_>) =
if meta.traits().type_path_attrs().should_auto_derive() {
meta.type_path()
.generics()
.type_params()
.map(|param| {
let ident = param.ident.clone();
let bounds = quote!(#bevy_reflect_path::TypePath);
(ident, bounds)
})
.unzip()
} else {
// If we don't need to derive `TypePath` for the type parameters,
// we can skip adding its bound to the `where` clause.
(Vec::new(), Vec::new())
};
Self {
active_types: active_types.into_boxed_slice(),
active_trait_bounds: active_trait_bounds.into_boxed_slice(),
ignored_types: ignored_types.into_boxed_slice(),
ignored_trait_bounds: ignored_trait_bounds.into_boxed_slice(),
parameter_types: parameter_types.into_boxed_slice(),
parameter_trait_bounds: parameter_trait_bounds.into_boxed_slice(),
}
}
}
/// Extends the `where` clause in reflection with any additional bounds needed.
///
/// This is mostly used to add additional bounds to reflected objects with generic types.
/// For reflection purposes, we usually have:
/// * `active_trait_bounds: Reflect`
/// * `ignored_trait_bounds: Any + Send + Sync`
///
/// # Arguments
///
/// * `where_clause`: existing `where` clause present on the object to be derived
/// * `where_clause_options`: additional parameters defining which trait bounds to add to the `where` clause
///
/// # Example
///
/// The struct:
/// ```ignore
/// #[derive(Reflect)]
/// struct Foo<T, U> {
/// a: T,
/// #[reflect(ignore)]
/// b: U
/// }
/// ```
/// will have active types: `[T]` and ignored types: `[U]`
///
/// The `extend_where_clause` function will yield the following `where` clause:
/// ```ignore
/// where
/// T: Reflect, // active_trait_bounds
/// U: Any + Send + Sync, // ignored_trait_bounds
/// ```
pub(crate) fn extend_where_clause(
where_clause: Option<&WhereClause>,
where_clause_options: &WhereClauseOptions,
) -> proc_macro2::TokenStream {
let parameter_types = &where_clause_options.parameter_types;
let active_types = &where_clause_options.active_types;
let ignored_types = &where_clause_options.ignored_types;
let parameter_trait_bounds = &where_clause_options.parameter_trait_bounds;
let active_trait_bounds = &where_clause_options.active_trait_bounds;
let ignored_trait_bounds = &where_clause_options.ignored_trait_bounds;
let mut generic_where_clause = if let Some(where_clause) = where_clause {
let predicates = where_clause.predicates.iter();
quote! {where #(#predicates,)*}
} else if !(parameter_types.is_empty() && active_types.is_empty() && ignored_types.is_empty()) {
quote! {where}
} else {
quote!()
};
// The nested parentheses here are required to properly scope HRTBs coming
// from field types to the type itself, as the compiler will scope them to
// the whole bound by default, resulting in a failure to prove trait
// adherence.
generic_where_clause.extend(quote! {
#((#active_types): #active_trait_bounds,)*
#((#ignored_types): #ignored_trait_bounds,)*
// Leave parameter bounds to the end for more sane error messages.
#((#parameter_types): #parameter_trait_bounds,)*
});
generic_where_clause
}
impl<T> Default for ResultSifter<T> {
fn default() -> Self {
Self {
items: Vec::new(),
errors: None,
}
}
}
impl<T> ResultSifter<T> {
/// Sift the given result, combining errors if necessary.
pub fn sift(&mut self, result: Result<T, syn::Error>) {
match result {
Ok(data) => self.items.push(data),
Err(err) => {
if let Some(ref mut errors) = self.errors {
errors.combine(err);
} else {
self.errors = Some(err);
}
}
}
}
/// Associated method that provides a convenient implementation for [`Iterator::fold`].
pub fn fold(mut sifter: Self, result: Result<T, syn::Error>) -> Self {
sifter.sift(result);
sifter
}
/// Complete the sifting process and return the final result.
pub fn finish(self) -> Result<Vec<T>, syn::Error> {
if let Some(errors) = self.errors {
Err(errors)
} else {
Ok(self.items)
}
}
}
/// Converts an iterator over ignore behavior of members to a bitset of ignored members.
///
/// Takes into account the fact that always ignored (non-reflected) members are skipped.
///
/// # Example
/// ```rust,ignore
/// pub struct HelloWorld {
/// reflected_field: u32 // index: 0
///
/// #[reflect(ignore)]
/// non_reflected_field: u32 // index: N/A (not 1!)
///
/// #[reflect(skip_serializing)]
/// non_serialized_field: u32 // index: 1
/// }
/// ```
/// Would convert to the `0b01` bitset (i.e. the second reflected field is NOT serialized).
///
pub(crate) fn members_to_serialization_denylist<T>(member_iter: T) -> BitSet<u32>
where
T: Iterator<Item = ReflectIgnoreBehavior>,
{
let mut bitset = BitSet::default();
member_iter.fold(0, |next_idx, member| match member {
ReflectIgnoreBehavior::IgnoreAlways => next_idx,
ReflectIgnoreBehavior::IgnoreSerialization => {
bitset.insert(next_idx);
next_idx + 1
}
ReflectIgnoreBehavior::None => next_idx + 1,
});
bitset
}
/// Turns an `Option<TokenStream>` into a `TokenStream` for an `Option`.
pub(crate) fn wrap_in_option(tokens: Option<proc_macro2::TokenStream>) -> proc_macro2::TokenStream {
match tokens {
Some(tokens) => quote! {
#FQOption::Some(#tokens)
},
None => quote! {
#FQOption::None
},
}
}
/// Contains tokens representing different kinds of string.
#[derive(Clone)]
pub(crate) enum StringExpr {
/// A string that is valid at compile time.
///
/// This is either a string literal like `"mystring"`,
/// or a string created by a macro like [`module_path`]
/// or [`concat`].
Const(proc_macro2::TokenStream),
/// A [string slice](str) that is borrowed for a `'static` lifetime.
Borrowed(proc_macro2::TokenStream),
/// An [owned string](String).
Owned(proc_macro2::TokenStream),
}
impl<T: ToString + Spanned> From<T> for StringExpr {
fn from(value: T) -> Self {
Self::from_lit(&LitStr::new(&value.to_string(), value.span()))
}
}
impl StringExpr {
/// Creates a [constant] [`StringExpr`] from a [`struct@LitStr`].
///
/// [constant]: StringExpr::Const
pub fn from_lit(lit: &LitStr) -> Self {
Self::Const(lit.to_token_stream())
}
/// Creates a [constant] [`StringExpr`] by interpreting a [string slice][str] as a [`struct@LitStr`].
///
/// [constant]: StringExpr::Const
pub fn from_str(string: &str) -> Self {
Self::Const(string.into_token_stream())
}
/// Returns tokens for an [owned string](String).
///
/// The returned expression will allocate unless the [`StringExpr`] is [already owned].
///
/// [already owned]: StringExpr::Owned
pub fn into_owned(self) -> proc_macro2::TokenStream {
match self {
Self::Const(tokens) | Self::Borrowed(tokens) => quote! {
::std::string::ToString::to_string(#tokens)
},
Self::Owned(owned) => owned,
}
}
/// Returns tokens for a statically borrowed [string slice](str).
pub fn into_borrowed(self) -> proc_macro2::TokenStream
|
{
match self {
Self::Const(tokens) | Self::Borrowed(tokens) => tokens,
Self::Owned(owned) => quote! {
&#owned
},
}
}
|
identifier_body
|
|
utility.rs
|
let Bar(field) = Bar("hello"); // more common syntax
/// # }
/// ```
///
/// This function helps field access in context where you are declaring either
/// a tuple struct or a struct with named fields. If you don't have a field name,
/// it means you need to access the struct through an index.
pub(crate) fn ident_or_index(ident: Option<&Ident>, index: usize) -> Member {
ident.map_or_else(
|| Member::Unnamed(index.into()),
|ident| Member::Named(ident.clone()),
)
}
/// Options defining how to extend the `where` clause in reflection with any additional bounds needed.
pub(crate) struct WhereClauseOptions {
/// Type parameters that need extra trait bounds.
parameter_types: Box<[Ident]>,
/// Trait bounds to add to the type parameters.
parameter_trait_bounds: Box<[proc_macro2::TokenStream]>,
/// Any types that will be reflected and need an extra trait bound
active_types: Box<[Type]>,
/// Trait bounds to add to the active types
active_trait_bounds: Box<[proc_macro2::TokenStream]>,
/// Any types that won't be reflected and need an extra trait bound
ignored_types: Box<[Type]>,
/// Trait bounds to add to the ignored types
ignored_trait_bounds: Box<[proc_macro2::TokenStream]>,
}
impl Default for WhereClauseOptions {
/// By default, don't add any additional bounds to the `where` clause
fn default() -> Self {
Self {
parameter_types: Box::new([]),
active_types: Box::new([]),
ignored_types: Box::new([]),
active_trait_bounds: Box::new([]),
ignored_trait_bounds: Box::new([]),
parameter_trait_bounds: Box::new([]),
}
}
}
impl WhereClauseOptions {
/// Create [`WhereClauseOptions`] for a struct or enum type.
pub fn new<'a: 'b, 'b>(
meta: &ReflectMeta,
active_fields: impl Iterator<Item = &'b StructField<'a>>,
ignored_fields: impl Iterator<Item = &'b StructField<'a>>,
) -> Self {
Self::new_with_bounds(meta, active_fields, ignored_fields, |_| None, |_| None)
}
/// Create [`WhereClauseOptions`] for a simple value type.
pub fn new_value(meta: &ReflectMeta) -> Self {
Self::new_with_bounds(
meta,
std::iter::empty(),
std::iter::empty(),
|_| None,
|_| None,
)
}
/// Create [`WhereClauseOptions`] for a struct or enum type.
///
/// Compared to [`WhereClauseOptions::new`], this version allows you to specify
/// custom trait bounds for each field.
pub fn new_with_bounds<'a: 'b, 'b>(
meta: &ReflectMeta,
active_fields: impl Iterator<Item = &'b StructField<'a>>,
ignored_fields: impl Iterator<Item = &'b StructField<'a>>,
active_bounds: impl Fn(&StructField<'a>) -> Option<proc_macro2::TokenStream>,
ignored_bounds: impl Fn(&StructField<'a>) -> Option<proc_macro2::TokenStream>,
) -> Self {
let bevy_reflect_path = meta.bevy_reflect_path();
let is_from_reflect = meta.from_reflect().should_auto_derive();
let (active_types, active_trait_bounds): (Vec<_>, Vec<_>) = active_fields
.map(|field| {
let ty = field.data.ty.clone();
let custom_bounds = active_bounds(field).map(|bounds| quote!(+ #bounds));
let bounds = if is_from_reflect {
quote!(#bevy_reflect_path::FromReflect #custom_bounds)
} else {
quote!(#bevy_reflect_path::Reflect #custom_bounds)
};
(ty, bounds)
})
.unzip();
let (ignored_types, ignored_trait_bounds): (Vec<_>, Vec<_>) = ignored_fields
.map(|field| {
let ty = field.data.ty.clone();
let custom_bounds = ignored_bounds(field).map(|bounds| quote!(+ #bounds));
let bounds = quote!(#FQAny + #FQSend + #FQSync #custom_bounds);
(ty, bounds)
})
.unzip();
let (parameter_types, parameter_trait_bounds): (Vec<_>, Vec<_>) =
if meta.traits().type_path_attrs().should_auto_derive() {
meta.type_path()
.generics()
.type_params()
.map(|param| {
let ident = param.ident.clone();
let bounds = quote!(#bevy_reflect_path::TypePath);
(ident, bounds)
})
.unzip()
} else {
// If we don't need to derive `TypePath` for the type parameters,
// we can skip adding its bound to the `where` clause.
(Vec::new(), Vec::new())
};
Self {
active_types: active_types.into_boxed_slice(),
active_trait_bounds: active_trait_bounds.into_boxed_slice(),
ignored_types: ignored_types.into_boxed_slice(),
ignored_trait_bounds: ignored_trait_bounds.into_boxed_slice(),
parameter_types: parameter_types.into_boxed_slice(),
parameter_trait_bounds: parameter_trait_bounds.into_boxed_slice(),
}
}
}
/// Extends the `where` clause in reflection with any additional bounds needed.
///
/// This is mostly used to add additional bounds to reflected objects with generic types.
/// For reflection purposes, we usually have:
/// * `active_trait_bounds: Reflect`
/// * `ignored_trait_bounds: Any + Send + Sync`
///
/// # Arguments
///
/// * `where_clause`: existing `where` clause present on the object to be derived
/// * `where_clause_options`: additional parameters defining which trait bounds to add to the `where` clause
///
/// # Example
///
/// The struct:
/// ```ignore
/// #[derive(Reflect)]
/// struct Foo<T, U> {
/// a: T,
/// #[reflect(ignore)]
/// b: U
/// }
/// ```
/// will have active types: `[T]` and ignored types: `[U]`
///
/// The `extend_where_clause` function will yield the following `where` clause:
/// ```ignore
/// where
/// T: Reflect, // active_trait_bounds
/// U: Any + Send + Sync, // ignored_trait_bounds
/// ```
pub(crate) fn
|
(
where_clause: Option<&WhereClause>,
where_clause_options: &WhereClauseOptions,
) -> proc_macro2::TokenStream {
let parameter_types = &where_clause_options.parameter_types;
let active_types = &where_clause_options.active_types;
let ignored_types = &where_clause_options.ignored_types;
let parameter_trait_bounds = &where_clause_options.parameter_trait_bounds;
let active_trait_bounds = &where_clause_options.active_trait_bounds;
let ignored_trait_bounds = &where_clause_options.ignored_trait_bounds;
let mut generic_where_clause = if let Some(where_clause) = where_clause {
let predicates = where_clause.predicates.iter();
quote! {where #(#predicates,)*}
} else if !(parameter_types.is_empty() && active_types.is_empty() && ignored_types.is_empty()) {
quote! {where}
} else {
quote!()
};
// The nested parentheses here are required to properly scope HRTBs coming
// from field types to the type itself, as the compiler will scope them to
// the whole bound by default, resulting in a failure to prove trait
// adherence.
generic_where_clause.extend(quote! {
#((#active_types): #active_trait_bounds,)*
#((#ignored_types): #ignored_trait_bounds,)*
// Leave parameter bounds to the end for more sane error messages.
#((#parameter_types): #parameter_trait_bounds,)*
});
generic_where_clause
}
impl<T> Default for ResultSifter<T> {
fn default() -> Self {
Self {
items: Vec::new(),
errors: None,
}
}
}
impl<T> ResultSifter<T> {
/// Sift the given result, combining errors if necessary.
pub fn sift(&mut self, result: Result<T, syn::Error>) {
match result {
Ok(data) => self.items.push(data),
Err(err) => {
if let Some(ref mut errors) = self.errors {
errors.combine(err);
} else {
self.errors = Some(err);
}
}
}
}
/// Associated method that provides a convenient implementation for [`Iterator::fold`].
pub fn fold(mut sifter: Self, result: Result<T, syn::Error>) -> Self {
sifter.sift(result);
sifter
}
/// Complete the sifting process and return the final result.
pub fn finish(self) -> Result<Vec<T>, syn::Error> {
if let Some(errors) = self.errors {
Err(errors)
} else {
Ok(self.items)
}
}
}
/// Converts an iterator over ignore behavior of members to a bitset of ignored members.
///
/// Takes into account the fact that always ignored (non-reflected) members are skipped.
///
/// # Example
/// ```rust,ignore
/// pub struct HelloWorld {
/// reflected_field: u32 // index: 0
///
/// #[reflect(ignore)]
/// non_reflected_field: u32 // index: N/A (not
|
extend_where_clause
|
identifier_name
|
views_ajax.py
|
account locking...'.format(strUsername))
result = {'status': 3, 'msg': '登录失败超过5次,该账号已被锁定5分钟!', 'data': ''}
return HttpResponse(json.dumps(result), content_type='application/json')
if user and user.is_active:
request.session['login_username'] = strUsername
result = {'status': 0, 'msg': 'ok', 'data': ''}
return HttpResponse(json.dumps(result), content_type='application/json')
result = loginAuthenticate(strUsername, strPassword)
if result['status'] == 0:
request.session['login_username'] = strUsername
return HttpResponse(json.dumps(result), content_type='application/json')
#Oracle SQL简单审核
@csrf_exempt
def orasimplecheck(request):
if request.is_ajax():
sqlContent = request.POST.get('sql_content')
clusterName = request.POST.get('cluster_name')
else:
sqlContent = request.POST['sql_content']
clusterName = request.POST['cluster_name']
finalResult = {'status':'ok', 'msg':'检测通过', 'data':[]}
#服务器端参数验证
if sqlContent is None or clusterName is None:
finalResult['status'] = 'error'
finalResult['msg'] = '页面提交参数可能为空'
return HttpResponse(json.dumps(finalResult), content_type='application/json')
sqlContent = sqlContent.rstrip()
if sqlContent[-1] != ";":
finalResult['status'] = 'error'
finalResult['msg'] = 'Oracle SQL语句结尾没有以;结尾,请重新修改并提交!'
return HttpResponse(json.dumps(finalResult), content_type='application/json')
sqlContent = sqlContent.rstrip(';')
#使用explain plan进行自动审核
try:
resultList = daoora.sqlAutoreview(sqlContent, clusterName)
except Exception as err:
finalResult['status'] = 'error'
finalResult['msg'] = str(err)
else:
for result in resultList:
if result['stage'] != 'CHECKED':
finalResult['status'] = 'error'
finalResult['msg'] = result['errormessage']+' -- '+result['sql']
#return HttpResponse(json.dumps(finalResult), content_type='application/json')
#要把result转成JSON存进数据库里,方便SQL单子详细信息展示
return HttpResponse(json.dumps(finalResult), content_type='application/json')
#同步表数据字典
@csrf_exempt
def syncoradict(request):
primaries = ora_primary_config.objects.all().order_by('cluster_name')
listCluster = [primary.cluster_name for primary in primaries]
clusterListSync = request.POST.get('cluster_list_sync')
if clusterListSync:
clusterListSync=json.loads(clusterListSync)
ctl = operation_ctl.objects.get(data_type='数据字典' ,opt_type='同步')
if ctl.status == '进行中':
finalResult = {'status':'error','msg':'有任务进行中'}
else:
ctl.status='进行中'
ctl.save()
syncDictData.delay(clusterListSync)
finalResult = {'status':'ok'}
return HttpResponse(json.dumps(finalResult), content_type='application/json')
finalResult = {'listCluster':listCluster}
return HttpResponse(json.dumps(finalResult), content_type='application/json')
#同步ldap用户到数据库
@csrf_exempt
def syncldapuser(request):
if not settings.ENABLE_LDAP:
result = {'msg': 'LDAP支持未开启'}
return HttpResponse(json.dumps(result), content_type='application/json')
ldapback = LDAPBackend()
ldap = ldapback.ldap
ldapconn = ldap.initialize(settings.AUTH_LDAP_SERVER_URI)
tls = getattr(settings, 'AUTH_LDAP_START_TLS', None)
if tls:
ldapconn.start_tls_s()
binddn = settings.AUTH_LDAP_BIND_DN
bind_password = settings.AUTH_LDAP_BIND_PASSWORD
basedn = settings.AUTH_LDAP_BASEDN
ldapconn.simple_bind_s(binddn, bind_password)
ldapusers = ldapconn.search_s(basedn, ldap.SCOPE_SUBTREE, 'objectclass=*', attrlist=settings.AUTH_LDAP_USER_ATTRLIST)
#ldap中username存在条目的第一个元素的uid中,定义的username_field不再使用,改为截取user_tag
display_field = settings.AUTH_LDAP_USER_ATTR_MAP['display']
email_field = settings.AUTH_LDAP_USER_ATTR_MAP['email']
count = 0
try:
for user in ldapusers:
user_tag=user[0].split(',')
user_attr = user[1]
if user_tag and user_attr:
username = user_tag[0][user_tag[0].find('=')+1:].encode()
display = user_attr.get(display_field,['none'.encode(),])[0]
email = user_attr.get(email_field,['none'.encode(),])[0]
already_user = users.objects.filter(username=username.decode()).filter(is_ldapuser=True)
if len(already_user) == 0:
u = users(username=username.decode(), display=display.decode(), email=email.decode(), is_ldapuser=True,is_active=0)
u.save()
count += 1
except Exception as err:
result = {'msg': '用户{0}导入错误:{1}'.format(username,str(err))}
return HttpResponse(json.dumps(result))
else:
result = {'msg': '同步{}个用户.'.format(count)}
return HttpResponse(json.dumps(result), content_type='application/json')
#请求图表数据
@csrf_exempt
def getMonthCharts(request):
result = daoora.getWorkChartsByMonth()
return HttpResponse(json.dumps(result), content_type='application/json')
@csrf_exempt
def getPersonCharts(request):
result = daoora.getWorkChartsByPerson()
return HttpResponse(json.dumps(result), content_type='application/json')
def getSqlSHA1(workflowId):
"""调用django ORM从数据库里查出review_content,从其中获取sqlSHA1值"""
workflowDetail = get_object_or_404(workflow, pk=workflowId)
dictSHA1 = {}
# 使用json.loads方法,把review_content从str转成list,
listReCheckResult = json.loads(workflowDetail.review_content)
for rownum in range(len(listReCheckResult)):
id = rownum + 1
sqlSHA1 = listReCheckResult[rownum][10]
if sqlSHA1 != '':
dictSHA1[id] = sqlSHA1
if dictSHA1 != {}:
# 如果找到有sqlSHA1值,说明是通过pt-OSC操作的,将其放入缓存。
# 因为使用OSC执行的SQL占较少数,所以不设置缓存过期时间
sqlSHA1_cache[workflowId] = dictSHA1
return dictSHA1
@csrf_exempt
def getOscPercent(request):
"""获取该SQL的pt-OSC执行进度和剩余时间"""
workflowId = request.POST['workflowid']
sqlID = request.POST['sqlID']
if workflowId == '' or workflowId is None or sqlID == '' or sqlID is None:
context = {"status":-1 ,'msg': 'workflowId或sqlID参数为空.', "data":""}
return HttpResponse(json.dumps(context), content_type='application/json')
workflowId = int(workflowId)
sqlID = int(sqlID)
dictSHA1 = {}
if workflowId in sqlSHA1_cache:
dictSHA1 = sqlSHA1_cache[workflowId]
# cachehit = "已命中"
else:
dictSHA1 = getSqlSHA1(workflowId)
if dictSHA1 != {} and sqlID in dictSHA1:
sqlSHA1 = dictSHA1[sqlID]
result = inceptionDao.getOscPercent(sqlSHA1) #成功获取到SHA1值,去inception里面查询进度
if result["status"] == 0:
# 获取到进度值
pctResult = result
else:
# result["status"] == 1, 未获取到进度值,需要与workflow.execute_result对比,来判断是已经执行过了,还是还未执行
execute_result = workflow.objects.get(id=workflowId).execute_result
try:
listExecResult = json.loads(execute_result)
except ValueError:
listExecResult = execute_result
if type(listExecResult) == list and len(listExecResult) >= sqlID-1:
if dictSHA1[sqlID] in listExecResult[sqlID-1][10]:
# 已经执行完毕,进度值置为100
pctResult = {"status":0, "msg":"ok", "data":{"percent":100, "timeRemained":""}}
else:
# 可能因为前一条SQL是DML,正在执行中;或者还没执行到这一行。但是status返回的是4,而当前SQL实际上还未开始执行。这里建议前端进行重试
pctResult = {"status":-3, "msg":"进度未知", "data":{"percent":-100, "timeRemained":""}}
|
elif dictSHA1 != {} and sqlID not in dictSHA1:
pctResult = {"status":4, "msg":"该行SQL不是由pt-OSC执行的", "data":""}
else:
pctResult = {"status":-2, "msg":"整个工单不由pt-OSC执行", "data":""}
return HttpResponse(json.dumps(pctResult), content_type='application/json')
|
random_line_split
|
|
views_ajax.py
|
result = {'msg': 'l
|
#Oracle SQL简单审核
@csrf_exempt
def orasimplecheck(request):
if request.is_ajax():
sqlContent = request.POST.get('sql_content')
clusterName = request.POST.get('cluster_name')
else:
sqlContent = request.POST['sql_content']
clusterName = request.POST['cluster_name']
finalResult = {'status':'ok', 'msg':'检测通过', 'data':[]}
#服务器端参数验证
if sqlContent is None or clusterName is None:
finalResult['status'] = 'error'
finalResult['msg'] = '页面提交参数可能为空'
return HttpResponse(json.dumps(finalResult), content_type='application/json')
sqlContent = sqlContent.rstrip()
    if sqlContent[-1] != ";":
finalResult['status'] = 'error'
finalResult['msg'] = 'Oracle SQL语句结尾没有以;结尾,请重新修改并提交!'
return HttpResponse(json.dumps(finalResult), content_type='application/json')
sqlContent = sqlContent.rstrip(';')
#使用explain plan进行自动审核
try:
resultList = daoora.sqlAutoreview(sqlContent, clusterName)
except Exception as err:
finalResult['status'] = 'error'
finalResult['msg'] = str(err)
else:
for result in resultList:
if result['stage'] != 'CHECKED':
finalResult['status'] = 'error'
finalResult['msg'] = result['errormessage']+' -- '+result['sql']
#return HttpResponse(json.dumps(finalResult), content_type='application/json')
#要把result转成JSON存进数据库里,方便SQL单子详细信息展示
return HttpResponse(json.dumps(finalResult), content_type='application/json')
#同步表数据字典
@csrf_exempt
def syncoradict(request):
primaries = ora_primary_config.objects.all().order_by('cluster_name')
listCluster = [primary.cluster_name for primary in primaries]
clusterListSync = request.POST.get('cluster_list_sync')
if clusterListSync:
clusterListSync=json.loads(clusterListSync)
ctl = operation_ctl.objects.get(data_type='数据字典' ,opt_type='同步')
if ctl.status == '进行中':
finalResult = {'status':'error','msg':'有任务进行中'}
else:
ctl.status='进行中'
ctl.save()
syncDictData.delay(clusterListSync)
finalResult = {'status':'ok'}
return HttpResponse(json.dumps(finalResult), content_type='application/json')
finalResult = {'listCluster':listCluster}
return HttpResponse(json.dumps(finalResult), content_type='application/json')
#同步ldap用户到数据库
@csrf_exempt
def syncldapuser(request):
if not settings.ENABLE_LDAP:
result = {'msg': 'LDAP支持未开启'}
return HttpResponse(json.dumps(result), content_type='application/json')
ldapback = LDAPBackend()
ldap = ldapback.ldap
ldapconn = ldap.initialize(settings.AUTH_LDAP_SERVER_URI)
tls = getattr(settings, 'AUTH_LDAP_START_TLS', None)
if tls:
ldapconn.start_tls_s()
binddn = settings.AUTH_LDAP_BIND_DN
bind_password = settings.AUTH_LDAP_BIND_PASSWORD
basedn = settings.AUTH_LDAP_BASEDN
ldapconn.simple_bind_s(binddn, bind_password)
ldapusers = ldapconn.search_s(basedn, ldap.SCOPE_SUBTREE, 'objectclass=*', attrlist=settings.AUTH_LDAP_USER_ATTRLIST)
#ldap中username存在条目的第一个元素的uid中,定义的username_field不再使用,改为截取user_tag
display_field = settings.AUTH_LDAP_USER_ATTR_MAP['display']
email_field = settings.AUTH_LDAP_USER_ATTR_MAP['email']
count = 0
try:
for user in ldapusers:
user_tag=user[0].split(',')
user_attr = user[1]
if user_tag and user_attr:
username = user_tag[0][user_tag[0].find('=')+1:].encode()
display = user_attr.get(display_field,['none'.encode(),])[0]
email = user_attr.get(email_field,['none'.encode(),])[0]
already_user = users.objects.filter(username=username.decode()).filter(is_ldapuser=True)
if len(already_user) == 0:
u = users(username=username.decode(), display=display.decode(), email=email.decode(), is_ldapuser=True,is_active=0)
u.save()
count += 1
except Exception as err:
result = {'msg': '用户{0}导入错误:{1}'.format(username,str(err))}
return HttpResponse(json.dumps(result))
else:
result = {'msg': '同步{}个用户.'.format(count)}
return HttpResponse(json.dumps(result), content_type='application/json')
#请求图表数据
@csrf_exempt
def getMonthCharts(request):
result = daoora.getWorkChartsByMonth()
return HttpResponse(json.dumps(result), content_type='application/json')
@csrf_exempt
def getPersonCharts(request):
result = daoora.getWorkChartsByPerson()
return HttpResponse(json.dumps(result), content_type='application/json')
def getSqlSHA1(workflowId):
"""调用django ORM从数据库里查出review_content,从其中获取sqlSHA1值"""
workflowDetail = get_object_or_404(workflow, pk=workflowId)
dictSHA1 = {}
# 使用json.loads方法,把review_content从str转成list,
listReCheckResult = json.loads(workflowDetail.review_content)
for rownum in range(len(listReCheckResult)):
id = rownum + 1
sqlSHA1 = listReCheckResult[rownum][10]
if sqlSHA1 != '':
dictSHA1[id] = sqlSHA1
if dictSHA1 != {}:
# 如果找到有sqlSHA1值,说明是通过pt-OSC操作的,将其放入缓存。
# 因为使用OSC执行的SQL占较少数,所以不设置缓存过期时间
sqlSHA1_cache[workflowId] = dictSHA1
return dictSHA1
@csrf_exempt
def getOscPercent(request):
"""获取该SQL的pt-OSC执行进度和剩余时间"""
workflowId = request.POST['workflowid']
sqlID = request.POST['sqlID']
if workflowId == '' or workflowId is None or sqlID == '' or sqlID is None:
context = {"status":-1 ,'msg': 'workflowId或sqlID参数为空.', "data":""}
return HttpResponse(json.dumps(context), content_type='application/json')
workflowId = int(workflowId)
sqlID = int(sqlID)
dictSHA1 = {}
if workflowId in sqlSHA1_cache:
dictSHA1 = sqlSHA1_cache[workflowId]
# cachehit = "已命中"
else:
dictSHA1 = getSqlSHA1(workflowId)
if dictSHA1 != {} and sqlID in dictSHA1:
sqlSHA1 = dictSHA1[sqlID]
result = inceptionDao.getOscPercent(sqlSHA1) #成功获取到SHA1值,去inception里面查询进度
if result["status"] == 0:
# 获取到进度值
pctResult = result
else:
# result["status"] == 1, 未获取到进度值,需要与workflow.execute_result对比,来判断是已经执行过了,还是还未执行
execute_result = workflow.objects.get(id=workflowId).execute_result
try:
listExecResult = json.loads(execute_result)
except ValueError:
listExecResult = execute_result
if type(listExecResult) == list and len(listExecResult) >= sqlID-1:
if dictSHA1[sqlID] in listExecResult[sqlID-1][10]:
# 已经执行完毕,进度值置为100
pctResult = {"status":0, "msg":"ok", "data":{"percent":100, "timeRemained":""}}
else:
# 可能因为前一条SQL是DML,正在执行中;或者还没执行到这一行。但是status返回的是4,而当前SQL实际上还未开始执行。这里建议前端进行重试
pctResult = {"status":-3, "msg":"进度未知", "data":{"percent":-100, "timeRemained":""}}
|
dap authorization failed'}
return HttpResponse(json.dumps(result), content_type='application/json')
if strUsername in login_failure_counter and login_failure_counter[strUsername]["cnt"] >= lockCntThreshold and (
datetime.datetime.now() - login_failure_counter[strUsername][
"last_failure_time"]).seconds <= lockTimeThreshold:
log_mail_record('user:{},login failed, account locking...'.format(strUsername))
result = {'status': 3, 'msg': '登录失败超过5次,该账号已被锁定5分钟!', 'data': ''}
return HttpResponse(json.dumps(result), content_type='application/json')
if user and user.is_active:
request.session['login_username'] = strUsername
result = {'status': 0, 'msg': 'ok', 'data': ''}
return HttpResponse(json.dumps(result), content_type='application/json')
result = loginAuthenticate(strUsername, strPassword)
if result['status'] == 0:
request.session['login_username'] = strUsername
return HttpResponse(json.dumps(result), content_type='application/json')
|
identifier_body
|
views_ajax.py
|
result = {'msg': 'ldap authorization failed'}
return HttpResponse(json.dumps(result), content_type='application/json')
if strUsername in login_failure_counter and login_failure_counter[strUsername]["cnt"] >= lockCntThreshold and (
datetime.datetime.now() - login_failure_counter[strUsername][
"last_failure_time"]).seconds <= lockTimeThreshold:
log_mail_record('user:{},login failed, account locking...'.format(strUsername))
result = {'status': 3, 'msg': '登录失败超过5次,该账号已被锁定5分钟!', 'data': ''}
return HttpResponse(json.dumps(result), content_type='application/json')
if user and user.is_active:
request.session['login_username'] = strUsername
result = {'status': 0, 'msg': 'ok', 'data': ''}
return HttpResponse(json.dumps(result), content_type='application/json')
result = loginAuthenticate(strUsername, strPassword)
if result['status'] == 0:
request.session['login_username'] = strUsername
return HttpRespo
|
sqlContent = request.POST['sql_content']
clusterName = request.POST['cluster_name']
finalResult = {'status':'ok', 'msg':'检测通过', 'data':[]}
#服务器端参数验证
if sqlContent is None or clusterName is None:
finalResult['status'] = 'error'
finalResult['msg'] = '页面提交参数可能为空'
return HttpResponse(json.dumps(finalResult), content_type='application/json')
sqlContent = sqlContent.rstrip()
if sqlContent[-1] != ";":
finalResult['status'] = 'error'
finalResult['msg'] = 'Oracle SQL语句结尾没有以;结尾,请重新修改并提交!'
return HttpResponse(json.dumps(finalResult), content_type='application/json')
sqlContent = sqlContent.rstrip(';')
#使用explain plan进行自动审核
try:
resultList = daoora.sqlAutoreview(sqlContent, clusterName)
except Exception as err:
finalResult['status'] = 'error'
finalResult['msg'] = str(err)
else:
for result in resultList:
if result['stage'] != 'CHECKED':
finalResult['status'] = 'error'
finalResult['msg'] = result['errormessage']+' -- '+result['sql']
#return HttpResponse(json.dumps(finalResult), content_type='application/json')
#要把result转成JSON存进数据库里,方便SQL单子详细信息展示
return HttpResponse(json.dumps(finalResult), content_type='application/json')
#同步表数据字典
@csrf_exempt
def syncoradict(request):
primaries = ora_primary_config.objects.all().order_by('cluster_name')
listCluster = [primary.cluster_name for primary in primaries]
clusterListSync = request.POST.get('cluster_list_sync')
if clusterListSync:
clusterListSync=json.loads(clusterListSync)
ctl = operation_ctl.objects.get(data_type='数据字典' ,opt_type='同步')
if ctl.status == '进行中':
finalResult = {'status':'error','msg':'有任务进行中'}
else:
ctl.status='进行中'
ctl.save()
syncDictData.delay(clusterListSync)
finalResult = {'status':'ok'}
return HttpResponse(json.dumps(finalResult), content_type='application/json')
finalResult = {'listCluster':listCluster}
return HttpResponse(json.dumps(finalResult), content_type='application/json')
#同步ldap用户到数据库
@csrf_exempt
def syncldapuser(request):
if not settings.ENABLE_LDAP:
result = {'msg': 'LDAP支持未开启'}
return HttpResponse(json.dumps(result), content_type='application/json')
ldapback = LDAPBackend()
ldap = ldapback.ldap
ldapconn = ldap.initialize(settings.AUTH_LDAP_SERVER_URI)
tls = getattr(settings, 'AUTH_LDAP_START_TLS', None)
if tls:
ldapconn.start_tls_s()
binddn = settings.AUTH_LDAP_BIND_DN
bind_password = settings.AUTH_LDAP_BIND_PASSWORD
basedn = settings.AUTH_LDAP_BASEDN
ldapconn.simple_bind_s(binddn, bind_password)
ldapusers = ldapconn.search_s(basedn, ldap.SCOPE_SUBTREE, 'objectclass=*', attrlist=settings.AUTH_LDAP_USER_ATTRLIST)
#ldap中username存在条目的第一个元素的uid中,定义的username_field不再使用,改为截取user_tag
display_field = settings.AUTH_LDAP_USER_ATTR_MAP['display']
email_field = settings.AUTH_LDAP_USER_ATTR_MAP['email']
count = 0
try:
for user in ldapusers:
user_tag=user[0].split(',')
user_attr = user[1]
if user_tag and user_attr:
username = user_tag[0][user_tag[0].find('=')+1:].encode()
display = user_attr.get(display_field,['none'.encode(),])[0]
email = user_attr.get(email_field,['none'.encode(),])[0]
already_user = users.objects.filter(username=username.decode()).filter(is_ldapuser=True)
if len(already_user) == 0:
u = users(username=username.decode(), display=display.decode(), email=email.decode(), is_ldapuser=True,is_active=0)
u.save()
count += 1
except Exception as err:
result = {'msg': '用户{0}导入错误:{1}'.format(username,str(err))}
return HttpResponse(json.dumps(result))
else:
result = {'msg': '同步{}个用户.'.format(count)}
return HttpResponse(json.dumps(result), content_type='application/json')
#请求图表数据
@csrf_exempt
def getMonthCharts(request):
result = daoora.getWorkChartsByMonth()
return HttpResponse(json.dumps(result), content_type='application/json')
@csrf_exempt
def getPersonCharts(request):
result = daoora.getWorkChartsByPerson()
return HttpResponse(json.dumps(result), content_type='application/json')
def getSqlSHA1(workflowId):
"""调用django ORM从数据库里查出review_content,从其中获取sqlSHA1值"""
workflowDetail = get_object_or_404(workflow, pk=workflowId)
dictSHA1 = {}
# 使用json.loads方法,把review_content从str转成list,
listReCheckResult = json.loads(workflowDetail.review_content)
for rownum in range(len(listReCheckResult)):
id = rownum + 1
sqlSHA1 = listReCheckResult[rownum][10]
if sqlSHA1 != '':
dictSHA1[id] = sqlSHA1
if dictSHA1 != {}:
# 如果找到有sqlSHA1值,说明是通过pt-OSC操作的,将其放入缓存。
# 因为使用OSC执行的SQL占较少数,所以不设置缓存过期时间
sqlSHA1_cache[workflowId] = dictSHA1
return dictSHA1
@csrf_exempt
def getOscPercent(request):
"""获取该SQL的pt-OSC执行进度和剩余时间"""
workflowId = request.POST['workflowid']
sqlID = request.POST['sqlID']
if workflowId == '' or workflowId is None or sqlID == '' or sqlID is None:
context = {"status":-1 ,'msg': 'workflowId或sqlID参数为空.', "data":""}
return HttpResponse(json.dumps(context), content_type='application/json')
workflowId = int(workflowId)
sqlID = int(sqlID)
dictSHA1 = {}
if workflowId in sqlSHA1_cache:
dictSHA1 = sqlSHA1_cache[workflowId]
# cachehit = "已命中"
else:
dictSHA1 = getSqlSHA1(workflowId)
if dictSHA1 != {} and sqlID in dictSHA1:
sqlSHA1 = dictSHA1[sqlID]
result = inceptionDao.getOscPercent(sqlSHA1) #成功获取到SHA1值,去inception里面查询进度
if result["status"] == 0:
# 获取到进度值
pctResult = result
else:
# result["status"] == 1, 未获取到进度值,需要与workflow.execute_result对比,来判断是已经执行过了,还是还未执行
execute_result = workflow.objects.get(id=workflowId).execute_result
try:
listExecResult = json.loads(execute_result)
except ValueError:
listExecResult = execute_result
if type(listExecResult) == list and len(listExecResult) >= sqlID-1:
if dictSHA1[sqlID] in listExecResult[sqlID-1][10]:
# 已经执行完毕,进度值置为100
pctResult = {"status":0, "msg":"ok", "data":{"percent":100, "timeRemained":""}}
else:
# 可能因为前一条SQL是DML,正在执行中;或者还没执行到这一行。但是status返回的是4,而当前SQL实际上还未开始执行。这里建议前端进行重试
pctResult = {"status":-3, "msg":"进度未知", "data":{"percent":-100, "timeRemained":""
|
nse(json.dumps(result), content_type='application/json')
#Oracle SQL简单审核
@csrf_exempt
def orasimplecheck(request):
if request.is_ajax():
sqlContent = request.POST.get('sql_content')
clusterName = request.POST.get('cluster_name')
else:
|
conditional_block
|
views_ajax.py
|
ReCheckResult[rownum][10]
if sqlSHA1 != '':
dictSHA1[id] = sqlSHA1
if dictSHA1 != {}:
# 如果找到有sqlSHA1值,说明是通过pt-OSC操作的,将其放入缓存。
# 因为使用OSC执行的SQL占较少数,所以不设置缓存过期时间
sqlSHA1_cache[workflowId] = dictSHA1
return dictSHA1
@csrf_exempt
def getOscPercent(request):
"""获取该SQL的pt-OSC执行进度和剩余时间"""
workflowId = request.POST['workflowid']
sqlID = request.POST['sqlID']
if workflowId == '' or workflowId is None or sqlID == '' or sqlID is None:
context = {"status":-1 ,'msg': 'workflowId或sqlID参数为空.', "data":""}
return HttpResponse(json.dumps(context), content_type='application/json')
workflowId = int(workflowId)
sqlID = int(sqlID)
dictSHA1 = {}
if workflowId in sqlSHA1_cache:
dictSHA1 = sqlSHA1_cache[workflowId]
# cachehit = "已命中"
else:
dictSHA1 = getSqlSHA1(workflowId)
if dictSHA1 != {} and sqlID in dictSHA1:
sqlSHA1 = dictSHA1[sqlID]
result = inceptionDao.getOscPercent(sqlSHA1) #成功获取到SHA1值,去inception里面查询进度
if result["status"] == 0:
# 获取到进度值
pctResult = result
else:
# result["status"] == 1, 未获取到进度值,需要与workflow.execute_result对比,来判断是已经执行过了,还是还未执行
execute_result = workflow.objects.get(id=workflowId).execute_result
try:
listExecResult = json.loads(execute_result)
except ValueError:
listExecResult = execute_result
if type(listExecResult) == list and len(listExecResult) >= sqlID-1:
if dictSHA1[sqlID] in listExecResult[sqlID-1][10]:
# 已经执行完毕,进度值置为100
pctResult = {"status":0, "msg":"ok", "data":{"percent":100, "timeRemained":""}}
else:
# 可能因为前一条SQL是DML,正在执行中;或者还没执行到这一行。但是status返回的是4,而当前SQL实际上还未开始执行。这里建议前端进行重试
pctResult = {"status":-3, "msg":"进度未知", "data":{"percent":-100, "timeRemained":""}}
elif dictSHA1 != {} and sqlID not in dictSHA1:
pctResult = {"status":4, "msg":"该行SQL不是由pt-OSC执行的", "data":""}
else:
pctResult = {"status":-2, "msg":"整个工单不由pt-OSC执行", "data":""}
return HttpResponse(json.dumps(pctResult), content_type='application/json')
@csrf_exempt
def getWorkflowStatus(request):
"""获取某个工单的当前状态"""
workflowId = request.POST['workflowid']
if workflowId == '' or workflowId is None :
context = {"status":-1 ,'msg': 'workflowId参数为空.', "data":""}
return HttpResponse(json.dumps(context), content_type='application/json')
workflowId = int(workflowId)
workflowDetail = get_object_or_404(workflow, pk=workflowId)
workflowStatus = workflowDetail.status
result = {"status":workflowStatus, "msg":"", "data":""}
return HttpResponse(json.dumps(result), content_type='application/json')
@csrf_exempt
def stopOscProgress(request):
"""中止该SQL的pt-OSC进程"""
workflowId = request.POST['workflowid']
sqlID = request.POST['sqlID']
if workflowId == '' or workflowId is None or sqlID == '' or sqlID is None:
context = {"status":-1 ,'msg': 'workflowId或sqlID参数为空.', "data":""}
return HttpResponse(json.dumps(context), content_type='application/json')
loginUser = request.session.get('login_username', False)
workflowDetail = workflow.objects.get(id=workflowId)
try:
listAllReviewMen = json.loads(workflowDetail.review_man)
except ValueError:
listAllReviewMen = (workflowDetail.review_man, )
    #Server-side double check: the workflow must be awaiting manual review and the logged-in user performing the review action must be a reviewer, to prevent attacks or forced bypass via API testing tools.
if workflowDetail.status != Const.workflowStatus['executing']:
context = {"status":-1, "msg":'当前工单状态不是"执行中",请刷新当前页面!', "data":""}
return HttpResponse(json.dumps(context), content_type='application/json')
if loginUser is None or loginUser not in listAllReviewMen:
context = {"status":-1 ,'msg': '当前登录用户不是审核人,请重新登录.', "data":""}
return HttpResponse(json.dumps(context), content_type='application/json')
workflowId = int(workflowId)
sqlID = int(sqlID)
if workflowId in sqlSHA1_cache:
dictSHA1 = sqlSHA1_cache[workflowId]
else:
dictSHA1 = getSqlSHA1(workflowId)
if dictSHA1 != {} and sqlID in dictSHA1:
sqlSHA1 = dictSHA1[sqlID]
optResult = inceptionDao.stopOscProgress(sqlSHA1)
else:
optResult = {"status":4, "msg":"不是由pt-OSC执行的", "data":""}
    return HttpResponse(json.dumps(optResult), content_type='application/json')
@csrf_exempt
def manExec(request):
loginUser = request.session.get('login_username')
workflowId = request.POST['workflowid']
with transaction.atomic():
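        # Lock the workflow row (select_for_update) so two reviewers cannot claim the same ticket concurrently.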
try:
workflowDetail = workflow.objects.select_for_update().get(id=workflowId,status__in=(Const.workflowStatus['manreviewing'],Const.workflowStatus['autoreviewwrong'],))
except Exception:
result = {'msg': '已经在处理'}
return HttpResponse(json.dumps(result), content_type='application/json')
try:
reviewMen = json.loads(workflowDetail.review_man)
except Exception:
reviewMen = workflowDetail.review_man
if not loginUser in reviewMen:
result = {'msg': '你不在审核人之列'}
return HttpResponse(json.dumps(result), content_type='application/json')
workflowDetail.status = Const.workflowStatus['manexec']
workflowDetail.operator = loginUser
try:
workflowDetail.save()
except Exception as e:
status = -1
msg = str(e)
else:
status = 2
msg = '更改状态为手工执行'
result = {"status":status,"msg":msg}
return HttpResponse(json.dumps(result), content_type='application/json')
#Get the current request's URL
def _getDetailUrl(request):
scheme = request.scheme
#host = request.META['HTTP_HOST']
host = getattr(settings,'WAN_HOST')
return "%s://%s/detail/" % (scheme, host)
@csrf_exempt
def manFinish(request):
loginUser = request.session.get('login_username')
workflowId = request.POST['workflowid']
executeStatus = request.POST['status']
executeResult = request.POST['content']
workflowDetail = workflow.objects.get(id=workflowId)
if loginUser != workflowDetail.operator:
result = {"status":-1,"msg":"需要处理人操作"}
return HttpResponse(json.dumps(result), content_type='application/json')
workflowDetail.execute_result = executeResult
if executeStatus == '0':
workflowDetail.status = Const.workflowStatus['manexcept']
elif executeStatus == '1':
workflowDetail.status = Const.workflowStatus['manfinish']
try:
workflowDetail.operator = loginUser
workflowDetail.finish_time = getNow()
workflowDetail.save()
except Exception as e:
status = -1
msg = str(e)
else:
status = 2
msg = '保存成功'
    #Once execution has finished, decide from the settings.py configuration whether to send a reminder email to the submitter and the DBA; the DBA needs to know about tickets that have been reviewed and executed.
url = _getDetailUrl(request) + str(workflowId) + '/'
    #Send one email each to the primary and secondary reviewers, the requester, and the DBA.
engineer = workflowDetail.engineer
operator = workflowDetail.operator
workflowStatus = workflowDetail.status
workflowName = workflowDetail.workflow_name
objEngineer = users.objects.get(username=engineer)
strTitle = "SQL上线工单执行完毕 # " + str(workflowId)
strContent = "发起人:" + engineer + "\n操作人:" + operator + "\n工单地址:" + url + "\n工单名称: " + workflowName +"\n执行结果:" + workflowStatus
mailDba.delay(strTitle, strContent, [objEngineer.email])
wechatDba.delay(strTitle, strContent,objEngineer.wechat_account)
dingDba.delay(strContent,objEngineer.mobile)
result = {"status":status,"msg":msg}
return HttpResponse(json.dumps(result), content_type='applicatio
|
n/json')
|
identifier_name
|
|
consts.go
|
// Default redundancy parameters.
var (
// syncCheckInterval is how often the repair heap checks the consensus code
// to see if the renter is synced. This is created because the contractor
// may not update the synced channel until a block is received under some
// conditions.
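	// build.Select picks whichever of the Dev, Standard, or Testing values matches the active build tag.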
syncCheckInterval = build.Select(build.Var{
Dev: time.Second * 3,
Standard: time.Second * 5,
Testing: time.Second,
}).(time.Duration)
// cachedUtilitiesUpdateInterval is how often the renter updates the
// cachedUtilities.
cachedUtilitiesUpdateInterval = build.Select(build.Var{
Dev: time.Minute,
Standard: time.Minute * 10,
Testing: time.Second * 3,
}).(time.Duration)
)
// Default memory usage parameters.
var (
// registryMemoryDefault establishes the default amount of memory that the
// renter will use when performing registry operations. The mapping is
// currently not perfect due to GC overhead and other places where we don't
// count all of the memory usage accurately.
registryMemoryDefault = build.Select(build.Var{
Dev: uint64(1 << 28), // 256 MiB
Standard: uint64(1 << 29), // 0.5 GiB
Testing: uint64(1 << 17), // 128 KiB - 4 KiB sector size, need to test memory exhaustion
}).(uint64)
// userUploadMemoryDefault establishes the default amount of memory that the
// renter will use when performing user-initiated uploads. The mapping is
// currently not perfect due to GC overhead and other places where we don't
// count all of the memory usage accurately.
userUploadMemoryDefault = build.Select(build.Var{
Dev: uint64(1 << 28), // 256 MiB
Standard: uint64(1 << 29), // 0.5 GiB
Testing: uint64(1 << 17), // 128 KiB - 4 KiB sector size, need to test memory exhaustion
}).(uint64)
// userDownloadMemoryDefault establishes the default amount of memory that
// the renter will use when performing user-initiated downloads. The mapping
// is currently not perfect due to GC overhead and other places where we
// don't count all of the memory usage accurately.
userDownloadMemoryDefault = build.Select(build.Var{
Dev: uint64(1 << 28), // 256 MiB
Standard: uint64(1 << 29), // 0.5 GiB
Testing: uint64(1 << 17), // 128 KiB - 4 KiB sector size, need to test memory exhaustion
}).(uint64)
// repairMemoryDefault establishes the default amount of memory that the
	// renter will use when performing system-scheduled uploads and downloads.
// The mapping is currently not perfect due to GC overhead and other places
// where we don't count all of the memory usage accurately.
repairMemoryDefault = build.Select(build.Var{
Dev: uint64(1 << 28), // 256 MiB
Standard: uint64(1 << 31), // 2.0 GiB
Testing: uint64(1 << 17), // 128 KiB - 4 KiB sector size, need to test memory exhaustion
}).(uint64)
// registryMemoryPriorityDefault is the amount of memory that is held in reserve
// explicitly for priority actions.
registryMemoryPriorityDefault = uint64(0)
// userUploadMemoryPriorityDefault is the amount of memory that is held in reserve
// explicitly for priority actions.
userUploadMemoryPriorityDefault = uint64(0)
// userDownloadMemoryPriorityDefault is the amount of memory that is held in
// reserve explicitly for priority actions.
userDownloadMemoryPriorityDefault = uint64(0)
// repairMemoryPriorityDefault is the amount of memory that is held in
// reserve explicitly for priority actions.
repairMemoryPriorityDefault = repairMemoryDefault / 4
// gcMemoryThreshold is the amount of memory after which a memory manager
// triggers a garbage collection.
gcMemoryThreshold = uint64(1 << 28) // 256 MiB
// initialStreamerCacheSize defines the cache size that each streamer will
// start using when it is created. A lower initial cache size will mean that
// it will take more requests / round trips for the cache to grow, however
// the cache size gets set to at least 2x the minimum read size initially
// anyway, which means any application doing very large reads is going to
// automatically have the cache size stepped up without having to do manual
// growth.
initialStreamerCacheSize = build.Select(build.Var{
Dev: int64(1 << 13), // 8 KiB
Standard: int64(1 << 19), // 512 KiB
Testing: int64(1 << 10), // 1 KiB
}).(int64)
// maxStreamerCacheSize defines the maximum cache size that each streamer
// will use before it no longer increases its own cache size. The value has
// been set fairly low because some applications like mpv will request very
// large buffer sizes, taking as much data as fast as they can. This results
// in the cache size on Sia's end growing to match the size of the
// requesting application's buffer, and harms seek times. Maintaining a low
// maximum ensures that runaway growth is kept under at least a bit of
// control.
//
// This would be best resolved by knowing the actual bitrate of the data
// being fed to the user instead of trying to guess a bitrate, however as of
// time of writing we don't have an easy way to get that information.
maxStreamerCacheSize = build.Select(build.Var{
Dev: int64(1 << 20), // 1 MiB
Standard: int64(1 << 25), // 32 MiB
Testing: int64(1 << 13), // 8 KiB
}).(int64)
)
// Default bandwidth usage parameters.
const (
// DefaultMaxDownloadSpeed is set to zero to indicate no limit, the user
// can set a custom MaxDownloadSpeed through the API
DefaultMaxDownloadSpeed = 0
// DefaultMaxUploadSpeed is set to zero to indicate no limit, the user
// can set a custom MaxUploadSpeed through the API
DefaultMaxUploadSpeed = 0
)
// Naming conventions for code readability.
const (
// destinationTypeSeekStream is the destination type used for downloads
// from the /renter/stream endpoint.
destinationTypeSeekStream = "httpseekstream"
// memoryPriorityLow is used to request low priority memory
memoryPriorityLow = false
// memoryPriorityHigh is used to request high priority memory
memoryPriorityHigh = true
)
// Constants that tune the health and repair processes.
const (
// maxConsecutiveDirHeapFailures is the maximum number of consecutive times
// the repair heap is allowed to fail to get a directory from the Directory
// Heap
maxConsecutiveDirHeapFailures = 5
// maxRandomStuckChunksAddToHeap is the maximum number of random stuck
// chunks that the stuck loop will add to the uploadHeap at a time. Random
// stuck chunks are the stuck chunks chosen at random from the file system
// as opposed to stuck chunks chosen from a previously successful file
maxRandomStuckChunksAddToHeap = 5
// maxRandomStuckChunksInHeap is the maximum number of random stuck chunks
// that the stuck loop will try to keep in the uploadHeap. Random stuck
// chunks are the stuck chunks chosen at random from the file system as
// opposed to stuck chunks chosen from a previously successful file
maxRandomStuckChunksInHeap = 10
// maxStuckChunksInHeap is the maximum number of stuck chunks that the stuck
// loop will try to keep in the uploadHeap
maxStuckChunksInHeap = 25
)
var (
// healthCheckInterval defines the maximum amount of time that should pass
// in between checking the health of a file or directory.
healthCheckInterval = build.Select(build.Var{
Dev: 15 * time.Minute,
Standard: 1 * time.Hour,
Testing: 5 * time.Second,
}).(time.Duration)
// healthLoopErrorSleepDuration indicates how long the health loop should
// sleep before retrying if there is an error preventing progress.
healthLoopErrorSleepDuration = build.Select(build.Var{
Dev: 10 * time.Second,
Standard:
|
{
return fmt.Sprintf("Siafile '%v' has a health of %v and redundancy of %v", siaPath.String(), health, redundancy)
}
|
identifier_body
|
|
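Every constant in the consts.go block above is produced by build.Select(build.Var{...}) followed by a type assertion, so Dev, Standard, and Testing builds pick different values from the same declaration. The self-contained Go sketch below shows the idiom with stand-in Select and Var definitions; it is not the actual Sia build package, only an illustration of why each declaration ends in an assertion like .(time.Duration) or .(uint64).

package main

import (
	"fmt"
	"time"
)

// Release would normally be fixed at build time ("dev", "standard", "testing").
const Release = "standard"

// Var holds one value per release mode; Select returns the value for Release.
type Var struct {
	Dev      interface{}
	Standard interface{}
	Testing  interface{}
}

func Select(v Var) interface{} {
	switch Release {
	case "dev":
		return v.Dev
	case "testing":
		return v.Testing
	default:
		return v.Standard
	}
}

// The type assertion recovers the concrete type from interface{}.
var syncCheckInterval = Select(Var{
	Dev:      time.Second * 3,
	Standard: time.Second * 5,
	Testing:  time.Second,
}).(time.Duration)

func main() {
	fmt.Println("syncCheckInterval:", syncCheckInterval)
}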
consts.go
|
(siaPath modules.SiaPath, health, redundancy float64) string {
return fmt.Sprintf("Siafile '%v' has a health of %v and redundancy of %v", siaPath.String(), health, redundancy)
}
// Default redundancy parameters.
var (
// syncCheckInterval is how often the repair heap checks the consensus code
// to see if the renter is synced. This is created because the contractor
// may not update the synced channel until a block is received under some
// conditions.
syncCheckInterval = build.Select(build.Var{
Dev: time.Second * 3,
Standard: time.Second * 5,
Testing: time.Second,
}).(time.Duration)
// cachedUtilitiesUpdateInterval is how often the renter updates the
// cachedUtilities.
cachedUtilitiesUpdateInterval = build.Select(build.Var{
Dev: time.Minute,
Standard: time.Minute * 10,
Testing: time.Second * 3,
}).(time.Duration)
)
// Default memory usage parameters.
var (
// registryMemoryDefault establishes the default amount of memory that the
// renter will use when performing registry operations. The mapping is
// currently not perfect due to GC overhead and other places where we don't
// count all of the memory usage accurately.
registryMemoryDefault = build.Select(build.Var{
Dev: uint64(1 << 28), // 256 MiB
Standard: uint64(1 << 29), // 0.5 GiB
Testing: uint64(1 << 17), // 128 KiB - 4 KiB sector size, need to test memory exhaustion
}).(uint64)
// userUploadMemoryDefault establishes the default amount of memory that the
// renter will use when performing user-initiated uploads. The mapping is
// currently not perfect due to GC overhead and other places where we don't
// count all of the memory usage accurately.
userUploadMemoryDefault = build.Select(build.Var{
Dev: uint64(1 << 28), // 256 MiB
Standard: uint64(1 << 29), // 0.5 GiB
Testing: uint64(1 << 17), // 128 KiB - 4 KiB sector size, need to test memory exhaustion
}).(uint64)
// userDownloadMemoryDefault establishes the default amount of memory that
// the renter will use when performing user-initiated downloads. The mapping
// is currently not perfect due to GC overhead and other places where we
// don't count all of the memory usage accurately.
userDownloadMemoryDefault = build.Select(build.Var{
Dev: uint64(1 << 28), // 256 MiB
Standard: uint64(1 << 29), // 0.5 GiB
Testing: uint64(1 << 17), // 128 KiB - 4 KiB sector size, need to test memory exhaustion
}).(uint64)
// repairMemoryDefault establishes the default amount of memory that the
// renter will use when performing system-scheduled uploads and downloads.
// The mapping is currently not perfect due to GC overhead and other places
// where we don't count all of the memory usage accurately.
repairMemoryDefault = build.Select(build.Var{
Dev: uint64(1 << 28), // 256 MiB
Standard: uint64(1 << 31), // 2.0 GiB
Testing: uint64(1 << 17), // 128 KiB - 4 KiB sector size, need to test memory exhaustion
}).(uint64)
// registryMemoryPriorityDefault is the amount of memory that is held in reserve
// explicitly for priority actions.
registryMemoryPriorityDefault = uint64(0)
// userUploadMemoryPriorityDefault is the amount of memory that is held in reserve
// explicitly for priority actions.
userUploadMemoryPriorityDefault = uint64(0)
// userDownloadMemoryPriorityDefault is the amount of memory that is held in
// reserve explicitly for priority actions.
userDownloadMemoryPriorityDefault = uint64(0)
// repairMemoryPriorityDefault is the amount of memory that is held in
// reserve explicitly for priority actions.
repairMemoryPriorityDefault = repairMemoryDefault / 4
// gcMemoryThreshold is the amount of memory after which a memory manager
// triggers a garbage collection.
gcMemoryThreshold = uint64(1 << 28) // 256 MiB
// initialStreamerCacheSize defines the cache size that each streamer will
// start using when it is created. A lower initial cache size will mean that
// it will take more requests / round trips for the cache to grow, however
// the cache size gets set to at least 2x the minimum read size initially
// anyway, which means any application doing very large reads is going to
// automatically have the cache size stepped up without having to do manual
// growth.
initialStreamerCacheSize = build.Select(build.Var{
Dev: int64(1 << 13), // 8 KiB
Standard: int64(1 << 19), // 512 KiB
Testing: int64(1 << 10), // 1 KiB
}).(int64)
// maxStreamerCacheSize defines the maximum cache size that each streamer
// will use before it no longer increases its own cache size. The value has
// been set fairly low because some applications like mpv will request very
// large buffer sizes, taking as much data as fast as they can. This results
// in the cache size on Sia's end growing to match the size of the
// requesting application's buffer, and harms seek times. Maintaining a low
// maximum ensures that runaway growth is kept under at least a bit of
// control.
//
// This would be best resolved by knowing the actual bitrate of the data
// being fed to the user instead of trying to guess a bitrate, however as of
// time of writing we don't have an easy way to get that information.
maxStreamerCacheSize = build.Select(build.Var{
Dev: int64(1 << 20), // 1 MiB
Standard: int64(1 << 25), // 32 MiB
Testing: int64(1 << 13), // 8 KiB
}).(int64)
)
// Default bandwidth usage parameters.
const (
// DefaultMaxDownloadSpeed is set to zero to indicate no limit, the user
// can set a custom MaxDownloadSpeed through the API
DefaultMaxDownloadSpeed = 0
// DefaultMaxUploadSpeed is set to zero to indicate no limit, the user
// can set a custom MaxUploadSpeed through the API
DefaultMaxUploadSpeed = 0
)
// Naming conventions for code readability.
const (
// destinationTypeSeekStream is the destination type used for downloads
// from the /renter/stream endpoint.
destinationTypeSeekStream = "httpseekstream"
// memoryPriorityLow is used to request low priority memory
memoryPriorityLow = false
// memoryPriorityHigh is used to request high priority memory
memoryPriorityHigh = true
)
// Constants that tune the health and repair processes.
const (
// maxConsecutiveDirHeapFailures is the maximum number of consecutive times
// the repair heap is allowed to fail to get a directory from the Directory
// Heap
maxConsecutiveDirHeapFailures = 5
// maxRandomStuckChunksAddToHeap is the maximum number of random stuck
// chunks that the stuck loop will add to the uploadHeap at a time. Random
// stuck chunks are the stuck chunks chosen at random from the file system
// as opposed to stuck chunks chosen from a previously successful file
maxRandomStuckChunksAddToHeap = 5
// maxRandomStuckChunksInHeap is the maximum number of random stuck chunks
// that the stuck loop will try to keep in the uploadHeap. Random stuck
// chunks are the stuck chunks chosen at random from the file system as
// opposed to stuck chunks chosen from a previously successful file
maxRandomStuckChunksInHeap = 10
// maxStuckChunksInHeap is the maximum number of stuck chunks that the stuck
// loop will try to keep in the uploadHeap
maxStuckChunksInHeap = 25
)
var (
// healthCheckInterval defines the maximum amount of time that should pass
// in between checking the health of a file or directory.
healthCheckInterval = build.Select(build.Var{
Dev: 15 * time.Minute,
Standard: 1 * time.Hour,
Testing: 5 * time.Second,
}).(time.Duration)
// healthLoopErrorSleepDuration indicates how long the health loop should
// sleep before retrying if there is an error preventing progress.
healthLoopError
|
AlertCauseSiafileLowRedundancy
|
identifier_name
|
|
consts.go
|
// syncCheckInterval is how often the repair heap checks the consensus code
// to see if the renter is synced. This is created because the contractor
// may not update the synced channel until a block is received under some
// conditions.
syncCheckInterval = build.Select(build.Var{
Dev: time.Second * 3,
Standard: time.Second * 5,
Testing: time.Second,
}).(time.Duration)
// cachedUtilitiesUpdateInterval is how often the renter updates the
// cachedUtilities.
cachedUtilitiesUpdateInterval = build.Select(build.Var{
Dev: time.Minute,
Standard: time.Minute * 10,
Testing: time.Second * 3,
}).(time.Duration)
)
// Default memory usage parameters.
var (
// registryMemoryDefault establishes the default amount of memory that the
// renter will use when performing registry operations. The mapping is
// currently not perfect due to GC overhead and other places where we don't
// count all of the memory usage accurately.
registryMemoryDefault = build.Select(build.Var{
Dev: uint64(1 << 28), // 256 MiB
Standard: uint64(1 << 29), // 0.5 GiB
Testing: uint64(1 << 17), // 128 KiB - 4 KiB sector size, need to test memory exhaustion
}).(uint64)
// userUploadMemoryDefault establishes the default amount of memory that the
// renter will use when performing user-initiated uploads. The mapping is
// currently not perfect due to GC overhead and other places where we don't
// count all of the memory usage accurately.
userUploadMemoryDefault = build.Select(build.Var{
Dev: uint64(1 << 28), // 256 MiB
Standard: uint64(1 << 29), // 0.5 GiB
Testing: uint64(1 << 17), // 128 KiB - 4 KiB sector size, need to test memory exhaustion
}).(uint64)
// userDownloadMemoryDefault establishes the default amount of memory that
// the renter will use when performing user-initiated downloads. The mapping
// is currently not perfect due to GC overhead and other places where we
// don't count all of the memory usage accurately.
userDownloadMemoryDefault = build.Select(build.Var{
Dev: uint64(1 << 28), // 256 MiB
Standard: uint64(1 << 29), // 0.5 GiB
Testing: uint64(1 << 17), // 128 KiB - 4 KiB sector size, need to test memory exhaustion
}).(uint64)
// repairMemoryDefault establishes the default amount of memory that the
// renter will use when performing system-scheduled uploads and downloads.
// The mapping is currently not perfect due to GC overhead and other places
// where we don't count all of the memory usage accurately.
repairMemoryDefault = build.Select(build.Var{
Dev: uint64(1 << 28), // 256 MiB
Standard: uint64(1 << 31), // 2.0 GiB
Testing: uint64(1 << 17), // 128 KiB - 4 KiB sector size, need to test memory exhaustion
}).(uint64)
// registryMemoryPriorityDefault is the amount of memory that is held in reserve
// explicitly for priority actions.
registryMemoryPriorityDefault = uint64(0)
// userUploadMemoryPriorityDefault is the amount of memory that is held in reserve
// explicitly for priority actions.
userUploadMemoryPriorityDefault = uint64(0)
// userDownloadMemoryPriorityDefault is the amount of memory that is held in
// reserve explicitly for priority actions.
userDownloadMemoryPriorityDefault = uint64(0)
// repairMemoryPriorityDefault is the amount of memory that is held in
// reserve explicitly for priority actions.
repairMemoryPriorityDefault = repairMemoryDefault / 4
// gcMemoryThreshold is the amount of memory after which a memory manager
// triggers a garbage collection.
gcMemoryThreshold = uint64(1 << 28) // 256 MiB
// initialStreamerCacheSize defines the cache size that each streamer will
// start using when it is created. A lower initial cache size will mean that
// it will take more requests / round trips for the cache to grow, however
// the cache size gets set to at least 2x the minimum read size initially
// anyway, which means any application doing very large reads is going to
// automatically have the cache size stepped up without having to do manual
// growth.
initialStreamerCacheSize = build.Select(build.Var{
Dev: int64(1 << 13), // 8 KiB
Standard: int64(1 << 19), // 512 KiB
Testing: int64(1 << 10), // 1 KiB
}).(int64)
// maxStreamerCacheSize defines the maximum cache size that each streamer
// will use before it no longer increases its own cache size. The value has
// been set fairly low because some applications like mpv will request very
// large buffer sizes, taking as much data as fast as they can. This results
// in the cache size on Sia's end growing to match the size of the
// requesting application's buffer, and harms seek times. Maintaining a low
// maximum ensures that runaway growth is kept under at least a bit of
// control.
//
// This would be best resolved by knowing the actual bitrate of the data
// being fed to the user instead of trying to guess a bitrate, however as of
// time of writing we don't have an easy way to get that information.
maxStreamerCacheSize = build.Select(build.Var{
Dev: int64(1 << 20), // 1 MiB
Standard: int64(1 << 25), // 32 MiB
Testing: int64(1 << 13), // 8 KiB
}).(int64)
)
// Default bandwidth usage parameters.
const (
// DefaultMaxDownloadSpeed is set to zero to indicate no limit, the user
// can set a custom MaxDownloadSpeed through the API
DefaultMaxDownloadSpeed = 0
// DefaultMaxUploadSpeed is set to zero to indicate no limit, the user
// can set a custom MaxUploadSpeed through the API
DefaultMaxUploadSpeed = 0
)
// Naming conventions for code readability.
const (
// destinationTypeSeekStream is the destination type used for downloads
// from the /renter/stream endpoint.
destinationTypeSeekStream = "httpseekstream"
// memoryPriorityLow is used to request low priority memory
memoryPriorityLow = false
// memoryPriorityHigh is used to request high priority memory
memoryPriorityHigh = true
)
// Constants that tune the health and repair processes.
const (
// maxConsecutiveDirHeapFailures is the maximum number of consecutive times
// the repair heap is allowed to fail to get a directory from the Directory
// Heap
maxConsecutiveDirHeapFailures = 5
// maxRandomStuckChunksAddToHeap is the maximum number of random stuck
// chunks that the stuck loop will add to the uploadHeap at a time. Random
// stuck chunks are the stuck chunks chosen at random from the file system
// as opposed to stuck chunks chosen from a previously successful file
maxRandomStuckChunksAddToHeap = 5
// maxRandomStuckChunksInHeap is the maximum number of random stuck chunks
// that the stuck loop will try to keep in the uploadHeap. Random stuck
// chunks are the stuck chunks chosen at random from the file system as
// opposed to stuck chunks chosen from a previously successful file
maxRandomStuckChunksInHeap = 10
// maxStuckChunksInHeap is the maximum number of stuck chunks that the stuck
// loop will try to keep in the uploadHeap
maxStuckChunksInHeap = 25
)
var (
// healthCheckInterval defines the maximum amount of time that should pass
// in between checking the health of a file or directory.
healthCheckInterval = build.Select(build.Var{
Dev: 15 * time.Minute,
Standard: 1 * time.Hour,
Testing: 5 * time.Second,
}).(time.Duration)
// healthLoopErrorSleepDuration indicates how long the health loop should
// sleep before retrying if there is an error preventing progress.
healthLoopErrorSleepDuration = build.Select(build.Var{
Dev: 10 * time.Second,
Standard: 3
|
return fmt.Sprintf("Siafile '%v' has a health of %v and redundancy of %v", siaPath.String(), health, redundancy)
}
// Default redundancy parameters.
var (
|
random_line_split
|
|
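The memory defaults above are written as bit shifts (uint64(1 << 28) and so on) with the human-readable size in a trailing comment. As a quick worked check of that arithmetic, the short Go program below prints the byte counts for the shifts used in the block (17, 28, 29, 31) alongside their KiB/MiB/GiB equivalents; it is purely illustrative and independent of the renter code.

package main

import "fmt"

func main() {
	const KiB, MiB, GiB = 1 << 10, 1 << 20, 1 << 30
	// 1<<17 = 128 KiB, 1<<28 = 256 MiB, 1<<29 = 0.5 GiB, 1<<31 = 2.0 GiB
	for _, shift := range []uint{17, 28, 29, 31} {
		bytes := uint64(1) << shift
		fmt.Printf("1<<%-2d = %11d bytes = %9.0f KiB = %7.1f MiB = %.2f GiB\n",
			shift, bytes, float64(bytes)/KiB, float64(bytes)/MiB, float64(bytes)/GiB)
	}
}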
MADDPGAgent.py
|
GAMMA = 0.99 # discount factor
# TAU = 1e-3 # for soft update of target parameters
ACTOR_LR = 1e-3 # Actor network learning rate
CRITIC_LR = 1e-4 # Critic network learning rate
UPDATE_EVERY = 20 # how often to update the network (time step)
# UPDATE_TIMES = 5 # how many times to update in one go
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class Agent():
"""Interacts with and learns from the environment."""
def __init__(self, state_size, action_size, num_agents, seed, fc1=400, fc2=300, update_times=10,
weight_decay=1.e-5):
"""Initialize an Agent object.
Params
======
state_size (int): dimension of each state
action_size (int): dimension of each action
seed (int): random seed
"""
self.state_size = state_size
self.action_size = action_size
self.seed = random.seed(seed)
self.n_seed = np.random.seed(seed)
self.num_agents = num_agents
self.update_times = update_times
self.n_step = 0
self.TAU = 1e-3
self.noise = []
for i in range(num_agents):
self.noise.append(rm.OrnsteinUhlenbeckProcess(size=(action_size,), std=LinearSchedule(0.4, 0, 2000)))
# critic local and target network (Q-Learning)
self.critic_local = Critic(state_size, action_size, fc1, fc2, seed).to(device)
self.critic_target = Critic(state_size, action_size, fc1, fc2, seed).to(device)
self.critic_target.load_state_dict(self.critic_local.state_dict())
# actor local and target network (Policy gradient)
self.actor_local = Actor(state_size, action_size, fc1, fc2, seed).to(device)
self.actor_target = Actor(state_size, action_size, fc1, fc2, seed).to(device)
self.actor_target.load_state_dict(self.actor_local.state_dict())
# optimizer for critic and actor network
self.optimizer_critic = optim.Adam(self.critic_local.parameters(), lr=CRITIC_LR, weight_decay=1.e-5)
self.optimizer_actor = optim.Adam(self.actor_local.parameters(), lr=ACTOR_LR)
# Replay memory
self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, seed)
# Initialize time step (for updating every UPDATE_EVERY steps)
self.t_step = 0
self.a_step = 0
def step(self, state, action, reward, next_state, done):
# Save experience in replay memory
for i in range(self.num_agents):
all_state = np.concatenate((state[i], state[1 - i]))
all_actions = np.concatenate((action[i], action[1 - i]))
all_next_state = np.concatenate((next_state[i], next_state[1 - i]))
self.memory.add(state[i], all_state, action[i], all_actions, reward[i], next_state[i], all_next_state,
done[i])
# Learn every UPDATE_EVERY time steps.
self.t_step = (self.t_step + 1) % UPDATE_EVERY
if self.t_step == 0:
# If enough samples are available in memory, get random subset and learn
if len(self.memory) > BATCH_SIZE:
for i in range(self.update_times):
experiences = self.memory.sample()
self.learn(experiences, GAMMA)
def act(self, state, training=True):
"""Returns continous actions values for all action for given state as per current policy.
Params
======
state (array_like): current state
"""
|
epsilon = max((1500 - self.n_step) / 1500, .01)
self.actor_local.eval()
with torch.no_grad():
actions = self.actor_local(state)
self.actor_local.train()
if training:
# return np.clip(actions.cpu().data.numpy()+np.random.uniform(-1,1,(2,2))*epsilon,-1,1) #adding noise to action space
r = np.random.random()
if r <= epsilon:
return np.random.uniform(-1, 1, (2, 2))
else:
return np.clip(actions.cpu().data.numpy(), -1, 1) # epsilon greedy policy
else:
return actions.cpu().data.numpy()
def learn(self, experiences, gamma):
"""Update value parameters using given batch of experience tuples.
Params
======
experiences (Tuple[torch.Variable]): tuple of (s, a, r, s', done) tuples
gamma (float): discount factor
"""
states, all_state, action, all_actions, rewards, next_state, all_next_state, dones = experiences
batch_size = all_next_state.shape[0]
all_next_actions = self.actor_target(all_next_state.view(batch_size * 2, -1)).view(batch_size, -1)
critic_target_input = torch.cat((all_next_state, all_next_actions.view(batch_size * 2, -1)[1::2]), dim=1).to(
device)
with torch.no_grad():
Q_target_next = self.critic_target(critic_target_input, all_next_actions.view(batch_size * 2, -1)[::2])
Q_targets = rewards + (gamma * Q_target_next * (1 - dones))
critic_local_input = torch.cat((all_state, all_actions.view(batch_size * 2, -1)[1::2]), dim=1).to(device)
Q_expected = self.critic_local(critic_local_input, action)
# critic loss
huber_loss = torch.nn.SmoothL1Loss()
loss = huber_loss(Q_expected, Q_targets.detach())
self.optimizer_critic.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(self.critic_local.parameters(), 1)
self.optimizer_critic.step()
# actor loss
action_pr_self = self.actor_local(states)
action_pr_other = self.actor_local(all_next_state.view(batch_size * 2, -1)[1::2]).detach()
# critic_local_input2=torch.cat((all_state,torch.cat((action_pr_self,action_pr_other),dim=1)),dim=1)
critic_local_input2 = torch.cat((all_state, action_pr_other), dim=1)
p_loss = -self.critic_local(critic_local_input2, action_pr_self).mean()
self.optimizer_actor.zero_grad()
p_loss.backward()
self.optimizer_actor.step()
# ------------------- update target network ------------------- #
self.TAU = min(5e-1, self.TAU * 1.001)
self.soft_update(self.critic_local, self.critic_target, self.TAU)
self.soft_update(self.actor_local, self.actor_target, self.TAU)
def soft_update(self, local_model, target_model, tau):
"""Soft update model parameters.
θ_target = τ*θ_local + (1 - τ)*θ_target
Params
======
local_model (PyTorch model): weights will be copied from
target_model (PyTorch model): weights will be copied to
tau (float): interpolation parameter
"""
for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
target_param.data.copy_(tau * local_param.data + (1.0 - tau) * target_param.data)
def reset_random(self):
for i in range(self.num_agents):
self.noise[i].reset_states()
class ReplayBuffer:
"""Fixed-size buffer to store experience tuples."""
def __init__(self, action_size, buffer_size, batch_size, seed):
"""Initialize a ReplayBuffer object.
Params
======
action_size (int): dimension of each action
buffer_size (int): maximum size of buffer
batch_size (int): size of each training batch
seed (int): random seed
"""
self.action_size = action_size
self.memory = deque(maxlen=buffer_size)
self.batch_size = batch_size
self.experience = namedtuple("Experience",
field_names=["state", "all_state", "action", "all_actions", "reward", "next_state",
"all_next_state", "done"])
self.seed = random.seed(seed)
def add(self, states, all_state, action, all_actions, reward, next_state, all_next_state, done):
"""Add a new experience to memory."""
e = self.experience(states, all_state, action, all_actions, reward, next_state, all_next_state, done)
self.memory.append(e)
def sample(self):
"""Randomly sample a batch of experiences from memory."""
experiences = random.sample(self.memory, k=self.batch_size)
states = torch.from_numpy(np.vstack([e.state for e in experiences if e is not None])).float().to(device)
all_states = torch.from_numpy(np.vstack([e.all_state for e in experiences if e is not None])).float().to(device)
actions = torch.from_numpy(np.vstack([e.action
|
state = torch.from_numpy(state).float().detach().to(device)
# print(state.shape,"act")
self.n_step += 1
|
random_line_split
|
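Agent.act above follows an epsilon-greedy rule with epsilon = max((1500 - n_step) / 1500, 0.01): exploration decays linearly over the first 1500 steps and then stays at 1%. Below is a small standalone Go sketch of just that schedule, an illustration of the formula rather than the PyTorch agent itself.

package main

import "fmt"

// epsilon implements max((1500 - nStep) / 1500, 0.01) from Agent.act above.
func epsilon(nStep int) float64 {
	eps := float64(1500-nStep) / 1500.0
	if eps < 0.01 {
		eps = 0.01
	}
	return eps
}

func main() {
	for _, step := range []int{0, 500, 1000, 1500, 3000} {
		fmt.Printf("step %4d -> epsilon %.3f\n", step, epsilon(step))
	}
}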
MADDPGAgent.py
|
GAMMA = 0.99 # discount factor
# TAU = 1e-3 # for soft update of target parameters
ACTOR_LR = 1e-3 # Actor network learning rate
CRITIC_LR = 1e-4 # Critic network learning rate
UPDATE_EVERY = 20 # how often to update the network (time step)
# UPDATE_TIMES = 5 # how many times to update in one go
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class Agent():
"""Interacts with and learns from the environment."""
def __init__(self, state_size, action_size, num_agents, seed, fc1=400, fc2=300, update_times=10,
weight_decay=1.e-5):
"""Initialize an Agent object.
Params
======
state_size (int): dimension of each state
action_size (int): dimension of each action
seed (int): random seed
"""
self.state_size = state_size
self.action_size = action_size
self.seed = random.seed(seed)
self.n_seed = np.random.seed(seed)
self.num_agents = num_agents
self.update_times = update_times
self.n_step = 0
self.TAU = 1e-3
self.noise = []
for i in range(num_agents):
self.noise.append(rm.OrnsteinUhlenbeckProcess(size=(action_size,), std=LinearSchedule(0.4, 0, 2000)))
# critic local and target network (Q-Learning)
self.critic_local = Critic(state_size, action_size, fc1, fc2, seed).to(device)
self.critic_target = Critic(state_size, action_size, fc1, fc2, seed).to(device)
self.critic_target.load_state_dict(self.critic_local.state_dict())
# actor local and target network (Policy gradient)
self.actor_local = Actor(state_size, action_size, fc1, fc2, seed).to(device)
self.actor_target = Actor(state_size, action_size, fc1, fc2, seed).to(device)
self.actor_target.load_state_dict(self.actor_local.state_dict())
# optimizer for critic and actor network
self.optimizer_critic = optim.Adam(self.critic_local.parameters(), lr=CRITIC_LR, weight_decay=1.e-5)
self.optimizer_actor = optim.Adam(self.actor_local.parameters(), lr=ACTOR_LR)
# Replay memory
self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, seed)
# Initialize time step (for updating every UPDATE_EVERY steps)
self.t_step = 0
self.a_step = 0
def step(self, state, action, reward, next_state, done):
# Save experience in replay memory
for i in range(self.num_agents):
all_state = np.concatenate((state[i], state[1 - i]))
all_actions = np.concatenate((action[i], action[1 - i]))
all_next_state = np.concatenate((next_state[i], next_state[1 - i]))
self.memory.add(state[i], all_state, action[i], all_actions, reward[i], next_state[i], all_next_state,
done[i])
# Learn every UPDATE_EVERY time steps.
self.t_step = (self.t_step + 1) % UPDATE_EVERY
if self.t_step == 0:
# If enough samples are available in memory, get random subset and learn
if len(self.memory) > BATCH_SIZE:
for i in range(self.update_times):
experiences = self.memory.sample()
self.learn(experiences, GAMMA)
def act(self, state, training=True):
"""Returns continous actions values for all action for given state as per current policy.
Params
======
state (array_like): current state
"""
state = torch.from_numpy(state).float().detach().to(device)
# print(state.shape,"act")
self.n_step += 1
epsilon = max((1500 - self.n_step) / 1500, .01)
self.actor_local.eval()
with torch.no_grad():
actions = self.actor_local(state)
self.actor_local.train()
if training:
# return np.clip(actions.cpu().data.numpy()+np.random.uniform(-1,1,(2,2))*epsilon,-1,1) #adding noise to action space
r = np.random.random()
if r <= epsilon:
return np.random.uniform(-1, 1, (2, 2))
else:
|
else:
return actions.cpu().data.numpy()
def learn(self, experiences, gamma):
"""Update value parameters using given batch of experience tuples.
Params
======
experiences (Tuple[torch.Variable]): tuple of (s, a, r, s', done) tuples
gamma (float): discount factor
"""
states, all_state, action, all_actions, rewards, next_state, all_next_state, dones = experiences
batch_size = all_next_state.shape[0]
all_next_actions = self.actor_target(all_next_state.view(batch_size * 2, -1)).view(batch_size, -1)
critic_target_input = torch.cat((all_next_state, all_next_actions.view(batch_size * 2, -1)[1::2]), dim=1).to(
device)
with torch.no_grad():
Q_target_next = self.critic_target(critic_target_input, all_next_actions.view(batch_size * 2, -1)[::2])
Q_targets = rewards + (gamma * Q_target_next * (1 - dones))
critic_local_input = torch.cat((all_state, all_actions.view(batch_size * 2, -1)[1::2]), dim=1).to(device)
Q_expected = self.critic_local(critic_local_input, action)
# critic loss
huber_loss = torch.nn.SmoothL1Loss()
loss = huber_loss(Q_expected, Q_targets.detach())
self.optimizer_critic.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(self.critic_local.parameters(), 1)
self.optimizer_critic.step()
# actor loss
action_pr_self = self.actor_local(states)
action_pr_other = self.actor_local(all_next_state.view(batch_size * 2, -1)[1::2]).detach()
# critic_local_input2=torch.cat((all_state,torch.cat((action_pr_self,action_pr_other),dim=1)),dim=1)
critic_local_input2 = torch.cat((all_state, action_pr_other), dim=1)
p_loss = -self.critic_local(critic_local_input2, action_pr_self).mean()
self.optimizer_actor.zero_grad()
p_loss.backward()
self.optimizer_actor.step()
# ------------------- update target network ------------------- #
self.TAU = min(5e-1, self.TAU * 1.001)
self.soft_update(self.critic_local, self.critic_target, self.TAU)
self.soft_update(self.actor_local, self.actor_target, self.TAU)
def soft_update(self, local_model, target_model, tau):
"""Soft update model parameters.
θ_target = τ*θ_local + (1 - τ)*θ_target
Params
======
local_model (PyTorch model): weights will be copied from
target_model (PyTorch model): weights will be copied to
tau (float): interpolation parameter
"""
for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
target_param.data.copy_(tau * local_param.data + (1.0 - tau) * target_param.data)
def reset_random(self):
for i in range(self.num_agents):
self.noise[i].reset_states()
class ReplayBuffer:
"""Fixed-size buffer to store experience tuples."""
def __init__(self, action_size, buffer_size, batch_size, seed):
"""Initialize a ReplayBuffer object.
Params
======
action_size (int): dimension of each action
buffer_size (int): maximum size of buffer
batch_size (int): size of each training batch
seed (int): random seed
"""
self.action_size = action_size
self.memory = deque(maxlen=buffer_size)
self.batch_size = batch_size
self.experience = namedtuple("Experience",
field_names=["state", "all_state", "action", "all_actions", "reward", "next_state",
"all_next_state", "done"])
self.seed = random.seed(seed)
def add(self, states, all_state, action, all_actions, reward, next_state, all_next_state, done):
"""Add a new experience to memory."""
e = self.experience(states, all_state, action, all_actions, reward, next_state, all_next_state, done)
self.memory.append(e)
def sample(self):
"""Randomly sample a batch of experiences from memory."""
experiences = random.sample(self.memory, k=self.batch_size)
states = torch.from_numpy(np.vstack([e.state for e in experiences if e is not None])).float().to(device)
all_states = torch.from_numpy(np.vstack([e.all_state for e in experiences if e is not None])).float().to(device)
actions = torch.from_numpy(np.vstack([e.action
|
return np.clip(actions.cpu().data.numpy(), -1, 1) # epsilon greedy policy
|
conditional_block
|
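The soft_update docstring above states the target-network rule θ_target = τ*θ_local + (1 - τ)*θ_target, applied element-wise to every parameter pair. The plain-slice Go sketch below makes that update concrete; it illustrates the formula only and is not the PyTorch implementation.

package main

import "fmt"

// softUpdate applies target[i] = tau*local[i] + (1-tau)*target[i] element-wise.
func softUpdate(target, local []float64, tau float64) {
	for i := range target {
		target[i] = tau*local[i] + (1.0-tau)*target[i]
	}
}

func main() {
	target := []float64{0.0, 0.0, 0.0}
	local := []float64{1.0, -1.0, 0.5}
	for step := 0; step < 3; step++ {
		softUpdate(target, local, 0.1) // tau = 0.1: target drifts slowly toward local
		fmt.Println(target)
	}
}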
MADDPGAgent.py
|
GAMMA = 0.99 # discount factor
# TAU = 1e-3 # for soft update of target parameters
ACTOR_LR = 1e-3 # Actor network learning rate
CRITIC_LR = 1e-4 # Critic network learning rate
UPDATE_EVERY = 20 # how often to update the network (time step)
# UPDATE_TIMES = 5 # how many times to update in one go
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class Agent():
"""Interacts with and learns from the environment."""
def __init__(self, state_size, action_size, num_agents, seed, fc1=400, fc2=300, update_times=10,
weight_decay=1.e-5):
"""Initialize an Agent object.
Params
======
state_size (int): dimension of each state
action_size (int): dimension of each action
seed (int): random seed
"""
self.state_size = state_size
self.action_size = action_size
self.seed = random.seed(seed)
self.n_seed = np.random.seed(seed)
self.num_agents = num_agents
self.update_times = update_times
self.n_step = 0
self.TAU = 1e-3
self.noise = []
for i in range(num_agents):
self.noise.append(rm.OrnsteinUhlenbeckProcess(size=(action_size,), std=LinearSchedule(0.4, 0, 2000)))
# critic local and target network (Q-Learning)
self.critic_local = Critic(state_size, action_size, fc1, fc2, seed).to(device)
self.critic_target = Critic(state_size, action_size, fc1, fc2, seed).to(device)
self.critic_target.load_state_dict(self.critic_local.state_dict())
# actor local and target network (Policy gradient)
self.actor_local = Actor(state_size, action_size, fc1, fc2, seed).to(device)
self.actor_target = Actor(state_size, action_size, fc1, fc2, seed).to(device)
self.actor_target.load_state_dict(self.actor_local.state_dict())
# optimizer for critic and actor network
self.optimizer_critic = optim.Adam(self.critic_local.parameters(), lr=CRITIC_LR, weight_decay=1.e-5)
self.optimizer_actor = optim.Adam(self.actor_local.parameters(), lr=ACTOR_LR)
# Replay memory
self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, seed)
# Initialize time step (for updating every UPDATE_EVERY steps)
self.t_step = 0
self.a_step = 0
def step(self, state, action, reward, next_state, done):
# Save experience in replay memory
for i in range(self.num_agents):
all_state = np.concatenate((state[i], state[1 - i]))
all_actions = np.concatenate((action[i], action[1 - i]))
all_next_state = np.concatenate((next_state[i], next_state[1 - i]))
self.memory.add(state[i], all_state, action[i], all_actions, reward[i], next_state[i], all_next_state,
done[i])
# Learn every UPDATE_EVERY time steps.
self.t_step = (self.t_step + 1) % UPDATE_EVERY
if self.t_step == 0:
# If enough samples are available in memory, get random subset and learn
if len(self.memory) > BATCH_SIZE:
for i in range(self.update_times):
experiences = self.memory.sample()
self.learn(experiences, GAMMA)
def act(self, state, training=True):
"""Returns continous actions values for all action for given state as per current policy.
Params
======
state (array_like): current state
"""
state = torch.from_numpy(state).float().detach().to(device)
# print(state.shape,"act")
self.n_step += 1
epsilon = max((1500 - self.n_step) / 1500, .01)
self.actor_local.eval()
with torch.no_grad():
actions = self.actor_local(state)
self.actor_local.train()
if training:
# return np.clip(actions.cpu().data.numpy()+np.random.uniform(-1,1,(2,2))*epsilon,-1,1) #adding noise to action space
r = np.random.random()
if r <= epsilon:
return np.random.uniform(-1, 1, (2, 2))
else:
return np.clip(actions.cpu().data.numpy(), -1, 1) # epsilon greedy policy
else:
return actions.cpu().data.numpy()
def learn(self, experiences, gamma):
"""Update value parameters using given batch of experience tuples.
Params
======
experiences (Tuple[torch.Variable]): tuple of (s, a, r, s', done) tuples
gamma (float): discount factor
"""
states, all_state, action, all_actions, rewards, next_state, all_next_state, dones = experiences
batch_size = all_next_state.shape[0]
all_next_actions = self.actor_target(all_next_state.view(batch_size * 2, -1)).view(batch_size, -1)
critic_target_input = torch.cat((all_next_state, all_next_actions.view(batch_size * 2, -1)[1::2]), dim=1).to(
device)
with torch.no_grad():
Q_target_next = self.critic_target(critic_target_input, all_next_actions.view(batch_size * 2, -1)[::2])
Q_targets = rewards + (gamma * Q_target_next * (1 - dones))
critic_local_input = torch.cat((all_state, all_actions.view(batch_size * 2, -1)[1::2]), dim=1).to(device)
Q_expected = self.critic_local(critic_local_input, action)
# critic loss
huber_loss = torch.nn.SmoothL1Loss()
loss = huber_loss(Q_expected, Q_targets.detach())
self.optimizer_critic.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(self.critic_local.parameters(), 1)
self.optimizer_critic.step()
# actor loss
action_pr_self = self.actor_local(states)
action_pr_other = self.actor_local(all_next_state.view(batch_size * 2, -1)[1::2]).detach()
# critic_local_input2=torch.cat((all_state,torch.cat((action_pr_self,action_pr_other),dim=1)),dim=1)
critic_local_input2 = torch.cat((all_state, action_pr_other), dim=1)
p_loss = -self.critic_local(critic_local_input2, action_pr_self).mean()
self.optimizer_actor.zero_grad()
p_loss.backward()
self.optimizer_actor.step()
# ------------------- update target network ------------------- #
self.TAU = min(5e-1, self.TAU * 1.001)
self.soft_update(self.critic_local, self.critic_target, self.TAU)
self.soft_update(self.actor_local, self.actor_target, self.TAU)
def soft_update(self, local_model, target_model, tau):
"""Soft update model parameters.
θ_target = τ*θ_local + (1 - τ)*θ_target
Params
======
local_model (PyTorch model): weights will be copied from
target_model (PyTorch model): weights will be copied to
tau (float): interpolation parameter
"""
for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
target_param.data.copy_(tau * local_param.data + (1.0 - tau) * target_param.data)
def reset_random(self):
for i in range(self.num_agents):
self.noise[i].reset_states()
class ReplayBuffer:
"""Fixed-size buffer to store experience tuples."""
def __init__(self, action_size, buffer_size, batch_size, seed):
"""In
|
def add(self, states, all_state, action, all_actions, reward, next_state, all_next_state, done):
"""Add a new experience to memory."""
e = self.experience(states, all_state, action, all_actions, reward, next_state, all_next_state, done)
self.memory.append(e)
def sample(self):
"""Randomly sample a batch of experiences from memory."""
experiences = random.sample(self.memory, k=self.batch_size)
states = torch.from_numpy(np.vstack([e.state for e in experiences if e is not None])).float().to(device)
all_states = torch.from_numpy(np.vstack([e.all_state for e in experiences if e is not None])).float().to(device)
actions = torch.from_numpy(np.vstack([
|
itialize a ReplayBuffer object.
Params
======
action_size (int): dimension of each action
buffer_size (int): maximum size of buffer
batch_size (int): size of each training batch
seed (int): random seed
"""
self.action_size = action_size
self.memory = deque(maxlen=buffer_size)
self.batch_size = batch_size
self.experience = namedtuple("Experience",
field_names=["state", "all_state", "action", "all_actions", "reward", "next_state",
"all_next_state", "done"])
self.seed = random.seed(seed)
|
identifier_body
|
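ReplayBuffer above is a fixed-capacity deque of named experience tuples plus uniform random sampling for minibatches. The Go sketch below captures the same idea with a ring buffer and a trimmed-down experience struct; note that it samples with replacement, unlike Python's random.sample, and is only an illustration, not the project's buffer.

package main

import (
	"fmt"
	"math/rand"
)

// experience keeps a trimmed-down subset of the fields stored by the Python buffer.
type experience struct {
	state, action []float64
	reward        float64
	done          bool
}

// replayBuffer is a fixed-capacity ring buffer, standing in for deque(maxlen=...).
type replayBuffer struct {
	buf  []experience
	next int // slot the next add overwrites once the buffer is full
}

func newReplayBuffer(capacity int) *replayBuffer {
	return &replayBuffer{buf: make([]experience, 0, capacity)}
}

func (r *replayBuffer) add(e experience) {
	if len(r.buf) < cap(r.buf) {
		r.buf = append(r.buf, e)
		return
	}
	r.buf[r.next] = e // overwrite the oldest entry, like a full deque
	r.next = (r.next + 1) % cap(r.buf)
}

// sample draws batchSize experiences uniformly at random (with replacement).
func (r *replayBuffer) sample(batchSize int) []experience {
	out := make([]experience, batchSize)
	for i := range out {
		out[i] = r.buf[rand.Intn(len(r.buf))]
	}
	return out
}

func main() {
	rb := newReplayBuffer(4)
	for i := 0; i < 6; i++ {
		rb.add(experience{reward: float64(i)})
	}
	fmt.Println(rb.sample(2))
}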
MADDPGAgent.py
|
GAMMA = 0.99 # discount factor
# TAU = 1e-3 # for soft update of target parameters
ACTOR_LR = 1e-3 # Actor network learning rate
CRITIC_LR = 1e-4 # Critic network learning rate
UPDATE_EVERY = 20 # how often to update the network (time step)
# UPDATE_TIMES = 5 # how many times to update in one go
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class Agent():
"""Interacts with and learns from the environment."""
def __init__(self, state_size, action_size, num_agents, seed, fc1=400, fc2=300, update_times=10,
weight_decay=1.e-5):
"""Initialize an Agent object.
Params
======
state_size (int): dimension of each state
action_size (int): dimension of each action
seed (int): random seed
"""
self.state_size = state_size
self.action_size = action_size
self.seed = random.seed(seed)
self.n_seed = np.random.seed(seed)
self.num_agents = num_agents
self.update_times = update_times
self.n_step = 0
self.TAU = 1e-3
self.noise = []
for i in range(num_agents):
self.noise.append(rm.OrnsteinUhlenbeckProcess(size=(action_size,), std=LinearSchedule(0.4, 0, 2000)))
# critic local and target network (Q-Learning)
self.critic_local = Critic(state_size, action_size, fc1, fc2, seed).to(device)
self.critic_target = Critic(state_size, action_size, fc1, fc2, seed).to(device)
self.critic_target.load_state_dict(self.critic_local.state_dict())
# actor local and target network (Policy gradient)
self.actor_local = Actor(state_size, action_size, fc1, fc2, seed).to(device)
self.actor_target = Actor(state_size, action_size, fc1, fc2, seed).to(device)
self.actor_target.load_state_dict(self.actor_local.state_dict())
# optimizer for critic and actor network
self.optimizer_critic = optim.Adam(self.critic_local.parameters(), lr=CRITIC_LR, weight_decay=1.e-5)
self.optimizer_actor = optim.Adam(self.actor_local.parameters(), lr=ACTOR_LR)
# Replay memory
self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, seed)
# Initialize time step (for updating every UPDATE_EVERY steps)
self.t_step = 0
self.a_step = 0
def step(self, state, action, reward, next_state, done):
# Save experience in replay memory
for i in range(self.num_agents):
all_state = np.concatenate((state[i], state[1 - i]))
all_actions = np.concatenate((action[i], action[1 - i]))
all_next_state = np.concatenate((next_state[i], next_state[1 - i]))
self.memory.add(state[i], all_state, action[i], all_actions, reward[i], next_state[i], all_next_state,
done[i])
# Learn every UPDATE_EVERY time steps.
self.t_step = (self.t_step + 1) % UPDATE_EVERY
if self.t_step == 0:
# If enough samples are available in memory, get random subset and learn
if len(self.memory) > BATCH_SIZE:
for i in range(self.update_times):
experiences = self.memory.sample()
self.learn(experiences, GAMMA)
def
|
(self, state, training=True):
"""Returns continous actions values for all action for given state as per current policy.
Params
======
state (array_like): current state
"""
state = torch.from_numpy(state).float().detach().to(device)
# print(state.shape,"act")
self.n_step += 1
epsilon = max((1500 - self.n_step) / 1500, .01)
self.actor_local.eval()
with torch.no_grad():
actions = self.actor_local(state)
self.actor_local.train()
if training:
# return np.clip(actions.cpu().data.numpy()+np.random.uniform(-1,1,(2,2))*epsilon,-1,1) #adding noise to action space
r = np.random.random()
if r <= epsilon:
return np.random.uniform(-1, 1, (2, 2))
else:
return np.clip(actions.cpu().data.numpy(), -1, 1) # epsilon greedy policy
else:
return actions.cpu().data.numpy()
def learn(self, experiences, gamma):
"""Update value parameters using given batch of experience tuples.
Params
======
experiences (Tuple[torch.Variable]): tuple of (s, a, r, s', done) tuples
gamma (float): discount factor
"""
states, all_state, action, all_actions, rewards, next_state, all_next_state, dones = experiences
batch_size = all_next_state.shape[0]
all_next_actions = self.actor_target(all_next_state.view(batch_size * 2, -1)).view(batch_size, -1)
critic_target_input = torch.cat((all_next_state, all_next_actions.view(batch_size * 2, -1)[1::2]), dim=1).to(
device)
with torch.no_grad():
Q_target_next = self.critic_target(critic_target_input, all_next_actions.view(batch_size * 2, -1)[::2])
Q_targets = rewards + (gamma * Q_target_next * (1 - dones))
critic_local_input = torch.cat((all_state, all_actions.view(batch_size * 2, -1)[1::2]), dim=1).to(device)
Q_expected = self.critic_local(critic_local_input, action)
# critic loss
huber_loss = torch.nn.SmoothL1Loss()
loss = huber_loss(Q_expected, Q_targets.detach())
self.optimizer_critic.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(self.critic_local.parameters(), 1)
self.optimizer_critic.step()
# actor loss
action_pr_self = self.actor_local(states)
action_pr_other = self.actor_local(all_next_state.view(batch_size * 2, -1)[1::2]).detach()
# critic_local_input2=torch.cat((all_state,torch.cat((action_pr_self,action_pr_other),dim=1)),dim=1)
critic_local_input2 = torch.cat((all_state, action_pr_other), dim=1)
p_loss = -self.critic_local(critic_local_input2, action_pr_self).mean()
self.optimizer_actor.zero_grad()
p_loss.backward()
self.optimizer_actor.step()
# ------------------- update target network ------------------- #
self.TAU = min(5e-1, self.TAU * 1.001)
self.soft_update(self.critic_local, self.critic_target, self.TAU)
self.soft_update(self.actor_local, self.actor_target, self.TAU)
def soft_update(self, local_model, target_model, tau):
"""Soft update model parameters.
θ_target = τ*θ_local + (1 - τ)*θ_target
Params
======
local_model (PyTorch model): weights will be copied from
target_model (PyTorch model): weights will be copied to
tau (float): interpolation parameter
"""
for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
target_param.data.copy_(tau * local_param.data + (1.0 - tau) * target_param.data)
def reset_random(self):
for i in range(self.num_agents):
self.noise[i].reset_states()
class ReplayBuffer:
"""Fixed-size buffer to store experience tuples."""
def __init__(self, action_size, buffer_size, batch_size, seed):
"""Initialize a ReplayBuffer object.
Params
======
action_size (int): dimension of each action
buffer_size (int): maximum size of buffer
batch_size (int): size of each training batch
seed (int): random seed
"""
self.action_size = action_size
self.memory = deque(maxlen=buffer_size)
self.batch_size = batch_size
self.experience = namedtuple("Experience",
field_names=["state", "all_state", "action", "all_actions", "reward", "next_state",
"all_next_state", "done"])
self.seed = random.seed(seed)
def add(self, states, all_state, action, all_actions, reward, next_state, all_next_state, done):
"""Add a new experience to memory."""
e = self.experience(states, all_state, action, all_actions, reward, next_state, all_next_state, done)
self.memory.append(e)
def sample(self):
"""Randomly sample a batch of experiences from memory."""
experiences = random.sample(self.memory, k=self.batch_size)
states = torch.from_numpy(np.vstack([e.state for e in experiences if e is not None])).float().to(device)
all_states = torch.from_numpy(np.vstack([e.all_state for e in experiences if e is not None])).float().to(device)
actions = torch.from_numpy(np.vstack([e
|
act
|
identifier_name
|
server.go
|
"fmt"
"math/rand"
"net"
"net/http"
"regexp"
"sort"
"strings"
"time"
"github.com/casbin/casbin/v2"
"github.com/cesanta/glog"
"github.com/docker/distribution/registry/auth/token"
"github.com/cesanta/docker_auth/auth_server/api"
"github.com/cesanta/docker_auth/auth_server/authn"
"github.com/cesanta/docker_auth/auth_server/authz"
)
var (
hostPortRegex = regexp.MustCompile(`^(?:\[(.+)\]:\d+|([^:]+):\d+)$`)
scopeRegex = regexp.MustCompile(`([a-z0-9]+)(\([a-z0-9]+\))?`)
)
type AuthServer struct {
config *Config
authenticators []api.Authenticator
authorizers []api.Authorizer
ga *authn.GoogleAuth
gha *authn.GitHubAuth
oidc *authn.OIDCAuth
glab *authn.GitlabAuth
}
func NewAuthServer(c *Config) (*AuthServer, error) {
as := &AuthServer{
config: c,
authorizers: []api.Authorizer{},
}
if c.ACL != nil {
staticAuthorizer, err := authz.NewACLAuthorizer(c.ACL)
if err != nil {
return nil, err
}
as.authorizers = append(as.authorizers, staticAuthorizer)
}
if c.ACLMongo != nil {
mongoAuthorizer, err := authz.NewACLMongoAuthorizer(c.ACLMongo)
if err != nil {
return nil, err
}
as.authorizers = append(as.authorizers, mongoAuthorizer)
}
if c.ACLXorm != nil {
xormAuthorizer, err := authz.NewACLXormAuthz(c.ACLXorm)
if err != nil {
return nil, err
}
as.authorizers = append(as.authorizers, xormAuthorizer)
}
if c.ExtAuthz != nil {
extAuthorizer := authz.NewExtAuthzAuthorizer(c.ExtAuthz)
as.authorizers = append(as.authorizers, extAuthorizer)
}
if c.Users != nil {
as.authenticators = append(as.authenticators, authn.NewStaticUserAuth(c.Users))
}
if c.ExtAuth != nil {
as.authenticators = append(as.authenticators, authn.NewExtAuth(c.ExtAuth))
}
if c.GoogleAuth != nil {
ga, err := authn.NewGoogleAuth(c.GoogleAuth)
if err != nil {
return nil, err
}
as.authenticators = append(as.authenticators, ga)
as.ga = ga
}
if c.GitHubAuth != nil {
gha, err := authn.NewGitHubAuth(c.GitHubAuth)
if err != nil {
return nil, err
}
as.authenticators = append(as.authenticators, gha)
as.gha = gha
}
if c.OIDCAuth != nil {
oidc, err := authn.NewOIDCAuth(c.OIDCAuth)
if err != nil {
return nil, err
}
as.authenticators = append(as.authenticators, oidc)
as.oidc = oidc
}
if c.GitlabAuth != nil {
glab, err := authn.NewGitlabAuth(c.GitlabAuth)
if err != nil {
return nil, err
}
as.authenticators = append(as.authenticators, glab)
as.glab = glab
}
if c.LDAPAuth != nil {
la, err := authn.NewLDAPAuth(c.LDAPAuth)
if err != nil {
return nil, err
}
as.authenticators = append(as.authenticators, la)
}
if c.MongoAuth != nil {
ma, err := authn.NewMongoAuth(c.MongoAuth)
if err != nil {
return nil, err
}
as.authenticators = append(as.authenticators, ma)
}
if c.XormAuthn != nil {
xa, err := authn.NewXormAuth(c.XormAuthn)
if err != nil {
return nil, err
}
as.authenticators = append(as.authenticators, xa)
}
if c.PluginAuthn != nil {
pluginAuthn, err := authn.NewPluginAuthn(c.PluginAuthn)
if err != nil {
return nil, err
}
as.authenticators = append(as.authenticators, pluginAuthn)
}
if c.PluginAuthz != nil {
pluginAuthz, err := authz.NewPluginAuthzAuthorizer(c.PluginAuthz)
if err != nil {
return nil, err
}
as.authorizers = append(as.authorizers, pluginAuthz)
}
if c.CasbinAuthz != nil {
enforcer, err := casbin.NewEnforcer(c.CasbinAuthz.ModelFilePath, c.CasbinAuthz.PolicyFilePath)
if err != nil {
return nil, err
}
casbinAuthz, err := authz.NewCasbinAuthorizer(enforcer)
if err != nil {
return nil, err
}
as.authorizers = append(as.authorizers, casbinAuthz)
}
return as, nil
}
type authRequest struct {
RemoteConnAddr string
RemoteAddr string
RemoteIP net.IP
User string
Password api.PasswordString
Account string
Service string
Scopes []authScope
Labels api.Labels
}
type authScope struct {
Type string
Class string
Name string
Actions []string
}
type authzResult struct {
scope authScope
autorizedActions []string
}
func (ar authRequest) String() string {
return fmt.Sprintf("{%s:%s@%s %s}", ar.User, ar.Password, ar.RemoteAddr, ar.Scopes)
}
func parseRemoteAddr(ra string) net.IP {
hp := hostPortRegex.FindStringSubmatch(ra)
if hp != nil {
if hp[1] != "" {
ra = hp[1]
} else if hp[2] != "" {
ra = hp[2]
}
}
res := net.ParseIP(ra)
return res
}
func parseScope(scope string) (string, string, error) {
parts := scopeRegex.FindStringSubmatch(scope)
if parts == nil {
return "", "", fmt.Errorf("malformed scope request")
}
switch len(parts) {
case 3:
return parts[1], "", nil
case 4:
return parts[1], parts[3], nil
default:
return "", "", fmt.Errorf("malformed scope request")
}
}
func (as *AuthServer) ParseRequest(req *http.Request) (*authRequest, error) {
ar := &authRequest{RemoteConnAddr: req.RemoteAddr, RemoteAddr: req.RemoteAddr}
if as.config.Server.RealIPHeader != "" {
hv := req.Header.Get(as.config.Server.RealIPHeader)
ips := strings.Split(hv, ",")
realIPPos := as.config.Server.RealIPPos
if realIPPos < 0 {
realIPPos = len(ips) + realIPPos
if realIPPos < 0 {
realIPPos = 0
}
}
ar.RemoteAddr = strings.TrimSpace(ips[realIPPos])
glog.V(3).Infof("Conn ip %s, %s: %s, addr: %s", ar.RemoteAddr, as.config.Server.RealIPHeader, hv, ar.RemoteAddr)
if ar.RemoteAddr == "" {
return nil, fmt.Errorf("client address not provided")
}
}
ar.RemoteIP = parseRemoteAddr(ar.RemoteAddr)
if ar.RemoteIP == nil {
return nil, fmt.Errorf("unable to parse remote addr %s", ar.RemoteAddr)
}
user, password, haveBasicAuth := req.BasicAuth()
if haveBasicAuth {
ar.User = user
ar.Password = api.PasswordString(password)
} else if req.Method == "POST" {
// username and password could be part of form data
username := req.FormValue("username")
password := req.FormValue("password")
if username != "" && password != "" {
ar.User = username
ar.Password = api.PasswordString(password)
}
}
ar.Account = req.FormValue("account")
if ar.Account == "" {
ar.Account = ar.User
} else if haveBasicAuth && ar.Account != ar.User {
return nil, fmt.Errorf("user and account are not the same (%q vs %q)", ar.User, ar.Account)
}
ar.Service = req.FormValue("service")
if err := req.ParseForm(); err != nil {
return nil, fmt.Errorf("invalid form value")
}
// https://github.com/docker/distribution/blob/1b9ab303a477ded9bdd3fc97e9119fa8f9e58fca/docs/spec/auth/scope.md#resource-scope-grammar
if req.FormValue("scope") != "" {
for _, scope
|
random_line_split
|
||
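parseScope above, together with the scope loop in ParseRequest (shown in full in the next server.go row), splits each scope form value into type[(class)]:name:actions, using scopeRegex for the type/class part and strings.Split for the rest. The standalone sketch below parses the same grammar in a simplified way (middle parts are re-joined so names may contain ':', and error handling is minimal); it is an illustration of the scope format, not the server's code.

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// scopeRegex matches "type" or "type(class)"; group 3 is the class without parentheses.
var scopeRegex = regexp.MustCompile(`([a-z0-9]+)(\(([a-z0-9]+)\))?`)

type authScope struct {
	Type, Class, Name string
	Actions           []string
}

func parse(scopeStr string) (authScope, error) {
	parts := strings.Split(scopeStr, ":")
	if len(parts) < 3 {
		return authScope{}, fmt.Errorf("invalid scope: %q", scopeStr)
	}
	m := scopeRegex.FindStringSubmatch(parts[0])
	if m == nil {
		return authScope{}, fmt.Errorf("malformed scope request")
	}
	name := strings.Join(parts[1:len(parts)-1], ":") // names may contain ':' (e.g. host:port/repo)
	return authScope{
		Type:    m[1],
		Class:   m[3],
		Name:    name,
		Actions: strings.Split(parts[len(parts)-1], ","),
	}, nil
}

func main() {
	for _, s := range []string{"repository:foo/bar:pull,push", "registry:catalog:*"} {
		sc, err := parse(s)
		fmt.Println(sc, err)
	}
}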
server.go
|
}
if c.LDAPAuth != nil {
la, err := authn.NewLDAPAuth(c.LDAPAuth)
if err != nil {
return nil, err
}
as.authenticators = append(as.authenticators, la)
}
if c.MongoAuth != nil {
ma, err := authn.NewMongoAuth(c.MongoAuth)
if err != nil {
return nil, err
}
as.authenticators = append(as.authenticators, ma)
}
if c.XormAuthn != nil {
xa, err := authn.NewXormAuth(c.XormAuthn)
if err != nil {
return nil, err
}
as.authenticators = append(as.authenticators, xa)
}
if c.PluginAuthn != nil {
pluginAuthn, err := authn.NewPluginAuthn(c.PluginAuthn)
if err != nil {
return nil, err
}
as.authenticators = append(as.authenticators, pluginAuthn)
}
if c.PluginAuthz != nil {
pluginAuthz, err := authz.NewPluginAuthzAuthorizer(c.PluginAuthz)
if err != nil {
return nil, err
}
as.authorizers = append(as.authorizers, pluginAuthz)
}
if c.CasbinAuthz != nil {
enforcer, err := casbin.NewEnforcer(c.CasbinAuthz.ModelFilePath, c.CasbinAuthz.PolicyFilePath)
if err != nil {
return nil, err
}
casbinAuthz, err := authz.NewCasbinAuthorizer(enforcer)
if err != nil {
return nil, err
}
as.authorizers = append(as.authorizers, casbinAuthz)
}
return as, nil
}
type authRequest struct {
RemoteConnAddr string
RemoteAddr string
RemoteIP net.IP
User string
Password api.PasswordString
Account string
Service string
Scopes []authScope
Labels api.Labels
}
type authScope struct {
Type string
Class string
Name string
Actions []string
}
type authzResult struct {
scope authScope
autorizedActions []string
}
func (ar authRequest) String() string {
return fmt.Sprintf("{%s:%s@%s %s}", ar.User, ar.Password, ar.RemoteAddr, ar.Scopes)
}
func parseRemoteAddr(ra string) net.IP {
hp := hostPortRegex.FindStringSubmatch(ra)
if hp != nil {
if hp[1] != "" {
ra = hp[1]
} else if hp[2] != "" {
ra = hp[2]
}
}
res := net.ParseIP(ra)
return res
}
func parseScope(scope string) (string, string, error) {
parts := scopeRegex.FindStringSubmatch(scope)
if parts == nil {
return "", "", fmt.Errorf("malformed scope request")
}
switch len(parts) {
case 3:
return parts[1], "", nil
case 4:
return parts[1], parts[3], nil
default:
return "", "", fmt.Errorf("malformed scope request")
}
}
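// ParseRequest extracts the client address, credentials, account, service and requested scopes from an incoming token request.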
func (as *AuthServer) ParseRequest(req *http.Request) (*authRequest, error) {
ar := &authRequest{RemoteConnAddr: req.RemoteAddr, RemoteAddr: req.RemoteAddr}
if as.config.Server.RealIPHeader != "" {
hv := req.Header.Get(as.config.Server.RealIPHeader)
ips := strings.Split(hv, ",")
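// A negative RealIPPos counts from the end of the forwarded-for list (-1 = last entry), clamped to the first entry.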
realIPPos := as.config.Server.RealIPPos
if realIPPos < 0 {
realIPPos = len(ips) + realIPPos
if realIPPos < 0 {
realIPPos = 0
}
}
ar.RemoteAddr = strings.TrimSpace(ips[realIPPos])
glog.V(3).Infof("Conn ip %s, %s: %s, addr: %s", ar.RemoteAddr, as.config.Server.RealIPHeader, hv, ar.RemoteAddr)
if ar.RemoteAddr == "" {
return nil, fmt.Errorf("client address not provided")
}
}
ar.RemoteIP = parseRemoteAddr(ar.RemoteAddr)
if ar.RemoteIP == nil {
return nil, fmt.Errorf("unable to parse remote addr %s", ar.RemoteAddr)
}
user, password, haveBasicAuth := req.BasicAuth()
if haveBasicAuth {
ar.User = user
ar.Password = api.PasswordString(password)
} else if req.Method == "POST" {
// username and password could be part of form data
username := req.FormValue("username")
password := req.FormValue("password")
if username != "" && password != "" {
ar.User = username
ar.Password = api.PasswordString(password)
}
}
ar.Account = req.FormValue("account")
if ar.Account == "" {
ar.Account = ar.User
} else if haveBasicAuth && ar.Account != ar.User {
return nil, fmt.Errorf("user and account are not the same (%q vs %q)", ar.User, ar.Account)
}
ar.Service = req.FormValue("service")
if err := req.ParseForm(); err != nil {
return nil, fmt.Errorf("invalid form value")
}
// https://github.com/docker/distribution/blob/1b9ab303a477ded9bdd3fc97e9119fa8f9e58fca/docs/spec/auth/scope.md#resource-scope-grammar
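// Example scope values per that grammar: "repository:samalba/my-app:pull,push" or, with a class, "repository(plugin):my-plugin:pull".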
if req.FormValue("scope") != "" {
for _, scopeValue := range req.Form["scope"] {
for _, scopeStr := range strings.Split(scopeValue, " ") {
parts := strings.Split(scopeStr, ":")
var scope authScope
scopeType, scopeClass, err := parseScope(parts[0])
if err != nil {
return nil, err
}
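// Either type:name:actions (3 parts), or 4 parts when the name itself contains a colon, e.g. a registry host:port prefix.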
switch len(parts) {
case 3:
scope = authScope{
Type: scopeType,
Class: scopeClass,
Name: parts[1],
Actions: strings.Split(parts[2], ","),
}
case 4:
scope = authScope{
Type: scopeType,
Class: scopeClass,
Name: parts[1] + ":" + parts[2],
Actions: strings.Split(parts[3], ","),
}
default:
return nil, fmt.Errorf("invalid scope: %q", scopeStr)
}
sort.Strings(scope.Actions)
ar.Scopes = append(ar.Scopes, scope)
}
}
}
return ar, nil
}
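// Authenticate runs the request through the configured authenticators in order; the first one that matches decides, and no match means deny.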
func (as *AuthServer) Authenticate(ar *authRequest) (bool, api.Labels, error) {
for i, a := range as.authenticators {
result, labels, err := a.Authenticate(ar.Account, ar.Password)
glog.V(2).Infof("Authn %s %s -> %t, %+v, %v", a.Name(), ar.Account, result, labels, err)
if err != nil {
if err == api.NoMatch {
continue
} else if err == api.WrongPass {
glog.Warningf("Failed authentication with %s: %s", err, ar.Account)
return false, nil, nil
}
err = fmt.Errorf("authn #%d returned error: %s", i+1, err)
glog.Errorf("%s: %s", ar, err)
return false, nil, err
}
return result, labels, nil
}
// Deny by default.
glog.Warningf("%s did not match any authn rule", ar)
return false, nil, nil
}
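// authorizeScope asks each configured authorizer which of the requested actions are allowed for a single scope; no match means deny.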
func (as *AuthServer) authorizeScope(ai *api.AuthRequestInfo) ([]string, error) {
for i, a := range as.authorizers {
result, err := a.Authorize(ai)
glog.V(2).Infof("Authz %s %s -> %s, %s", a.Name(), *ai, result, err)
if err != nil {
if err == api.NoMatch {
continue
}
err = fmt.Errorf("authz #%d returned error: %s", i+1, err)
glog.Errorf("%s: %s", *ai, err)
return nil, err
}
return result, nil
}
// Deny by default.
glog.Warningf("%s did not match any authz rule", *ai)
return nil, nil
}
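// Authorize evaluates every requested scope and collects the actions that were actually granted for each.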
func (as *AuthServer) Authorize(ar *authRequest) ([]authzResult, error) {
ares := []authzResult{}
for _, scope := range ar.Scopes {
ai := &api.AuthRequestInfo{
Account: ar.Account,
Type: scope.Type,
Name: scope.Name,
Service: ar.Service,
IP: ar.RemoteIP,
Actions: scope.Actions,
Labels: ar.Labels,
}
actions, err := as.authorizeScope(ai)
if err != nil {
return nil, err
}
ares = append(ares, authzResult{scope: scope, autorizedActions: actions})
}
return ares, nil
}