file_name
large_stringlengths 4
140
| prefix
large_stringlengths 0
12.1k
| suffix
large_stringlengths 0
12k
| middle
large_stringlengths 0
7.51k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
console.js
|
the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or any later version.
*
* Aloha Editor is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* As an additional permission to the GNU GPL version 2, you may distribute
* non-source (e.g., minimized or compacted) forms of the Aloha-Editor
* source code without the copy of the GNU GPL normally required,
* provided you include this license notice and a URL through which
* recipients can access the Corresponding Source.
*/
define([
'aloha/core',
'util/class',
'jquery'
], function (Aloha, Class, jQuery) {
"use strict";
/**
* This is the aloha Log
* @namespace Aloha
* @class Log
* @singleton
*/
var AlohaConsole = Class.extend({
/**
* Initialize the logging
* @hide
*/
init:function () {
// initialize the logging settings (if not present)
if (typeof Aloha.settings.logLevels === 'undefined' || !Aloha.settings.logLevels) {
Aloha.settings.logLevels = {
'error':true,
'warn':true
};
}
// initialize the logHistory settings (if not present)
if (typeof Aloha.settings.logHistory === 'undefined' || !Aloha.settings.logHistory) {
Aloha.settings.logHistory = {};
}
// set the default values for the loghistory
if (!Aloha.settings.logHistory.maxEntries) {
Aloha.settings.logHistory.maxEntries = 100;
}
if (!Aloha.settings.logHistory.highWaterMark) {
Aloha.settings.logHistory.highWaterMark = 90;
}
if (!Aloha.settings.logHistory.levels) {
Aloha.settings.logHistory.levels = {
'error':true,
'warn':true
};
}
this.flushLogHistory();
Aloha.trigger('aloha-logger-ready');
},
/**
* Log History as array of Message Objects. Every object has the properties
* 'level', 'component' and 'message'
* @property
* @type Array
* @hide
*/
logHistory:[],
/**
* Flag, which is set as soon as the highWaterMark for the log history is reached.
* This flag is reset on every call of flushLogHistory()
* @hide
*/
highWaterMarkReached:false,
/**
* Logs a message to the console
* @method
* @param {String} level Level of the log ('error', 'warn' or 'info', 'debug')
* @param {String} component Component that calls the log
* @param {String} message log message
*/
log:function (level, component, message) {
// log ('Logging message');
if (typeof component === 'undefined') {
message = level;
}
if (typeof component !== 'string' && component && component.toString) {
component = component.toString();
}
// log ('warn', 'Warning message');
if (typeof message === 'undefined') {
message = component;
component = undefined;
}
if (typeof level === 'undefined' || !level) {
level = 'log';
}
level = level.toLowerCase();
if (typeof Aloha.settings.logLevels === "undefined")
|
// now check whether the log level is activated
if (!Aloha.settings.logLevels[level]) {
return;
}
component = component || "Unkown Aloha Component";
this.addToLogHistory({
'level':level,
'component':component,
'message':message,
'date':new Date()
});
var console = window.console;
switch (level) {
case 'error':
if (window.console && console.error) {
// FIXME:
// Using console.error rather than throwing an error is very
// problematic because we get not stack.
// We ought to consider doing the following:
// throw component + ': ' + message;
if (!component && !message) {
console.error("Error occured without message and component");
} else {
console.error(component + ': ' + message);
}
}
break;
case 'warn':
if (window.console && console.warn) {
console.warn(component + ': ' + message);
}
break;
case 'info':
if (window.console && console.info) {
console.info(component + ': ' + message);
}
break;
case 'debug':
if (window.console && console.log) {
console.log(component + ' [' + level + ']: ' + message);
}
break;
default:
if (window.console && console.log) {
console.log(component + ' [' + level + ']: ' + message);
}
break;
}
},
/**
* Log a message of log level 'error'
* @method
* @param {String} component Component that calls the log
* @param {String} message log message
*/
error:function (component, message) {
this.log('error', component, message);
},
/**
* Log a message of log level 'warn'
* @method
* @param {String} component Component that calls the log
* @param {String} message log message
*/
warn:function (component, message) {
this.log('warn', component, message);
},
/**
* Log a message of log level 'info'
* @method
* @param {String} component Component that calls the log
* @param {String} message log message
*/
info:function (component, message) {
this.log('info', component, message);
},
/**
* Log a message of log level 'debug'
* @param {String} component Component that calls the log
* @param {String} message log message
*/
debug:function (component, message) {
this.log('debug', component, message);
},
/**
* Methods to mark function as deprecated for developers.
* @param {String} component String that calls the log
* @param {String} message log message
*/
deprecated:function (component, message) {
this.log('warn', component, message);
// help the developer to locate the call.
if (Aloha.settings.logLevels.deprecated) {
throw new Error(message);
}
},
/**
* Check whether the given log level is currently enabled
* @param {String} level
* @return true when log level is enabled, false if not
*/
isLogLevelEnabled:function (level) {
return Aloha.settings && Aloha.settings.logLevels && Aloha.settings.logLevels[level];
},
/**
* Check whether error logging is enabled
* @return true if error logging is enabled, false if not
*/
isErrorEnabled:function () {
return this.isLogLevelEnabled('error');
},
/**
* Check whether warn logging is enabled
* @return true if warn logging is enabled, false if not
*/
isWarnEnabled:function () {
return this.isLogLevelEnabled('warn');
},
/**
* Check whether info logging is enabled
* @return true if info logging is enabled, false if not
*/
isInfoEnabled:function () {
return this.isLogLevelEnabled('info');
},
/**
* Check whether debug logging is enabled
* @return true if debug logging is enabled, false if not
*/
isDebugEnabled:function () {
return this.isLogLevelEnabled('debug');
},
/**
* Add the given entry to the log history. Check whether the highWaterMark has been reached, and fire an event if yes.
* @param {Object} entry entry to be added to the log history
* @hide
*/
addToLogHistory:function (entry) {
if (!Aloha.settings.logHistory) {
this.init();
}
// when maxEntries is set to something illegal, we do nothing (log history is disabled)
// check whether the level is one we like to have logged
if (Aloha.settings.logHistory.maxEntries <= 0 || !Aloha.settings.logHistory.levels[entry.level]) {
return;
}
// first add the entry as last element to the history array
this.logHistory.push(entry);
// check whether the highWaterMark was reached, if so, fire an event
if (!this.highWaterMarkReached) {
if (this.logHistory.length >= Aloha.settings.logHistory.maxEntries * Aloha.settings.logHistory.highWaterMark / 100) {
// fire the event
Aloha.trigger('aloha-log-full');
// set the flag (so we will not fire the event again until the logHistory is flushed)
this.highWaterMarkReached
|
{
return;
}
|
conditional_block
|
SearchForm.js
|
Picker, TouchableHighlight, Text } from 'react-native'
import { GooglePlacesAutocomplete } from 'react-native-google-places-autocomplete';
import MultiSelect from 'react-native-multiple-select';
import categories from '../components/Categories'
// CITATION: https://stackoverflow.com/questions/37230555/get-with-query-string-with-fetch-in-react-native
function queryString(query) {
// get array of key value pairs ([[k1, v1], [k2, v2]])
const qs = Object.entries(query)
// filter pairs with undefined value
.filter(pair => pair[1] !== undefined)
// encode keys and values, remove the value if it is null, but leave the key
.map(pair => pair.filter(i => i !== null).map(encodeURIComponent).join('='))
.join('&');
return qs && '?' + qs;
}
export default class SearchForm extends React.Component {
constructor(props) {
super(props);
this.state = {
// address: null,
address: '10304, Marcus Avenue, Tujunga, Los Angeles, Los Angeles County, California, United States, 91042, 2010',
selectedItems: [],
selectedItemDistance: []
};
this.optionsDistance = [
{ id: "1", name: '1 mile' },
{ id: "5", name: '5 miles' },
{ id: "10", name: '10 miles' },
{ id: "25", name: '25 miles' },
{ id: "50", name: '50 miles' }
];
this.onSelectedItemsChange = this.onSelectedItemsChange.bind(this);
this.onSelectedItemsChangeDistance = this.onSelectedItemsChangeDistance.bind(this);
this.handleSubmit = this.handleSubmit.bind(this);
this.handlePlaceSelect = this.handlePlaceSelect.bind(this);
}
onSelectedItemsChange = selectedItemsPassed => {
this.setState({
selectedItems: selectedItemsPassed
})
};
onSelectedItemsChangeDistance = selectedItemPassed => {
this.setState({
selectedItemDistance: selectedItemPassed
})
};
async getPictures(prefixPassed) {
const response = await fetch('http://192.168.1.24:8081/getImages', {
method: "POST",
headers: {
'Content-type': 'application/json'
},
// credentials: 'include',
body: JSON.stringify({prefix: prefixPassed})
})
.then(function(response){
if(response.status!==200){
// throw an error alert
console.log("ERROR!", response)
}
else{
return response.json();
}
})
.then(data => {
if(data){
return data
}
});
return response
}
async
|
(store_id){
let response = await fetch('http://192.168.1.24:8081/stores/' + store_id + "/services/", {
method: "GET",
headers: {
'Content-type': 'application/json'
},
// credentials: 'include'
})
.then(function(response){
if(response.status!==200){
// throw an error alert
console.log("ERROR!")
}
else{
return response.json();
}
})
.then(data => {
if(data){
return data
}
});
return response
}
handleSubmit() {
if (this.state.address && this.state.address.length > 0 && this.state.selectedItemDistance.length > 0) {
let modifyState = function(state) {
return {
address: state.address,
distance: parseInt(state.selectedItemDistance[0]),
nails: state.selectedItems.includes("Nails"),
hair: state.selectedItems.includes("Hair"),
makeup: state.selectedItems.includes("Makeup"),
facials: state.selectedItems.includes("Facials"),
barber: state.selectedItems.includes("Barber"),
spa: state.selectedItems.includes("Spa"),
}
}
let modifiedState = modifyState(this.state)
let query = queryString(modifiedState)
let getPictures = this.getPictures
let getServices = this.getServices
fetch('http://192.168.1.24:8081/stores' + query, {
headers: {
'Content-Type': 'application/json',
},
method: "GET"
})
.then(function(response){
if(response.status!==200){
alert("Invalid search!");
}
else{
return response.json()
}
})
.then(async data => {
if(data){
if(data.stores.length > 0){
for(let i = 0; i < data.stores.length; i++){
let pictures = await getPictures('stores/' + data.stores[i].id + '/images/')
let services = await getServices(data.stores[i].id)
data.stores[i].pictures = pictures
data.stores[i].services = services
}
this.props.navigation.navigate('SearchDisplay', {
stores: data.stores,
center: data.center
})
}
else{
alert("No search results!");
}
}
})
}
}
handlePlaceSelect(data) {
this.setState({
address: data.description
})
}
render() {
return (
<SafeAreaView style={styles.container}>
<Fragment>
<Text style={styles.title}>
Search Now
</Text>
<GooglePlacesAutocomplete
name="address"
placeholder="Try 'New Haven, CT'"
minLength={2} // minimum length of text to search
autoFocus={false}
onPress={(data, details = null) => { // 'details' is provided when fetchDetails = true
if(data.description){
this.setState({
address: data.description
})
}
else if(data.vicinity){
this.setState({
address: data.vicinity
})
}
else{
console.log(data)
}
}}
getDefaultValue={() => ''}
query={{
// available options: https://developers.google.com/places/web-service/autocomplete
key: '', // Does work, supply with Google API Key for use
language: 'en', // language of the results
}}
styles={{
textInputContainer: {
margin: 10
},
iconStyle: {
marginRight: 10
}
}}
currentLocation={true} // Will add a 'Current location' button at the top of the predefined places list
currentLocationLabel="Current location"
nearbyPlacesAPI='GooglePlacesSearch' // Which API to use: GoogleReverseGeocoding or GooglePlacesSearch
GoogleReverseGeocodingQuery={{
// available options for GoogleReverseGeocoding API : https://developers.google.com/maps/documentation/geocoding/intro
}}
GooglePlacesSearchQuery={{
// available options for GooglePlacesSearch API : https://developers.google.com/places/web-service/search
rankby: 'distance',
types: 'food'
}}
filterReverseGeocodingByTypes={['locality', 'administrative_area_level_3']} // filter the reverse geocoding results by types - ['locality', 'administrative_area_level_3'] if you want to display only cities
/>
<View style={{ flex: 1, marginLeft: 10, marginRight: 10 }}>
<MultiSelect
single={true}
hideTags
items={this.optionsDistance}
uniqueKey="id"
onSelectedItemsChange={this.onSelectedItemsChangeDistance}
selectedItems={this.state.selectedItemDistance}
selectText="Select Distance"
searchInputPlaceholderText="Within..."
onChangeInput={ (text)=> console.log(text)}
tagRemoveIconColor="#CCC"
tagBorderColor="#CCC"
tagTextColor="#CCC"
selectedItemTextColor="#CCC"
selectedItemIconColor="#CCC"
itemTextColor="#000"
displayKey="name"
searchInputStyle={{ color: '#CCC' }}
submitButtonColor="#CCC"
submitButtonText="Done"
/>
</View>
<View style={{ flex: 1, marginLeft: 10, marginRight: 10 }}>
<MultiSelect
hideTags
items={categories}
uniqueKey="key"
ref={(component) => { this.multiSelect = component }}
onSelectedItemsChange={this.onSelectedItemsChange}
selectedItems={this.state.selectedItems}
selectText="Select Category"
searchInputPlaceholderText="Search Categories..."
onChangeInput={ (text)=> console.log(text)}
tagRemoveIconColor="#CCC"
tagBorderColor="#CCC"
tagTextColor="#CCC"
selectedItemTextColor="#CCC"
selectedItemIconColor="#CCC"
itemTextColor="#000"
displayKey="value"
searchInputStyle={{ color: '#CCC' }}
submitButtonColor="#CCC"
submitButtonText="Done"
/>
<View>
{this.multiSelect && this.multiSelect.getSelectedItemsExt(this.state.selectedItems)}
</View>
</View>
<View style={styles.buttonContainer}>
<TouchableHighlight
onPress={this.handleSubmit}
>
<Text style={styles.button}>Search</Text>
</TouchableHighlight>
</View>
</Fragment>
</SafeAreaView>
)
}
}
const styles = StyleSheet.create
|
getServices
|
identifier_name
|
SearchForm.js
|
Picker, TouchableHighlight, Text } from 'react-native'
import { GooglePlacesAutocomplete } from 'react-native-google-places-autocomplete';
import MultiSelect from 'react-native-multiple-select';
import categories from '../components/Categories'
// CITATION: https://stackoverflow.com/questions/37230555/get-with-query-string-with-fetch-in-react-native
function queryString(query) {
// get array of key value pairs ([[k1, v1], [k2, v2]])
const qs = Object.entries(query)
// filter pairs with undefined value
.filter(pair => pair[1] !== undefined)
// encode keys and values, remove the value if it is null, but leave the key
.map(pair => pair.filter(i => i !== null).map(encodeURIComponent).join('='))
.join('&');
return qs && '?' + qs;
}
export default class SearchForm extends React.Component {
constructor(props) {
super(props);
this.state = {
// address: null,
address: '10304, Marcus Avenue, Tujunga, Los Angeles, Los Angeles County, California, United States, 91042, 2010',
selectedItems: [],
selectedItemDistance: []
};
this.optionsDistance = [
{ id: "1", name: '1 mile' },
{ id: "5", name: '5 miles' },
{ id: "10", name: '10 miles' },
{ id: "25", name: '25 miles' },
{ id: "50", name: '50 miles' }
];
this.onSelectedItemsChange = this.onSelectedItemsChange.bind(this);
this.onSelectedItemsChangeDistance = this.onSelectedItemsChangeDistance.bind(this);
this.handleSubmit = this.handleSubmit.bind(this);
this.handlePlaceSelect = this.handlePlaceSelect.bind(this);
}
onSelectedItemsChange = selectedItemsPassed => {
this.setState({
selectedItems: selectedItemsPassed
})
};
onSelectedItemsChangeDistance = selectedItemPassed => {
this.setState({
selectedItemDistance: selectedItemPassed
})
};
async getPictures(prefixPassed) {
const response = await fetch('http://192.168.1.24:8081/getImages', {
method: "POST",
headers: {
'Content-type': 'application/json'
},
// credentials: 'include',
body: JSON.stringify({prefix: prefixPassed})
})
.then(function(response){
if(response.status!==200){
// throw an error alert
console.log("ERROR!", response)
}
else{
return response.json();
}
})
.then(data => {
if(data){
return data
}
});
return response
}
async getServices(store_id){
let response = await fetch('http://192.168.1.24:8081/stores/' + store_id + "/services/", {
method: "GET",
headers: {
'Content-type': 'application/json'
},
// credentials: 'include'
})
.then(function(response){
if(response.status!==200){
// throw an error alert
console.log("ERROR!")
}
else{
return response.json();
}
})
.then(data => {
if(data){
return data
}
});
return response
}
handleSubmit() {
if (this.state.address && this.state.address.length > 0 && this.state.selectedItemDistance.length > 0) {
let modifyState = function(state) {
return {
address: state.address,
distance: parseInt(state.selectedItemDistance[0]),
nails: state.selectedItems.includes("Nails"),
hair: state.selectedItems.includes("Hair"),
makeup: state.selectedItems.includes("Makeup"),
facials: state.selectedItems.includes("Facials"),
barber: state.selectedItems.includes("Barber"),
spa: state.selectedItems.includes("Spa"),
}
}
let modifiedState = modifyState(this.state)
let query = queryString(modifiedState)
let getPictures = this.getPictures
let getServices = this.getServices
fetch('http://192.168.1.24:8081/stores' + query, {
headers: {
'Content-Type': 'application/json',
},
method: "GET"
})
.then(function(response){
if(response.status!==200){
alert("Invalid search!");
}
else{
return response.json()
}
})
.then(async data => {
if(data){
if(data.stores.length > 0){
for(let i = 0; i < data.stores.length; i++){
let pictures = await getPictures('stores/' + data.stores[i].id + '/images/')
let services = await getServices(data.stores[i].id)
data.stores[i].pictures = pictures
data.stores[i].services = services
}
this.props.navigation.navigate('SearchDisplay', {
stores: data.stores,
center: data.center
})
}
else{
alert("No search results!");
}
}
})
}
}
handlePlaceSelect(data) {
this.setState({
address: data.description
})
}
render() {
return (
<SafeAreaView style={styles.container}>
<Fragment>
<Text style={styles.title}>
Search Now
</Text>
<GooglePlacesAutocomplete
name="address"
placeholder="Try 'New Haven, CT'"
minLength={2} // minimum length of text to search
autoFocus={false}
onPress={(data, details = null) => { // 'details' is provided when fetchDetails = true
if(data.description){
this.setState({
address: data.description
})
}
else if(data.vicinity){
this.setState({
address: data.vicinity
})
}
else
|
}}
getDefaultValue={() => ''}
query={{
// available options: https://developers.google.com/places/web-service/autocomplete
key: '', // Does work, supply with Google API Key for use
language: 'en', // language of the results
}}
styles={{
textInputContainer: {
margin: 10
},
iconStyle: {
marginRight: 10
}
}}
currentLocation={true} // Will add a 'Current location' button at the top of the predefined places list
currentLocationLabel="Current location"
nearbyPlacesAPI='GooglePlacesSearch' // Which API to use: GoogleReverseGeocoding or GooglePlacesSearch
GoogleReverseGeocodingQuery={{
// available options for GoogleReverseGeocoding API : https://developers.google.com/maps/documentation/geocoding/intro
}}
GooglePlacesSearchQuery={{
// available options for GooglePlacesSearch API : https://developers.google.com/places/web-service/search
rankby: 'distance',
types: 'food'
}}
filterReverseGeocodingByTypes={['locality', 'administrative_area_level_3']} // filter the reverse geocoding results by types - ['locality', 'administrative_area_level_3'] if you want to display only cities
/>
<View style={{ flex: 1, marginLeft: 10, marginRight: 10 }}>
<MultiSelect
single={true}
hideTags
items={this.optionsDistance}
uniqueKey="id"
onSelectedItemsChange={this.onSelectedItemsChangeDistance}
selectedItems={this.state.selectedItemDistance}
selectText="Select Distance"
searchInputPlaceholderText="Within..."
onChangeInput={ (text)=> console.log(text)}
tagRemoveIconColor="#CCC"
tagBorderColor="#CCC"
tagTextColor="#CCC"
selectedItemTextColor="#CCC"
selectedItemIconColor="#CCC"
itemTextColor="#000"
displayKey="name"
searchInputStyle={{ color: '#CCC' }}
submitButtonColor="#CCC"
submitButtonText="Done"
/>
</View>
<View style={{ flex: 1, marginLeft: 10, marginRight: 10 }}>
<MultiSelect
hideTags
items={categories}
uniqueKey="key"
ref={(component) => { this.multiSelect = component }}
onSelectedItemsChange={this.onSelectedItemsChange}
selectedItems={this.state.selectedItems}
selectText="Select Category"
searchInputPlaceholderText="Search Categories..."
onChangeInput={ (text)=> console.log(text)}
tagRemoveIconColor="#CCC"
tagBorderColor="#CCC"
tagTextColor="#CCC"
selectedItemTextColor="#CCC"
selectedItemIconColor="#CCC"
itemTextColor="#000"
displayKey="value"
searchInputStyle={{ color: '#CCC' }}
submitButtonColor="#CCC"
submitButtonText="Done"
/>
<View>
{this.multiSelect && this.multiSelect.getSelectedItemsExt(this.state.selectedItems)}
</View>
</View>
<View style={styles.buttonContainer}>
<TouchableHighlight
onPress={this.handleSubmit}
>
<Text style={styles.button}>Search</Text>
</TouchableHighlight>
</View>
</Fragment>
</SafeAreaView>
)
}
}
const styles = StyleSheet.create
|
{
console.log(data)
}
|
conditional_block
|
SearchForm.js
|
Picker, TouchableHighlight, Text } from 'react-native'
import { GooglePlacesAutocomplete } from 'react-native-google-places-autocomplete';
import MultiSelect from 'react-native-multiple-select';
import categories from '../components/Categories'
// CITATION: https://stackoverflow.com/questions/37230555/get-with-query-string-with-fetch-in-react-native
function queryString(query) {
// get array of key value pairs ([[k1, v1], [k2, v2]])
const qs = Object.entries(query)
// filter pairs with undefined value
.filter(pair => pair[1] !== undefined)
// encode keys and values, remove the value if it is null, but leave the key
.map(pair => pair.filter(i => i !== null).map(encodeURIComponent).join('='))
.join('&');
return qs && '?' + qs;
}
export default class SearchForm extends React.Component {
constructor(props) {
super(props);
this.state = {
// address: null,
address: '10304, Marcus Avenue, Tujunga, Los Angeles, Los Angeles County, California, United States, 91042, 2010',
selectedItems: [],
selectedItemDistance: []
};
this.optionsDistance = [
{ id: "1", name: '1 mile' },
{ id: "5", name: '5 miles' },
{ id: "10", name: '10 miles' },
{ id: "25", name: '25 miles' },
{ id: "50", name: '50 miles' }
];
this.onSelectedItemsChange = this.onSelectedItemsChange.bind(this);
this.onSelectedItemsChangeDistance = this.onSelectedItemsChangeDistance.bind(this);
this.handleSubmit = this.handleSubmit.bind(this);
this.handlePlaceSelect = this.handlePlaceSelect.bind(this);
}
onSelectedItemsChange = selectedItemsPassed => {
this.setState({
selectedItems: selectedItemsPassed
|
onSelectedItemsChangeDistance = selectedItemPassed => {
this.setState({
selectedItemDistance: selectedItemPassed
})
};
async getPictures(prefixPassed) {
const response = await fetch('http://192.168.1.24:8081/getImages', {
method: "POST",
headers: {
'Content-type': 'application/json'
},
// credentials: 'include',
body: JSON.stringify({prefix: prefixPassed})
})
.then(function(response){
if(response.status!==200){
// throw an error alert
console.log("ERROR!", response)
}
else{
return response.json();
}
})
.then(data => {
if(data){
return data
}
});
return response
}
async getServices(store_id){
let response = await fetch('http://192.168.1.24:8081/stores/' + store_id + "/services/", {
method: "GET",
headers: {
'Content-type': 'application/json'
},
// credentials: 'include'
})
.then(function(response){
if(response.status!==200){
// throw an error alert
console.log("ERROR!")
}
else{
return response.json();
}
})
.then(data => {
if(data){
return data
}
});
return response
}
handleSubmit() {
if (this.state.address && this.state.address.length > 0 && this.state.selectedItemDistance.length > 0) {
let modifyState = function(state) {
return {
address: state.address,
distance: parseInt(state.selectedItemDistance[0]),
nails: state.selectedItems.includes("Nails"),
hair: state.selectedItems.includes("Hair"),
makeup: state.selectedItems.includes("Makeup"),
facials: state.selectedItems.includes("Facials"),
barber: state.selectedItems.includes("Barber"),
spa: state.selectedItems.includes("Spa"),
}
}
let modifiedState = modifyState(this.state)
let query = queryString(modifiedState)
let getPictures = this.getPictures
let getServices = this.getServices
fetch('http://192.168.1.24:8081/stores' + query, {
headers: {
'Content-Type': 'application/json',
},
method: "GET"
})
.then(function(response){
if(response.status!==200){
alert("Invalid search!");
}
else{
return response.json()
}
})
.then(async data => {
if(data){
if(data.stores.length > 0){
for(let i = 0; i < data.stores.length; i++){
let pictures = await getPictures('stores/' + data.stores[i].id + '/images/')
let services = await getServices(data.stores[i].id)
data.stores[i].pictures = pictures
data.stores[i].services = services
}
this.props.navigation.navigate('SearchDisplay', {
stores: data.stores,
center: data.center
})
}
else{
alert("No search results!");
}
}
})
}
}
handlePlaceSelect(data) {
this.setState({
address: data.description
})
}
render() {
return (
<SafeAreaView style={styles.container}>
<Fragment>
<Text style={styles.title}>
Search Now
</Text>
<GooglePlacesAutocomplete
name="address"
placeholder="Try 'New Haven, CT'"
minLength={2} // minimum length of text to search
autoFocus={false}
onPress={(data, details = null) => { // 'details' is provided when fetchDetails = true
if(data.description){
this.setState({
address: data.description
})
}
else if(data.vicinity){
this.setState({
address: data.vicinity
})
}
else{
console.log(data)
}
}}
getDefaultValue={() => ''}
query={{
// available options: https://developers.google.com/places/web-service/autocomplete
key: '', // Does work, supply with Google API Key for use
language: 'en', // language of the results
}}
styles={{
textInputContainer: {
margin: 10
},
iconStyle: {
marginRight: 10
}
}}
currentLocation={true} // Will add a 'Current location' button at the top of the predefined places list
currentLocationLabel="Current location"
nearbyPlacesAPI='GooglePlacesSearch' // Which API to use: GoogleReverseGeocoding or GooglePlacesSearch
GoogleReverseGeocodingQuery={{
// available options for GoogleReverseGeocoding API : https://developers.google.com/maps/documentation/geocoding/intro
}}
GooglePlacesSearchQuery={{
// available options for GooglePlacesSearch API : https://developers.google.com/places/web-service/search
rankby: 'distance',
types: 'food'
}}
filterReverseGeocodingByTypes={['locality', 'administrative_area_level_3']} // filter the reverse geocoding results by types - ['locality', 'administrative_area_level_3'] if you want to display only cities
/>
<View style={{ flex: 1, marginLeft: 10, marginRight: 10 }}>
<MultiSelect
single={true}
hideTags
items={this.optionsDistance}
uniqueKey="id"
onSelectedItemsChange={this.onSelectedItemsChangeDistance}
selectedItems={this.state.selectedItemDistance}
selectText="Select Distance"
searchInputPlaceholderText="Within..."
onChangeInput={ (text)=> console.log(text)}
tagRemoveIconColor="#CCC"
tagBorderColor="#CCC"
tagTextColor="#CCC"
selectedItemTextColor="#CCC"
selectedItemIconColor="#CCC"
itemTextColor="#000"
displayKey="name"
searchInputStyle={{ color: '#CCC' }}
submitButtonColor="#CCC"
submitButtonText="Done"
/>
</View>
<View style={{ flex: 1, marginLeft: 10, marginRight: 10 }}>
<MultiSelect
hideTags
items={categories}
uniqueKey="key"
ref={(component) => { this.multiSelect = component }}
onSelectedItemsChange={this.onSelectedItemsChange}
selectedItems={this.state.selectedItems}
selectText="Select Category"
searchInputPlaceholderText="Search Categories..."
onChangeInput={ (text)=> console.log(text)}
tagRemoveIconColor="#CCC"
tagBorderColor="#CCC"
tagTextColor="#CCC"
selectedItemTextColor="#CCC"
selectedItemIconColor="#CCC"
itemTextColor="#000"
displayKey="value"
searchInputStyle={{ color: '#CCC' }}
submitButtonColor="#CCC"
submitButtonText="Done"
/>
<View>
{this.multiSelect && this.multiSelect.getSelectedItemsExt(this.state.selectedItems)}
</View>
</View>
<View style={styles.buttonContainer}>
<TouchableHighlight
onPress={this.handleSubmit}
>
<Text style={styles.button}>Search</Text>
</TouchableHighlight>
</View>
</Fragment>
</SafeAreaView>
)
}
}
const styles = StyleSheet.create({
|
})
};
|
random_line_split
|
instance.go
|
ifcloudClients.Computing.DescribeInstances(context.TODO(), input)
switch {
case nferrors.IsNotFound(err):
return nil, nil
case err != nil:
return nil, fmt.Errorf("failed to describe instance[%q]: %w", *id, err)
}
for _, rs := range out.ReservationSet {
if len(rs.InstancesSet) == 0 {
break
}
instance := rs.InstancesSet[0]
if nifcloud.StringValue(id) != nifcloud.StringValue(instance.InstanceId) {
continue
}
return s.SDKToInstance(out.ReservationSet[0].InstancesSet[0])
}
return nil, nil
}
func (s *Service) GetRunningInstanceByTag(scope *scope.MachineScope) (*infrav1alpha2.Instance, error) {
s.scope.V(2).Info("Looking for existing machine instance by tags")
input := &computing.DescribeInstancesInput{}
out, err := s.scope.NifcloudClients.Computing.DescribeInstances(context.TODO(), input)
switch {
case nferrors.IsNotFound(err):
return nil, nil
case err != nil:
return nil, fmt.Errorf("failed to describe running instance: %w", err)
}
filtered := s.FilterInstancesByTag(out.ReservationSet, map[string]string{
"cluster": s.scope.Name(),
"role": scope.Role(),
})
for _, res := range filtered {
for _, instance := range res.InstancesSet {
// filter by name
if nifcloud.StringValue(instance.InstanceId) != nifcloud.StringValue(scope.GetInstanceID()) {
continue
}
return s.SDKToInstance(instance)
}
}
return nil, nil
}
func (s *Service) FilterInstancesByTag(vs []computing.ReservationSetItem, tags map[string]string) []computing.ReservationSetItem {
var filtered []computing.ReservationSetItem
for _, v := range vs {
iTag := v1alpha2.ParseTags(nifcloud.StringValue(v.Description))
ok := true
for key, val := range iTag {
if tags[key] != val {
ok = false
break
}
}
if ok {
filtered = append(filtered, v)
}
}
return filtered
}
func (s *Service) CreateInstance(scope *scope.MachineScope) (*infrav1alpha2.Instance, error) {
s.scope.V(2).Info("Creating an instance for a machine")
instanceID := scope.GetInstanceIDConved()
input := &infrav1alpha2.Instance{
ID: instanceID,
Type: scope.NifcloudMachine.Spec.InstanceType,
NetworkInterfaces: scope.NifcloudMachine.Spec.NetworkInterfaces,
}
// create tags
input.Tag = v1alpha2.BuildTags(v1alpha2.BuildParams{
ClusterName: s.scope.Name(),
Role: nifcloud.String(scope.Role()),
})
var err error
// set image from the machine configuration
if scope.NifcloudMachine.Spec.ImageID != "" {
input.ImageID = scope.NifcloudMachine.Spec.ImageID
} else {
input.ImageID, err = s.defaultImageLookup()
if err != nil {
return nil, err
}
}
// set userdata
userData, err := scope.GetUserData()
if err != nil {
scope.Info("failed to get bootstrap data")
return nil, err
}
input.UserData = pointer.StringPtr(userData)
ids, err := s.GetCoreSecurityGroup(scope)
if err != nil {
return nil, err
}
input.SecurityGroups = append(input.SecurityGroups, ids...)
// set SSH key
input.SSHKeyName = defaultSSHKeyName
if scope.NifcloudMachine.Spec.KeyName != "" {
input.SSHKeyName = scope.NifcloudMachine.Spec.KeyName
} else {
input.SSHKeyName = defaultSSHKeyName
}
s.scope.V(2).Info("Running instance", "machine-role", scope.Role())
out, err := s.runInstance(scope.Role(), input)
if err != nil {
record.Warnf(scope.NifcloudMachine, "FailedCreate", "Failed to create instance: %v", err)
return nil, err
}
if len(input.NetworkInterfaces) > 0 {
for _, id := range input.NetworkInterfaces {
// TODO: attach interface
s.scope.V(2).Info("Attaching security groups to provide network interface", "groups", "[TODO]", "interface", id)
}
}
// set reserved ip addr to controlplane
labels := scope.Machine.GetLabels()
if labels["cluster.x-k8s.io/control-plane"] == "true" {
if err := s.attachAddress(out.ID, scope.NifcloudCluster.Status.APIEndpoints[0].Host); err != nil {
return out, err
}
}
record.Eventf(scope.NifcloudMachine, "SuccessfulCreate", "Created new instance [%s/%s]", scope.Role(), out.ID)
return out, nil
}
func (s *Service) GetCoreSecurityGroup(scope *scope.MachineScope) ([]string, error) {
sgRoles := []infrav1alpha2.SecurityGroupRole{}
switch scope.Role() {
case "node", "control-plane":
sgRoles = append(sgRoles, infrav1alpha2.SecurityGroupControlPlane)
default:
return nil, errors.Errorf("Unknown node role %q", scope.Role())
}
ids := make([]string, 0, len(sgRoles))
for _, sg := range sgRoles {
if _, ok := s.scope.SecurityGroups()[sg]; !ok {
return nil, nferrors.NewFailedDependency(
errors.Errorf("%s security group not available", sg),
)
}
ids = append(ids, s.scope.SecurityGroups()[sg].Name)
}
return ids, nil
}
func (s *Service) StopAndTerminateInstanceWithTimeout(instanceID string) error {
ctx := context.TODO()
input := &computing.DescribeInstancesInput{
InstanceId: []string{instanceID},
}
// stopping server before terminating
if err := s.StopInstance(instanceID); err != nil {
return err
}
s.scope.V(2).Info("Waiting for Nifcloud server to stop", "instance-id", instanceID)
if err := s.scope.NifcloudClients.Computing.WaitUntilInstanceStopped(ctx, input); err != nil {
return fmt.Errorf("failed to wait for instance %q stopping: %w", instanceID, err)
}
if err := s.TerminateInstance(instanceID); err != nil {
return err
}
s.scope.V(2).Info("Waiting for Nifcloud server to terminate", "intance-id", instanceID)
if err := s.scope.NifcloudClients.Computing.WaitUntilInstanceDeleted(ctx, input); err != nil {
return fmt.Errorf("failed to wait for instance %q termination: %w", instanceID, err)
}
return nil
}
|
s.scope.V(2).Info("Try to terminate instance", "instance-id", instanceID)
input := &computing.TerminateInstancesInput{
InstanceId: []string{instanceID},
}
if _, err := s.scope.NifcloudClients.Computing.TerminateInstances(context.TODO(), input); err != nil {
return fmt.Errorf("failed to termiante instance with id %q: %w", instanceID, err)
}
s.scope.V(2).Info("Terminated instance", "instance-id", instanceID)
return nil
}
func (s *Service) StopInstance(instanceID string) error {
s.scope.V(2).Info("Try to stop instance", "instance-id", instanceID)
input := &computing.StopInstancesInput{
InstanceId: []string{instanceID},
}
if _, err := s.scope.NifcloudClients.Computing.StopInstances(context.TODO(), input); err != nil {
return fmt.Errorf("failed to stop instance with id %q: %w", instanceID, err)
}
s.scope.V(2).Info("Stoped instance", "instance-id", instanceID)
return nil
}
func (s *Service) runInstance(role string, i *infrav1alpha2.Instance) (*infrav1alpha2.Instance, error) {
apiTermination := infrav1alpha2.ApiTermination
input := &computing.RunInstancesInput{
InstanceId: &i.ID,
InstanceType: &i.Type,
ImageId: &i.ImageID,
KeyName: &i.SSHKeyName,
DisableApiTermination: &apiTermination,
}
if i.UserData != nil {
input.UserData = i.UserData
s.scope.Info("userData size", "bytes", len(nifcloud.StringValue(input.UserData)), "role", role)
}
if len(i.NetworkInterfaces) > 0 {
netInterfaces := make([]computing.RequestNetworkInterfaceStruct, 0, len(i.NetworkInterfaces))
for index, id := range i.NetworkInterfaces {
idx := int64(index)
netInterfaces = append(netInterfaces, computing.RequestNetworkInterfaceStruct{
DeviceIndex: &idx,
NetworkId: &id,
})
}
input.NetworkInterface = netInterfaces
} else {
if len(i.SecurityGroups) > 0 {
input.SecurityGroup = i.SecurityGroups
}
|
func (s *Service) TerminateInstance(instanceID string) error {
|
random_line_split
|
instance.go
|
if len(rs.InstancesSet) == 0 {
break
}
instance := rs.InstancesSet[0]
if nifcloud.StringValue(id) != nifcloud.StringValue(instance.InstanceId) {
continue
}
return s.SDKToInstance(out.ReservationSet[0].InstancesSet[0])
}
return nil, nil
}
func (s *Service) GetRunningInstanceByTag(scope *scope.MachineScope) (*infrav1alpha2.Instance, error) {
s.scope.V(2).Info("Looking for existing machine instance by tags")
input := &computing.DescribeInstancesInput{}
out, err := s.scope.NifcloudClients.Computing.DescribeInstances(context.TODO(), input)
switch {
case nferrors.IsNotFound(err):
return nil, nil
case err != nil:
return nil, fmt.Errorf("failed to describe running instance: %w", err)
}
filtered := s.FilterInstancesByTag(out.ReservationSet, map[string]string{
"cluster": s.scope.Name(),
"role": scope.Role(),
})
for _, res := range filtered {
for _, instance := range res.InstancesSet {
// filter by name
if nifcloud.StringValue(instance.InstanceId) != nifcloud.StringValue(scope.GetInstanceID()) {
continue
}
return s.SDKToInstance(instance)
}
}
return nil, nil
}
func (s *Service) FilterInstancesByTag(vs []computing.ReservationSetItem, tags map[string]string) []computing.ReservationSetItem {
var filtered []computing.ReservationSetItem
for _, v := range vs {
iTag := v1alpha2.ParseTags(nifcloud.StringValue(v.Description))
ok := true
for key, val := range iTag {
if tags[key] != val {
ok = false
break
}
}
if ok {
filtered = append(filtered, v)
}
}
return filtered
}
func (s *Service) CreateInstance(scope *scope.MachineScope) (*infrav1alpha2.Instance, error) {
s.scope.V(2).Info("Creating an instance for a machine")
instanceID := scope.GetInstanceIDConved()
input := &infrav1alpha2.Instance{
ID: instanceID,
Type: scope.NifcloudMachine.Spec.InstanceType,
NetworkInterfaces: scope.NifcloudMachine.Spec.NetworkInterfaces,
}
// create tags
input.Tag = v1alpha2.BuildTags(v1alpha2.BuildParams{
ClusterName: s.scope.Name(),
Role: nifcloud.String(scope.Role()),
})
var err error
// set image from the machine configuration
if scope.NifcloudMachine.Spec.ImageID != "" {
input.ImageID = scope.NifcloudMachine.Spec.ImageID
} else {
input.ImageID, err = s.defaultImageLookup()
if err != nil {
return nil, err
}
}
// set userdata
userData, err := scope.GetUserData()
if err != nil {
scope.Info("failed to get bootstrap data")
return nil, err
}
input.UserData = pointer.StringPtr(userData)
ids, err := s.GetCoreSecurityGroup(scope)
if err != nil {
return nil, err
}
input.SecurityGroups = append(input.SecurityGroups, ids...)
// set SSH key
input.SSHKeyName = defaultSSHKeyName
if scope.NifcloudMachine.Spec.KeyName != "" {
input.SSHKeyName = scope.NifcloudMachine.Spec.KeyName
} else {
input.SSHKeyName = defaultSSHKeyName
}
s.scope.V(2).Info("Running instance", "machine-role", scope.Role())
out, err := s.runInstance(scope.Role(), input)
if err != nil {
record.Warnf(scope.NifcloudMachine, "FailedCreate", "Failed to create instance: %v", err)
return nil, err
}
if len(input.NetworkInterfaces) > 0 {
for _, id := range input.NetworkInterfaces {
// TODO: attach interface
s.scope.V(2).Info("Attaching security groups to provide network interface", "groups", "[TODO]", "interface", id)
}
}
// set reserved ip addr to controlplane
labels := scope.Machine.GetLabels()
if labels["cluster.x-k8s.io/control-plane"] == "true" {
if err := s.attachAddress(out.ID, scope.NifcloudCluster.Status.APIEndpoints[0].Host); err != nil {
return out, err
}
}
record.Eventf(scope.NifcloudMachine, "SuccessfulCreate", "Created new instance [%s/%s]", scope.Role(), out.ID)
return out, nil
}
func (s *Service) GetCoreSecurityGroup(scope *scope.MachineScope) ([]string, error) {
sgRoles := []infrav1alpha2.SecurityGroupRole{}
switch scope.Role() {
case "node", "control-plane":
sgRoles = append(sgRoles, infrav1alpha2.SecurityGroupControlPlane)
default:
return nil, errors.Errorf("Unknown node role %q", scope.Role())
}
ids := make([]string, 0, len(sgRoles))
for _, sg := range sgRoles {
if _, ok := s.scope.SecurityGroups()[sg]; !ok {
return nil, nferrors.NewFailedDependency(
errors.Errorf("%s security group not available", sg),
)
}
ids = append(ids, s.scope.SecurityGroups()[sg].Name)
}
return ids, nil
}
func (s *Service) StopAndTerminateInstanceWithTimeout(instanceID string) error {
ctx := context.TODO()
input := &computing.DescribeInstancesInput{
InstanceId: []string{instanceID},
}
// stopping server before terminating
if err := s.StopInstance(instanceID); err != nil {
return err
}
s.scope.V(2).Info("Waiting for Nifcloud server to stop", "instance-id", instanceID)
if err := s.scope.NifcloudClients.Computing.WaitUntilInstanceStopped(ctx, input); err != nil {
return fmt.Errorf("failed to wait for instance %q stopping: %w", instanceID, err)
}
if err := s.TerminateInstance(instanceID); err != nil {
return err
}
s.scope.V(2).Info("Waiting for Nifcloud server to terminate", "intance-id", instanceID)
if err := s.scope.NifcloudClients.Computing.WaitUntilInstanceDeleted(ctx, input); err != nil {
return fmt.Errorf("failed to wait for instance %q termination: %w", instanceID, err)
}
return nil
}
func (s *Service) TerminateInstance(instanceID string) error {
s.scope.V(2).Info("Try to terminate instance", "instance-id", instanceID)
input := &computing.TerminateInstancesInput{
InstanceId: []string{instanceID},
}
if _, err := s.scope.NifcloudClients.Computing.TerminateInstances(context.TODO(), input); err != nil {
return fmt.Errorf("failed to termiante instance with id %q: %w", instanceID, err)
}
s.scope.V(2).Info("Terminated instance", "instance-id", instanceID)
return nil
}
func (s *Service) StopInstance(instanceID string) error {
s.scope.V(2).Info("Try to stop instance", "instance-id", instanceID)
input := &computing.StopInstancesInput{
InstanceId: []string{instanceID},
}
if _, err := s.scope.NifcloudClients.Computing.StopInstances(context.TODO(), input); err != nil {
return fmt.Errorf("failed to stop instance with id %q: %w", instanceID, err)
}
s.scope.V(2).Info("Stoped instance", "instance-id", instanceID)
return nil
}
func (s *Service) runInstance(role string, i *infrav1alpha2.Instance) (*infrav1alpha2.Instance, error) {
apiTermination := infrav1alpha2.ApiTermination
input := &computing.RunInstancesInput{
InstanceId: &i.ID,
InstanceType: &i.Type,
ImageId: &i.ImageID,
KeyName: &i.SSHKeyName,
DisableApiTermination: &apiTermination,
}
if i.UserData != nil {
input.UserData = i.UserData
s.scope.Info("userData size", "bytes", len(nifcloud.StringValue(input.UserData)), "role", role)
}
if len(i.NetworkInterfaces) > 0 {
netInterfaces := make([]computing.RequestNetworkInterfaceStruct, 0, len(i.NetworkInterfaces))
for index, id := range i.NetworkInterfaces {
|
{
if id == nil {
s.scope.Info("Instance does not have an instance id")
return nil, nil
}
s.scope.V(2).Info("Looking for instance by id", "instance-id", *id)
input := &computing.DescribeInstancesInput{
InstanceId: []string{nifcloud.StringValue(id)},
}
out, err := s.scope.NifcloudClients.Computing.DescribeInstances(context.TODO(), input)
switch {
case nferrors.IsNotFound(err):
return nil, nil
case err != nil:
return nil, fmt.Errorf("failed to describe instance[%q]: %w", *id, err)
}
for _, rs := range out.ReservationSet {
|
identifier_body
|
|
instance.go
|
cloudClients.Computing.DescribeInstances(context.TODO(), input)
switch {
case nferrors.IsNotFound(err):
return nil, nil
case err != nil:
return nil, fmt.Errorf("failed to describe instance[%q]: %w", *id, err)
}
for _, rs := range out.ReservationSet {
if len(rs.InstancesSet) == 0 {
break
}
instance := rs.InstancesSet[0]
if nifcloud.StringValue(id) != nifcloud.StringValue(instance.InstanceId) {
continue
}
return s.SDKToInstance(out.ReservationSet[0].InstancesSet[0])
}
return nil, nil
}
func (s *Service) GetRunningInstanceByTag(scope *scope.MachineScope) (*infrav1alpha2.Instance, error) {
s.scope.V(2).Info("Looking for existing machine instance by tags")
input := &computing.DescribeInstancesInput{}
out, err := s.scope.NifcloudClients.Computing.DescribeInstances(context.TODO(), input)
switch {
case nferrors.IsNotFound(err):
return nil, nil
case err != nil:
return nil, fmt.Errorf("failed to describe running instance: %w", err)
}
filtered := s.FilterInstancesByTag(out.ReservationSet, map[string]string{
"cluster": s.scope.Name(),
"role": scope.Role(),
})
for _, res := range filtered {
for _, instance := range res.InstancesSet {
// filter by name
if nifcloud.StringValue(instance.InstanceId) != nifcloud.StringValue(scope.GetInstanceID()) {
continue
}
return s.SDKToInstance(instance)
}
}
return nil, nil
}
func (s *Service) FilterInstancesByTag(vs []computing.ReservationSetItem, tags map[string]string) []computing.ReservationSetItem {
var filtered []computing.ReservationSetItem
for _, v := range vs {
iTag := v1alpha2.ParseTags(nifcloud.StringValue(v.Description))
ok := true
for key, val := range iTag {
if tags[key] != val {
ok = false
break
}
}
if ok {
filtered = append(filtered, v)
}
}
return filtered
}
func (s *Service) CreateInstance(scope *scope.MachineScope) (*infrav1alpha2.Instance, error) {
s.scope.V(2).Info("Creating an instance for a machine")
instanceID := scope.GetInstanceIDConved()
input := &infrav1alpha2.Instance{
ID: instanceID,
Type: scope.NifcloudMachine.Spec.InstanceType,
NetworkInterfaces: scope.NifcloudMachine.Spec.NetworkInterfaces,
}
// create tags
input.Tag = v1alpha2.BuildTags(v1alpha2.BuildParams{
ClusterName: s.scope.Name(),
Role: nifcloud.String(scope.Role()),
})
var err error
// set image from the machine configuration
if scope.NifcloudMachine.Spec.ImageID != "" {
input.ImageID = scope.NifcloudMachine.Spec.ImageID
} else {
input.ImageID, err = s.defaultImageLookup()
if err != nil {
return nil, err
}
}
// set userdata
userData, err := scope.GetUserData()
if err != nil {
scope.Info("failed to get bootstrap data")
return nil, err
}
input.UserData = pointer.StringPtr(userData)
ids, err := s.GetCoreSecurityGroup(scope)
if err != nil
|
input.SecurityGroups = append(input.SecurityGroups, ids...)
// set SSH key
input.SSHKeyName = defaultSSHKeyName
if scope.NifcloudMachine.Spec.KeyName != "" {
input.SSHKeyName = scope.NifcloudMachine.Spec.KeyName
} else {
input.SSHKeyName = defaultSSHKeyName
}
s.scope.V(2).Info("Running instance", "machine-role", scope.Role())
out, err := s.runInstance(scope.Role(), input)
if err != nil {
record.Warnf(scope.NifcloudMachine, "FailedCreate", "Failed to create instance: %v", err)
return nil, err
}
if len(input.NetworkInterfaces) > 0 {
for _, id := range input.NetworkInterfaces {
// TODO: attach interface
s.scope.V(2).Info("Attaching security groups to provide network interface", "groups", "[TODO]", "interface", id)
}
}
// set reserved ip addr to controlplane
labels := scope.Machine.GetLabels()
if labels["cluster.x-k8s.io/control-plane"] == "true" {
if err := s.attachAddress(out.ID, scope.NifcloudCluster.Status.APIEndpoints[0].Host); err != nil {
return out, err
}
}
record.Eventf(scope.NifcloudMachine, "SuccessfulCreate", "Created new instance [%s/%s]", scope.Role(), out.ID)
return out, nil
}
func (s *Service) GetCoreSecurityGroup(scope *scope.MachineScope) ([]string, error) {
sgRoles := []infrav1alpha2.SecurityGroupRole{}
switch scope.Role() {
case "node", "control-plane":
sgRoles = append(sgRoles, infrav1alpha2.SecurityGroupControlPlane)
default:
return nil, errors.Errorf("Unknown node role %q", scope.Role())
}
ids := make([]string, 0, len(sgRoles))
for _, sg := range sgRoles {
if _, ok := s.scope.SecurityGroups()[sg]; !ok {
return nil, nferrors.NewFailedDependency(
errors.Errorf("%s security group not available", sg),
)
}
ids = append(ids, s.scope.SecurityGroups()[sg].Name)
}
return ids, nil
}
func (s *Service) StopAndTerminateInstanceWithTimeout(instanceID string) error {
ctx := context.TODO()
input := &computing.DescribeInstancesInput{
InstanceId: []string{instanceID},
}
// stopping server before terminating
if err := s.StopInstance(instanceID); err != nil {
return err
}
s.scope.V(2).Info("Waiting for Nifcloud server to stop", "instance-id", instanceID)
if err := s.scope.NifcloudClients.Computing.WaitUntilInstanceStopped(ctx, input); err != nil {
return fmt.Errorf("failed to wait for instance %q stopping: %w", instanceID, err)
}
if err := s.TerminateInstance(instanceID); err != nil {
return err
}
s.scope.V(2).Info("Waiting for Nifcloud server to terminate", "intance-id", instanceID)
if err := s.scope.NifcloudClients.Computing.WaitUntilInstanceDeleted(ctx, input); err != nil {
return fmt.Errorf("failed to wait for instance %q termination: %w", instanceID, err)
}
return nil
}
func (s *Service) TerminateInstance(instanceID string) error {
s.scope.V(2).Info("Try to terminate instance", "instance-id", instanceID)
input := &computing.TerminateInstancesInput{
InstanceId: []string{instanceID},
}
if _, err := s.scope.NifcloudClients.Computing.TerminateInstances(context.TODO(), input); err != nil {
return fmt.Errorf("failed to termiante instance with id %q: %w", instanceID, err)
}
s.scope.V(2).Info("Terminated instance", "instance-id", instanceID)
return nil
}
func (s *Service) StopInstance(instanceID string) error {
s.scope.V(2).Info("Try to stop instance", "instance-id", instanceID)
input := &computing.StopInstancesInput{
InstanceId: []string{instanceID},
}
if _, err := s.scope.NifcloudClients.Computing.StopInstances(context.TODO(), input); err != nil {
return fmt.Errorf("failed to stop instance with id %q: %w", instanceID, err)
}
s.scope.V(2).Info("Stoped instance", "instance-id", instanceID)
return nil
}
func (s *Service) runInstance(role string, i *infrav1alpha2.Instance) (*infrav1alpha2.Instance, error) {
apiTermination := infrav1alpha2.ApiTermination
input := &computing.RunInstancesInput{
InstanceId: &i.ID,
InstanceType: &i.Type,
ImageId: &i.ImageID,
KeyName: &i.SSHKeyName,
DisableApiTermination: &apiTermination,
}
if i.UserData != nil {
input.UserData = i.UserData
s.scope.Info("userData size", "bytes", len(nifcloud.StringValue(input.UserData)), "role", role)
}
if len(i.NetworkInterfaces) > 0 {
netInterfaces := make([]computing.RequestNetworkInterfaceStruct, 0, len(i.NetworkInterfaces))
for index, id := range i.NetworkInterfaces {
idx := int64(index)
netInterfaces = append(netInterfaces, computing.RequestNetworkInterfaceStruct{
DeviceIndex: &idx,
NetworkId: &id,
})
}
input.NetworkInterface = netInterfaces
} else {
if len(i.SecurityGroups) > 0 {
input.SecurityGroup = i.SecurityGroups
}
|
{
return nil, err
}
|
conditional_block
|
instance.go
|
ok {
filtered = append(filtered, v)
}
}
return filtered
}
func (s *Service) CreateInstance(scope *scope.MachineScope) (*infrav1alpha2.Instance, error) {
s.scope.V(2).Info("Creating an instance for a machine")
instanceID := scope.GetInstanceIDConved()
input := &infrav1alpha2.Instance{
ID: instanceID,
Type: scope.NifcloudMachine.Spec.InstanceType,
NetworkInterfaces: scope.NifcloudMachine.Spec.NetworkInterfaces,
}
// create tags
input.Tag = v1alpha2.BuildTags(v1alpha2.BuildParams{
ClusterName: s.scope.Name(),
Role: nifcloud.String(scope.Role()),
})
var err error
// set image from the machine configuration
if scope.NifcloudMachine.Spec.ImageID != "" {
input.ImageID = scope.NifcloudMachine.Spec.ImageID
} else {
input.ImageID, err = s.defaultImageLookup()
if err != nil {
return nil, err
}
}
// set userdata
userData, err := scope.GetUserData()
if err != nil {
scope.Info("failed to get bootstrap data")
return nil, err
}
input.UserData = pointer.StringPtr(userData)
ids, err := s.GetCoreSecurityGroup(scope)
if err != nil {
return nil, err
}
input.SecurityGroups = append(input.SecurityGroups, ids...)
// set SSH key
input.SSHKeyName = defaultSSHKeyName
if scope.NifcloudMachine.Spec.KeyName != "" {
input.SSHKeyName = scope.NifcloudMachine.Spec.KeyName
} else {
input.SSHKeyName = defaultSSHKeyName
}
s.scope.V(2).Info("Running instance", "machine-role", scope.Role())
out, err := s.runInstance(scope.Role(), input)
if err != nil {
record.Warnf(scope.NifcloudMachine, "FailedCreate", "Failed to create instance: %v", err)
return nil, err
}
if len(input.NetworkInterfaces) > 0 {
for _, id := range input.NetworkInterfaces {
// TODO: attach interface
s.scope.V(2).Info("Attaching security groups to provide network interface", "groups", "[TODO]", "interface", id)
}
}
// set reserved ip addr to controlplane
labels := scope.Machine.GetLabels()
if labels["cluster.x-k8s.io/control-plane"] == "true" {
if err := s.attachAddress(out.ID, scope.NifcloudCluster.Status.APIEndpoints[0].Host); err != nil {
return out, err
}
}
record.Eventf(scope.NifcloudMachine, "SuccessfulCreate", "Created new instance [%s/%s]", scope.Role(), out.ID)
return out, nil
}
func (s *Service) GetCoreSecurityGroup(scope *scope.MachineScope) ([]string, error) {
sgRoles := []infrav1alpha2.SecurityGroupRole{}
switch scope.Role() {
case "node", "control-plane":
sgRoles = append(sgRoles, infrav1alpha2.SecurityGroupControlPlane)
default:
return nil, errors.Errorf("Unknown node role %q", scope.Role())
}
ids := make([]string, 0, len(sgRoles))
for _, sg := range sgRoles {
if _, ok := s.scope.SecurityGroups()[sg]; !ok {
return nil, nferrors.NewFailedDependency(
errors.Errorf("%s security group not available", sg),
)
}
ids = append(ids, s.scope.SecurityGroups()[sg].Name)
}
return ids, nil
}
func (s *Service) StopAndTerminateInstanceWithTimeout(instanceID string) error {
ctx := context.TODO()
input := &computing.DescribeInstancesInput{
InstanceId: []string{instanceID},
}
// stopping server before terminating
if err := s.StopInstance(instanceID); err != nil {
return err
}
s.scope.V(2).Info("Waiting for Nifcloud server to stop", "instance-id", instanceID)
if err := s.scope.NifcloudClients.Computing.WaitUntilInstanceStopped(ctx, input); err != nil {
return fmt.Errorf("failed to wait for instance %q stopping: %w", instanceID, err)
}
if err := s.TerminateInstance(instanceID); err != nil {
return err
}
s.scope.V(2).Info("Waiting for Nifcloud server to terminate", "intance-id", instanceID)
if err := s.scope.NifcloudClients.Computing.WaitUntilInstanceDeleted(ctx, input); err != nil {
return fmt.Errorf("failed to wait for instance %q termination: %w", instanceID, err)
}
return nil
}
func (s *Service) TerminateInstance(instanceID string) error {
s.scope.V(2).Info("Try to terminate instance", "instance-id", instanceID)
input := &computing.TerminateInstancesInput{
InstanceId: []string{instanceID},
}
if _, err := s.scope.NifcloudClients.Computing.TerminateInstances(context.TODO(), input); err != nil {
return fmt.Errorf("failed to termiante instance with id %q: %w", instanceID, err)
}
s.scope.V(2).Info("Terminated instance", "instance-id", instanceID)
return nil
}
func (s *Service) StopInstance(instanceID string) error {
s.scope.V(2).Info("Try to stop instance", "instance-id", instanceID)
input := &computing.StopInstancesInput{
InstanceId: []string{instanceID},
}
if _, err := s.scope.NifcloudClients.Computing.StopInstances(context.TODO(), input); err != nil {
return fmt.Errorf("failed to stop instance with id %q: %w", instanceID, err)
}
s.scope.V(2).Info("Stoped instance", "instance-id", instanceID)
return nil
}
func (s *Service) runInstance(role string, i *infrav1alpha2.Instance) (*infrav1alpha2.Instance, error) {
apiTermination := infrav1alpha2.ApiTermination
input := &computing.RunInstancesInput{
InstanceId: &i.ID,
InstanceType: &i.Type,
ImageId: &i.ImageID,
KeyName: &i.SSHKeyName,
DisableApiTermination: &apiTermination,
}
if i.UserData != nil {
input.UserData = i.UserData
s.scope.Info("userData size", "bytes", len(nifcloud.StringValue(input.UserData)), "role", role)
}
if len(i.NetworkInterfaces) > 0 {
netInterfaces := make([]computing.RequestNetworkInterfaceStruct, 0, len(i.NetworkInterfaces))
for index, id := range i.NetworkInterfaces {
idx := int64(index)
netInterfaces = append(netInterfaces, computing.RequestNetworkInterfaceStruct{
DeviceIndex: &idx,
NetworkId: &id,
})
}
input.NetworkInterface = netInterfaces
} else {
if len(i.SecurityGroups) > 0 {
input.SecurityGroup = i.SecurityGroups
}
}
// tag to instance Description
input.Description = i.Tag.ConvToString()
ctx := context.TODO()
creating, err := s.scope.NifcloudClients.Computing.RunInstances(ctx, input)
if err != nil {
return nil, fmt.Errorf("failed to run instance: %w", err)
}
instanceID := *creating.InstancesSet[0].InstanceId
s.scope.V(2).Info("Waiting for instance to be in running state", "instance-id", instanceID)
if len(creating.InstancesSet) == 0 {
return nil, fmt.Errorf("no instance returned for reservation: %v", creating.String())
}
describeInput := &computing.DescribeInstancesInput{
InstanceId: []string{instanceID},
}
if err := s.scope.NifcloudClients.Computing.WaitUntilInstanceRunning(ctx, describeInput); err != nil {
return nil, fmt.Errorf("failed to wait for instance %q running: %w", instanceID, err)
}
running, err := s.scope.NifcloudClients.Computing.DescribeInstances(ctx, describeInput)
switch {
case nferrors.IsNotFound(err):
return nil, nil
case err != nil:
return nil, fmt.Errorf("failed to describe instance[%q]: %w", instanceID, err)
}
return s.SDKToInstance(running.ReservationSet[0].InstancesSet[0])
}
func (s *Service) SDKToInstance(v computing.InstancesSetItem) (*infrav1alpha2.Instance, error) {
i := &infrav1alpha2.Instance{
UID: *v.InstanceUniqueId,
ID: *v.InstanceId,
Zone: *v.Placement.AvailabilityZone,
State: infrav1alpha2.InstanceState(*v.InstanceState.Name),
Type: *v.InstanceType,
ImageID: *v.ImageId,
SSHKeyName: *v.KeyName,
PublicIP: *v.IpAddress,
PrivateIP: *v.PrivateIpAddress,
}
i.Tag = v1alpha2.ParseTags(*v.Description)
// TODO: security groups
i.Addresses = s.getInstanceAddresses(&v)
return i, nil
}
func (s *Service)
|
getInstanceAddresses
|
identifier_name
|
|
king.py
|
S {self.planting_cost} RALLODS "
"PER SQUARE MILE TO PLANT.\n"
)
def handle_deaths(
self, distributed_rallods: int, pollution_control_spendings: int
) -> None:
starved_countrymen = max(
0, int(self.countrymen - distributed_rallods / COST_OF_LIVING)
)
if starved_countrymen > 0:
print(f"{starved_countrymen} COUNTRYMEN DIED OF STARVATION")
self.pollution_deaths = int(random() * (INITIAL_LAND - self.land))
if pollution_control_spendings >= POLLUTION_CONTROL_FACTOR:
self.pollution_deaths = int(
self.pollution_deaths
/ (pollution_control_spendings / POLLUTION_CONTROL_FACTOR)
)
if self.pollution_deaths > 0:
print(
f"{self.pollution_deaths} COUNTRYMEN DIED OF CARBON-MONOXIDE "
f"AND DUST INHALATION"
)
self.died_contrymen = starved_countrymen + self.pollution_deaths
if self.died_contrymen > 0:
funeral_cost = self.died_contrymen * COST_OF_FUNERAL
print(f" YOU WERE FORCED TO SPEND {funeral_cost} RALLODS ON ")
print("FUNERAL EXPENSES.")
self.rallods -= funeral_cost
if self.rallods < 0:
print(" INSUFFICIENT RESERVES TO COVER COST - LAND WAS SOLD")
self.land += int(self.rallods / self.land_buy_price)
self.rallods = 0
self.countrymen -= self.died_contrymen
def handle_tourist_trade(self) -> None:
V1 = int(self.settled_people * 22 + random() * 500)
V2 = int((INITIAL_LAND - self.land) * 15)
tourist_trade_earnings = int(V1 - V2)
print(f" YOU MADE {tourist_trade_earnings} RALLODS FROM TOURIST TRADE.")
if V2 != 0 and not (V1 - V2 >= self.tourism_earnings):
print(" DECREASE BECAUSE ")
reason = randint(0, 10)
if reason <= 2:
print("FISH POPULATION HAS DWINDLED DUE TO WATER POLLUTION.")
if reason <= 4:
print("AIR POLLUTION IS KILLING GAME BIRD POPULATION.")
if reason <= 6:
print("MINERAL BATHS ARE BEING RUINED BY WATER POLLUTION.")
if reason <= 8:
print("UNPLEASANT SMOG IS DISCOURAGING SUN BATHERS.")
if reason <= 10:
print("HOTELS ARE LOOKING SHABBY DUE TO SMOG GRIT.")
# NOTE: The following two lines had a bug in the original game:
self.tourism_earnings = abs(int(V1 - V2))
self.rallods += self.tourism_earnings
def handle_harvest(self, planted_sq: int) -> None:
crop_loss = int((INITIAL_LAND - self.land) * ((random() + 1.5) / 2))
if self.foreign_workers != 0:
print(f"OF {planted_sq} SQ. MILES PLANTED,")
if planted_sq <= crop_loss:
crop_loss = planted_sq
harvested = int(planted_sq - crop_loss)
print(f" YOU HARVESTED {harvested} SQ. MILES OF CROPS.")
unlucky_harvesting_worse = crop_loss - self.crop_loss_last_year
if crop_loss != 0:
print(" (DUE TO ", end="")
if unlucky_harvesting_worse > 2:
print("INCREASED ", end="")
print("AIR AND WATER POLLUTION FROM FOREIGN INDUSTRY.)")
revenue = int((planted_sq - crop_loss) * (self.land_buy_price / 2))
print(f"MAKING {revenue} RALLODS.")
self.crop_loss_last_year = crop_loss
self.rallods += revenue
def handle_foreign_workers(
self,
sm_sell_to_industry: int,
distributed_rallods: int,
polltion_control_spendings: int,
) -> None:
foreign_workers_influx = 0
if sm_sell_to_industry != 0:
foreign_workers_influx = int(
sm_sell_to_industry + (random() * 10) - (random() * 20)
)
if self.foreign_workers <= 0:
foreign_workers_influx = foreign_workers_influx + 20
print(f"{foreign_workers_influx} WORKERS CAME TO THE COUNTRY AND")
surplus_distributed = distributed_rallods / COST_OF_LIVING - self.countrymen
population_change = int(
(surplus_distributed / 10)
+ (polltion_control_spendings / POLLUTION_CONTROL_FACTOR)
- ((INITIAL_LAND - self.land) / 50)
- (self.died_contrymen / 2)
)
print(f"{abs(population_change)} COUNTRYMEN ", end="")
if population_change < 0:
print("LEFT ", end="")
else:
print("CAME TO ", end="")
print("THE ISLAND")
self.countrymen += population_change
self.foreign_workers += int(foreign_workers_influx)
def handle_too_many_deaths(self) -> None:
print(f"\n\n\n{self.died_contrymen} COUNTRYMEN DIED IN ONE YEAR!!!!!")
print("\n\n\nDUE TO THIS EXTREME MISMANAGEMENT, YOU HAVE NOT ONLY")
print("BEEN IMPEACHED AND THROWN OUT OF OFFICE, BUT YOU")
message = randint(0, 10)
if message <= 3:
print("ALSO HAD YOUR LEFT EYE GOUGED OUT!")
if message <= 6:
|
if message <= 10:
print("HAVE ALSO BEEN DECLARED NATIONAL FINK.")
sys.exit()
def handle_third_died(self) -> None:
print()
print()
print("OVER ONE THIRD OF THE POPULTATION HAS DIED SINCE YOU")
print("WERE ELECTED TO OFFICE. THE PEOPLE (REMAINING)")
print("HATE YOUR GUTS.")
self.end_game()
def handle_money_mismanagement(self) -> None:
print()
print("MONEY WAS LEFT OVER IN THE TREASURY WHICH YOU DID")
print("NOT SPEND. AS A RESULT, SOME OF YOUR COUNTRYMEN DIED")
print("OF STARVATION. THE PUBLIC IS ENRAGED AND YOU HAVE")
print("BEEN FORCED TO EITHER RESIGN OR COMMIT SUICIDE.")
print("THE CHOICE IS YOURS.")
print("IF YOU CHOOSE THE LATTER, PLEASE TURN OFF YOUR COMPUTER")
print("BEFORE PROCEEDING.")
sys.exit()
def handle_too_many_foreigners(self) -> None:
print("\n\nTHE NUMBER OF FOREIGN WORKERS HAS EXCEEDED THE NUMBER")
print("OF COUNTRYMEN. AS A MINORITY, THEY HAVE REVOLTED AND")
print("TAKEN OVER THE COUNTRY.")
self.end_game()
def end_game(self) -> None:
if random() <= 0.5:
print("YOU HAVE BEEN ASSASSINATED.")
else:
print("YOU HAVE BEEN THROWN OUT OF OFFICE AND ARE NOW")
print("RESIDING IN PRISON.")
sys.exit()
def handle_congratulations(self) -> None:
print("\n\nCONGRATULATIONS!!!!!!!!!!!!!!!!!!")
print(f"YOU HAVE SUCCESFULLY COMPLETED YOUR {YEARS_IN_TERM} YEAR TERM")
print("OF OFFICE. YOU WERE, OF COURSE, EXTREMELY LUCKY, BUT")
print("NEVERTHELESS, IT'S QUITE AN ACHIEVEMENT. GOODBYE AND GOOD")
print("LUCK - YOU'LL PROBABLY NEED IT IF YOU'RE THE TYPE THAT")
print("PLAYS THIS GAME.")
sys.exit()
def print_header() -> None:
print(" " * 34 + "KING")
print(" " * 15 + "CREATIVE COMPUTING MORRISTOWN, NEW JERSEY\n\n\n")
def print_instructions() -> None:
print(
f"""\n\n\nCONGRATULATIONS! YOU'VE JUST BEEN ELECTED PREMIER OF SETATS
DETINU, A SMALL COMMUNIST ISLAND 30 BY 70 MILES LONG. YOUR
JOB IS TO DECIDE UPON THE CONTRY'S BUDGET AND DISTRIBUTE
MONEY TO YOUR COUNTRYMEN FROM THE COMMUNAL TREASURY.
THE MONEY SYSTEM IS RALLODS, AND EACH PERSON NEEDS {COST_OF_LIVING}
RALLODS PER YEAR TO SURVIVE. YOUR COUNTRY'S INCOME COMES
FROM FARM PRODUCE
|
print("HAVE ALSO GAINED A VERY BAD REPUTATION.")
|
conditional_block
|
king.py
|
IMPEACHED AND THROWN OUT OF OFFICE, BUT YOU")
message = randint(0, 10)
if message <= 3:
print("ALSO HAD YOUR LEFT EYE GOUGED OUT!")
if message <= 6:
print("HAVE ALSO GAINED A VERY BAD REPUTATION.")
if message <= 10:
print("HAVE ALSO BEEN DECLARED NATIONAL FINK.")
sys.exit()
def handle_third_died(self) -> None:
print()
print()
print("OVER ONE THIRD OF THE POPULTATION HAS DIED SINCE YOU")
print("WERE ELECTED TO OFFICE. THE PEOPLE (REMAINING)")
print("HATE YOUR GUTS.")
self.end_game()
def handle_money_mismanagement(self) -> None:
print()
print("MONEY WAS LEFT OVER IN THE TREASURY WHICH YOU DID")
print("NOT SPEND. AS A RESULT, SOME OF YOUR COUNTRYMEN DIED")
print("OF STARVATION. THE PUBLIC IS ENRAGED AND YOU HAVE")
print("BEEN FORCED TO EITHER RESIGN OR COMMIT SUICIDE.")
print("THE CHOICE IS YOURS.")
print("IF YOU CHOOSE THE LATTER, PLEASE TURN OFF YOUR COMPUTER")
print("BEFORE PROCEEDING.")
sys.exit()
def handle_too_many_foreigners(self) -> None:
print("\n\nTHE NUMBER OF FOREIGN WORKERS HAS EXCEEDED THE NUMBER")
print("OF COUNTRYMEN. AS A MINORITY, THEY HAVE REVOLTED AND")
print("TAKEN OVER THE COUNTRY.")
self.end_game()
def end_game(self) -> None:
if random() <= 0.5:
print("YOU HAVE BEEN ASSASSINATED.")
else:
print("YOU HAVE BEEN THROWN OUT OF OFFICE AND ARE NOW")
print("RESIDING IN PRISON.")
sys.exit()
def handle_congratulations(self) -> None:
print("\n\nCONGRATULATIONS!!!!!!!!!!!!!!!!!!")
print(f"YOU HAVE SUCCESFULLY COMPLETED YOUR {YEARS_IN_TERM} YEAR TERM")
print("OF OFFICE. YOU WERE, OF COURSE, EXTREMELY LUCKY, BUT")
print("NEVERTHELESS, IT'S QUITE AN ACHIEVEMENT. GOODBYE AND GOOD")
print("LUCK - YOU'LL PROBABLY NEED IT IF YOU'RE THE TYPE THAT")
print("PLAYS THIS GAME.")
sys.exit()
def print_header() -> None:
print(" " * 34 + "KING")
print(" " * 15 + "CREATIVE COMPUTING MORRISTOWN, NEW JERSEY\n\n\n")
def print_instructions() -> None:
print(
f"""\n\n\nCONGRATULATIONS! YOU'VE JUST BEEN ELECTED PREMIER OF SETATS
DETINU, A SMALL COMMUNIST ISLAND 30 BY 70 MILES LONG. YOUR
JOB IS TO DECIDE UPON THE CONTRY'S BUDGET AND DISTRIBUTE
MONEY TO YOUR COUNTRYMEN FROM THE COMMUNAL TREASURY.
THE MONEY SYSTEM IS RALLODS, AND EACH PERSON NEEDS {COST_OF_LIVING}
RALLODS PER YEAR TO SURVIVE. YOUR COUNTRY'S INCOME COMES
FROM FARM PRODUCE AND TOURISTS VISITING YOUR MAGNIFICENT
FORESTS, HUNTING, FISHING, ETC. HALF YOUR LAND IS FARM LAND
WHICH ALSO HAS AN EXCELLENT MINERAL CONTENT AND MAY BE SOLD
TO FOREIGN INDUSTRY (STRIP MINING) WHO IMPORT AND SUPPORT
THEIR OWN WORKERS. CROPS COST BETWEEN 10 AND 15 RALLODS PER
SQUARE MILE TO PLANT.
YOUR GOAL IS TO COMPLETE YOUR {YEARS_IN_TERM} YEAR TERM OF OFFICE.
GOOD LUCK!"""
)
def ask_how_many_sq_to_plant(state: GameState) -> int:
while True:
sq = ask_int("HOW MANY SQUARE MILES DO YOU WISH TO PLANT? ")
if sq < 0:
continue
elif sq > 2 * state.countrymen:
print(" SORRY, BUT EACH COUNTRYMAN CAN ONLY PLANT 2 SQ. MILES.")
elif sq > state.farmland:
print(
f" SORRY, BUT YOU ONLY HAVE {state.farmland} "
"SQ. MILES OF FARM LAND."
)
elif sq * state.planting_cost > state.rallods:
print(
f" THINK AGAIN. YOU'VE ONLY {state.rallods} RALLODS "
"LEFT IN THE TREASURY."
)
else:
return sq
def ask_pollution_control(state: GameState) -> int:
while True:
rallods = ask_int(
"HOW MANY RALLODS DO YOU WISH TO SPEND ON POLLUTION CONTROL? "
)
if rallods > state.rallods:
print(f" THINK AGAIN. YOU ONLY HAVE {state.rallods} RALLODS REMAINING.")
elif rallods < 0:
continue
else:
return rallods
def ask_sell_to_industry(state: GameState) -> int:
had_first_err = False
first = """(FOREIGN INDUSTRY WILL ONLY BUY FARM LAND BECAUSE
FOREST LAND IS UNECONOMICAL TO STRIP MINE DUE TO TREES,
THICKER TOP SOIL, ETC.)"""
err = f"""*** THINK AGAIN. YOU ONLY HAVE {state.farmland} SQUARE MILES OF FARM LAND."""
while True:
sm = input("HOW MANY SQUARE MILES DO YOU WISH TO SELL TO INDUSTRY? ")
try:
sm_sell = int(sm)
except ValueError:
if not had_first_err:
print(first)
had_first_err = True
print(err)
continue
if sm_sell > state.farmland:
print(err)
elif sm_sell < 0:
continue
else:
return sm_sell
def ask_distribute_rallods(state: GameState) -> int:
while True:
rallods = ask_int(
"HOW MANY RALLODS WILL YOU DISTRIBUTE AMONG YOUR COUNTRYMEN? "
)
if rallods < 0:
continue
elif rallods > state.rallods:
print(
f" THINK AGAIN. YOU'VE ONLY {state.rallods} RALLODS IN THE TREASURY"
)
else:
return rallods
def resume() -> GameState:
while True:
years = ask_int("HOW MANY YEARS HAD YOU BEEN IN OFFICE WHEN INTERRUPTED? ")
if years < 0:
sys.exit()
if years >= YEARS_IN_TERM:
print(f" COME ON, YOUR TERM IN OFFICE IS ONLY {YEARS_IN_TERM} YEARS.")
else:
break
treasury = ask_int("HOW MUCH DID YOU HAVE IN THE TREASURY? ")
if treasury < 0:
sys.exit()
countrymen = ask_int("HOW MANY COUNTRYMEN? ")
if countrymen < 0:
sys.exit()
workers = ask_int("HOW MANY WORKERS? ")
if workers < 0:
sys.exit()
while True:
land = ask_int("HOW MANY SQUARE MILES OF LAND? ")
if land < 0:
sys.exit()
if land > INITIAL_LAND:
farm_land = INITIAL_LAND - FOREST_LAND
print(f" COME ON, YOU STARTED WITH {farm_land:,} SQ. MILES OF FARM LAND")
print(f" AND {FOREST_LAND:,} SQ. MILES OF FOREST LAND.")
if land > FOREST_LAND:
break
return GameState(
rallods=treasury,
countrymen=countrymen,
foreign_workers=workers,
years_in_office=years,
)
def main() -> None:
print_header()
want_instructions = input("DO YOU WANT INSTRUCTIONS? ").upper()
if want_instructions == "AGAIN":
state = resume()
else:
state = GameState(
rallods=randint(59000, 61000),
countrymen=randint(490, 510),
planting_cost=randint(10, 15),
)
if want_instructions != "NO":
print_instructions()
while True:
state.set_market_conditions()
state.print_status()
# Users actions
sm_sell_to_industry = ask_sell_to_industry(state)
state.sell_land(sm_sell_to_industry)
distributed_rallods = ask_distribute_rallods(state)
state.distribute_rallods(distributed_rallods)
planted_sq = ask_how_many_sq_to_plant(state)
state.plant(planted_sq)
polltion_control_spendings = ask_pollution_control(state)
state.spend_pollution_control(polltion_control_spendings)
# Run the year
state.handle_deaths(distributed_rallods, polltion_control_spendings)
state.handle_foreign_workers(
|
sm_sell_to_industry, distributed_rallods, polltion_control_spendings
)
|
random_line_split
|
|
king.py
|
def handle_deaths(
self, distributed_rallods: int, pollution_control_spendings: int
) -> None:
starved_countrymen = max(
0, int(self.countrymen - distributed_rallods / COST_OF_LIVING)
)
if starved_countrymen > 0:
print(f"{starved_countrymen} COUNTRYMEN DIED OF STARVATION")
self.pollution_deaths = int(random() * (INITIAL_LAND - self.land))
if pollution_control_spendings >= POLLUTION_CONTROL_FACTOR:
self.pollution_deaths = int(
self.pollution_deaths
/ (pollution_control_spendings / POLLUTION_CONTROL_FACTOR)
)
if self.pollution_deaths > 0:
print(
f"{self.pollution_deaths} COUNTRYMEN DIED OF CARBON-MONOXIDE "
f"AND DUST INHALATION"
)
self.died_contrymen = starved_countrymen + self.pollution_deaths
if self.died_contrymen > 0:
funeral_cost = self.died_contrymen * COST_OF_FUNERAL
print(f" YOU WERE FORCED TO SPEND {funeral_cost} RALLODS ON ")
print("FUNERAL EXPENSES.")
self.rallods -= funeral_cost
if self.rallods < 0:
print(" INSUFFICIENT RESERVES TO COVER COST - LAND WAS SOLD")
self.land += int(self.rallods / self.land_buy_price)
self.rallods = 0
self.countrymen -= self.died_contrymen
def handle_tourist_trade(self) -> None:
V1 = int(self.settled_people * 22 + random() * 500)
V2 = int((INITIAL_LAND - self.land) * 15)
tourist_trade_earnings = int(V1 - V2)
print(f" YOU MADE {tourist_trade_earnings} RALLODS FROM TOURIST TRADE.")
if V2 != 0 and not (V1 - V2 >= self.tourism_earnings):
print(" DECREASE BECAUSE ")
reason = randint(0, 10)
if reason <= 2:
print("FISH POPULATION HAS DWINDLED DUE TO WATER POLLUTION.")
if reason <= 4:
print("AIR POLLUTION IS KILLING GAME BIRD POPULATION.")
if reason <= 6:
print("MINERAL BATHS ARE BEING RUINED BY WATER POLLUTION.")
if reason <= 8:
print("UNPLEASANT SMOG IS DISCOURAGING SUN BATHERS.")
if reason <= 10:
print("HOTELS ARE LOOKING SHABBY DUE TO SMOG GRIT.")
# NOTE: The following two lines had a bug in the original game:
self.tourism_earnings = abs(int(V1 - V2))
self.rallods += self.tourism_earnings
def handle_harvest(self, planted_sq: int) -> None:
crop_loss = int((INITIAL_LAND - self.land) * ((random() + 1.5) / 2))
if self.foreign_workers != 0:
print(f"OF {planted_sq} SQ. MILES PLANTED,")
if planted_sq <= crop_loss:
crop_loss = planted_sq
harvested = int(planted_sq - crop_loss)
print(f" YOU HARVESTED {harvested} SQ. MILES OF CROPS.")
unlucky_harvesting_worse = crop_loss - self.crop_loss_last_year
if crop_loss != 0:
print(" (DUE TO ", end="")
if unlucky_harvesting_worse > 2:
print("INCREASED ", end="")
print("AIR AND WATER POLLUTION FROM FOREIGN INDUSTRY.)")
revenue = int((planted_sq - crop_loss) * (self.land_buy_price / 2))
print(f"MAKING {revenue} RALLODS.")
self.crop_loss_last_year = crop_loss
self.rallods += revenue
def handle_foreign_workers(
self,
sm_sell_to_industry: int,
distributed_rallods: int,
polltion_control_spendings: int,
) -> None:
foreign_workers_influx = 0
if sm_sell_to_industry != 0:
foreign_workers_influx = int(
sm_sell_to_industry + (random() * 10) - (random() * 20)
)
if self.foreign_workers <= 0:
foreign_workers_influx = foreign_workers_influx + 20
print(f"{foreign_workers_influx} WORKERS CAME TO THE COUNTRY AND")
surplus_distributed = distributed_rallods / COST_OF_LIVING - self.countrymen
population_change = int(
(surplus_distributed / 10)
+ (polltion_control_spendings / POLLUTION_CONTROL_FACTOR)
- ((INITIAL_LAND - self.land) / 50)
- (self.died_contrymen / 2)
)
print(f"{abs(population_change)} COUNTRYMEN ", end="")
if population_change < 0:
print("LEFT ", end="")
else:
print("CAME TO ", end="")
print("THE ISLAND")
self.countrymen += population_change
self.foreign_workers += int(foreign_workers_influx)
def handle_too_many_deaths(self) -> None:
print(f"\n\n\n{self.died_contrymen} COUNTRYMEN DIED IN ONE YEAR!!!!!")
print("\n\n\nDUE TO THIS EXTREME MISMANAGEMENT, YOU HAVE NOT ONLY")
print("BEEN IMPEACHED AND THROWN OUT OF OFFICE, BUT YOU")
message = randint(0, 10)
if message <= 3:
print("ALSO HAD YOUR LEFT EYE GOUGED OUT!")
if message <= 6:
print("HAVE ALSO GAINED A VERY BAD REPUTATION.")
if message <= 10:
print("HAVE ALSO BEEN DECLARED NATIONAL FINK.")
sys.exit()
def handle_third_died(self) -> None:
print()
print()
print("OVER ONE THIRD OF THE POPULTATION HAS DIED SINCE YOU")
print("WERE ELECTED TO OFFICE. THE PEOPLE (REMAINING)")
print("HATE YOUR GUTS.")
self.end_game()
def handle_money_mismanagement(self) -> None:
print()
print("MONEY WAS LEFT OVER IN THE TREASURY WHICH YOU DID")
print("NOT SPEND. AS A RESULT, SOME OF YOUR COUNTRYMEN DIED")
print("OF STARVATION. THE PUBLIC IS ENRAGED AND YOU HAVE")
print("BEEN FORCED TO EITHER RESIGN OR COMMIT SUICIDE.")
print("THE CHOICE IS YOURS.")
print("IF YOU CHOOSE THE LATTER, PLEASE TURN OFF YOUR COMPUTER")
print("BEFORE PROCEEDING.")
sys.exit()
def handle_too_many_foreigners(self) -> None:
print("\n\nTHE NUMBER OF FOREIGN WORKERS HAS EXCEEDED THE NUMBER")
print("OF COUNTRYMEN. AS A MINORITY, THEY HAVE REVOLTED AND")
print("TAKEN OVER THE COUNTRY.")
self.end_game()
def end_game(self) -> None:
if random() <= 0.5:
print("YOU HAVE BEEN ASSASSINATED.")
else:
print("YOU HAVE BEEN THROWN OUT OF OFFICE AND ARE NOW")
print("RESIDING IN PRISON.")
sys.exit()
def handle_congratulations(self) -> None:
print("\n\nCONGRATULATIONS!!!!!!!!!!!!!!!!!!")
print(f"YOU HAVE SUCCESFULLY COMPLETED YOUR {YEARS_IN_TERM} YEAR TERM")
print("OF OFFICE. YOU WERE, OF COURSE, EXTREMELY LUCKY, BUT")
print("NEVERTHELESS, IT'S QUITE AN ACHIEVEMENT. GOODBYE AND GOOD")
print("LUCK - YOU'LL PROBABLY NEED IT IF YOU'RE THE TYPE THAT")
print("PLAYS THIS GAME.")
sys.exit()
def print_header() -> None:
print(" " * 34 + "KING")
print(" " * 15 + "CREATIVE COMPUTING MORRISTOWN, NEW JERSEY\n\n\n")
def print_instructions() -> None:
print(
f""
|
print(f"\n\nYOU NOW HAVE {self.rallods} RALLODS IN THE TREASURY.")
print(f"{int(self.countrymen)} COUNTRYMEN, ", end="")
if self.foreign_workers > 0:
print(f"{int(self.foreign_workers)} FOREIGN WORKERS, ", end="")
print(f"AND {self.land} SQ. MILES OF LAND.")
print(
f"THIS YEAR INDUSTRY WILL BUY LAND FOR {self.land_buy_price} "
"RALLODS PER SQUARE MILE."
)
print(
f"LAND CURRENTLY COSTS {self.planting_cost} RALLODS "
"PER SQUARE MILE TO PLANT.\n"
)
|
identifier_body
|
|
king.py
|
S {self.planting_cost} RALLODS "
"PER SQUARE MILE TO PLANT.\n"
)
def handle_deaths(
self, distributed_rallods: int, pollution_control_spendings: int
) -> None:
starved_countrymen = max(
0, int(self.countrymen - distributed_rallods / COST_OF_LIVING)
)
if starved_countrymen > 0:
print(f"{starved_countrymen} COUNTRYMEN DIED OF STARVATION")
self.pollution_deaths = int(random() * (INITIAL_LAND - self.land))
if pollution_control_spendings >= POLLUTION_CONTROL_FACTOR:
self.pollution_deaths = int(
self.pollution_deaths
/ (pollution_control_spendings / POLLUTION_CONTROL_FACTOR)
)
if self.pollution_deaths > 0:
print(
f"{self.pollution_deaths} COUNTRYMEN DIED OF CARBON-MONOXIDE "
f"AND DUST INHALATION"
)
self.died_contrymen = starved_countrymen + self.pollution_deaths
if self.died_contrymen > 0:
funeral_cost = self.died_contrymen * COST_OF_FUNERAL
print(f" YOU WERE FORCED TO SPEND {funeral_cost} RALLODS ON ")
print("FUNERAL EXPENSES.")
self.rallods -= funeral_cost
if self.rallods < 0:
print(" INSUFFICIENT RESERVES TO COVER COST - LAND WAS SOLD")
self.land += int(self.rallods / self.land_buy_price)
self.rallods = 0
self.countrymen -= self.died_contrymen
def handle_tourist_trade(self) -> None:
V1 = int(self.settled_people * 22 + random() * 500)
V2 = int((INITIAL_LAND - self.land) * 15)
tourist_trade_earnings = int(V1 - V2)
print(f" YOU MADE {tourist_trade_earnings} RALLODS FROM TOURIST TRADE.")
if V2 != 0 and not (V1 - V2 >= self.tourism_earnings):
print(" DECREASE BECAUSE ")
reason = randint(0, 10)
if reason <= 2:
print("FISH POPULATION HAS DWINDLED DUE TO WATER POLLUTION.")
if reason <= 4:
print("AIR POLLUTION IS KILLING GAME BIRD POPULATION.")
if reason <= 6:
print("MINERAL BATHS ARE BEING RUINED BY WATER POLLUTION.")
if reason <= 8:
print("UNPLEASANT SMOG IS DISCOURAGING SUN BATHERS.")
if reason <= 10:
print("HOTELS ARE LOOKING SHABBY DUE TO SMOG GRIT.")
# NOTE: The following two lines had a bug in the original game:
self.tourism_earnings = abs(int(V1 - V2))
self.rallods += self.tourism_earnings
def handle_harvest(self, planted_sq: int) -> None:
crop_loss = int((INITIAL_LAND - self.land) * ((random() + 1.5) / 2))
if self.foreign_workers != 0:
print(f"OF {planted_sq} SQ. MILES PLANTED,")
if planted_sq <= crop_loss:
crop_loss = planted_sq
harvested = int(planted_sq - crop_loss)
print(f" YOU HARVESTED {harvested} SQ. MILES OF CROPS.")
unlucky_harvesting_worse = crop_loss - self.crop_loss_last_year
if crop_loss != 0:
print(" (DUE TO ", end="")
if unlucky_harvesting_worse > 2:
print("INCREASED ", end="")
print("AIR AND WATER POLLUTION FROM FOREIGN INDUSTRY.)")
revenue = int((planted_sq - crop_loss) * (self.land_buy_price / 2))
print(f"MAKING {revenue} RALLODS.")
self.crop_loss_last_year = crop_loss
self.rallods += revenue
def handle_foreign_workers(
self,
sm_sell_to_industry: int,
distributed_rallods: int,
polltion_control_spendings: int,
) -> None:
foreign_workers_influx = 0
if sm_sell_to_industry != 0:
foreign_workers_influx = int(
sm_sell_to_industry + (random() * 10) - (random() * 20)
)
if self.foreign_workers <= 0:
foreign_workers_influx = foreign_workers_influx + 20
print(f"{foreign_workers_influx} WORKERS CAME TO THE COUNTRY AND")
surplus_distributed = distributed_rallods / COST_OF_LIVING - self.countrymen
population_change = int(
(surplus_distributed / 10)
+ (polltion_control_spendings / POLLUTION_CONTROL_FACTOR)
- ((INITIAL_LAND - self.land) / 50)
- (self.died_contrymen / 2)
)
print(f"{abs(population_change)} COUNTRYMEN ", end="")
if population_change < 0:
print("LEFT ", end="")
else:
print("CAME TO ", end="")
print("THE ISLAND")
self.countrymen += population_change
self.foreign_workers += int(foreign_workers_influx)
def
|
(self) -> None:
print(f"\n\n\n{self.died_contrymen} COUNTRYMEN DIED IN ONE YEAR!!!!!")
print("\n\n\nDUE TO THIS EXTREME MISMANAGEMENT, YOU HAVE NOT ONLY")
print("BEEN IMPEACHED AND THROWN OUT OF OFFICE, BUT YOU")
message = randint(0, 10)
if message <= 3:
print("ALSO HAD YOUR LEFT EYE GOUGED OUT!")
if message <= 6:
print("HAVE ALSO GAINED A VERY BAD REPUTATION.")
if message <= 10:
print("HAVE ALSO BEEN DECLARED NATIONAL FINK.")
sys.exit()
def handle_third_died(self) -> None:
print()
print()
print("OVER ONE THIRD OF THE POPULTATION HAS DIED SINCE YOU")
print("WERE ELECTED TO OFFICE. THE PEOPLE (REMAINING)")
print("HATE YOUR GUTS.")
self.end_game()
def handle_money_mismanagement(self) -> None:
print()
print("MONEY WAS LEFT OVER IN THE TREASURY WHICH YOU DID")
print("NOT SPEND. AS A RESULT, SOME OF YOUR COUNTRYMEN DIED")
print("OF STARVATION. THE PUBLIC IS ENRAGED AND YOU HAVE")
print("BEEN FORCED TO EITHER RESIGN OR COMMIT SUICIDE.")
print("THE CHOICE IS YOURS.")
print("IF YOU CHOOSE THE LATTER, PLEASE TURN OFF YOUR COMPUTER")
print("BEFORE PROCEEDING.")
sys.exit()
def handle_too_many_foreigners(self) -> None:
print("\n\nTHE NUMBER OF FOREIGN WORKERS HAS EXCEEDED THE NUMBER")
print("OF COUNTRYMEN. AS A MINORITY, THEY HAVE REVOLTED AND")
print("TAKEN OVER THE COUNTRY.")
self.end_game()
def end_game(self) -> None:
if random() <= 0.5:
print("YOU HAVE BEEN ASSASSINATED.")
else:
print("YOU HAVE BEEN THROWN OUT OF OFFICE AND ARE NOW")
print("RESIDING IN PRISON.")
sys.exit()
def handle_congratulations(self) -> None:
print("\n\nCONGRATULATIONS!!!!!!!!!!!!!!!!!!")
print(f"YOU HAVE SUCCESFULLY COMPLETED YOUR {YEARS_IN_TERM} YEAR TERM")
print("OF OFFICE. YOU WERE, OF COURSE, EXTREMELY LUCKY, BUT")
print("NEVERTHELESS, IT'S QUITE AN ACHIEVEMENT. GOODBYE AND GOOD")
print("LUCK - YOU'LL PROBABLY NEED IT IF YOU'RE THE TYPE THAT")
print("PLAYS THIS GAME.")
sys.exit()
def print_header() -> None:
print(" " * 34 + "KING")
print(" " * 15 + "CREATIVE COMPUTING MORRISTOWN, NEW JERSEY\n\n\n")
def print_instructions() -> None:
print(
f"""\n\n\nCONGRATULATIONS! YOU'VE JUST BEEN ELECTED PREMIER OF SETATS
DETINU, A SMALL COMMUNIST ISLAND 30 BY 70 MILES LONG. YOUR
JOB IS TO DECIDE UPON THE CONTRY'S BUDGET AND DISTRIBUTE
MONEY TO YOUR COUNTRYMEN FROM THE COMMUNAL TREASURY.
THE MONEY SYSTEM IS RALLODS, AND EACH PERSON NEEDS {COST_OF_LIVING}
RALLODS PER YEAR TO SURVIVE. YOUR COUNTRY'S INCOME COMES
FROM FARM PRODUCE
|
handle_too_many_deaths
|
identifier_name
|
mod.rs
|
32, f32))>,
pub statusbar: Statusbar,
pub custom_menu_height: f32,
/// Backend that is currently being configured.
pub config_backend: Option<BackendHandle>,
/// View currently being renamed
view_rename_state: ViewRenameState,
}
impl Window {
pub fn new(width: usize, height: usize) -> minifb::Result<Window> {
let options = WindowOptions {
resize: true,
scale: Scale::X1,
..WindowOptions::default()
};
let win = try!(minifb::Window::new("ProDBG", width, height, options));
let ws = Workspace::new(Rect::new(0.0, 0.0, width as f32, (height - 20) as f32));
let ws_states = VecDeque::with_capacity(WORKSPACE_UNDO_LIMIT);
let mut res = Window {
win: win,
menu: Menu::new(),
menu_id_offset: 1000,
mouse_state: MouseState::Default,
ws: ws,
ws_states: ws_states,
cur_state_index: 0usize,
removed_instances: HashMap::new(),
overlay: None,
context_menu_data: None,
statusbar: Statusbar::new(),
custom_menu_height: 0.0,
config_backend: None,
view_rename_state: ViewRenameState::None,
};
res.initialize_workspace_state();
Ok(res)
}
pub fn pre_update(&mut self) {
self.update_imgui_mouse();
self.update_imgui_keys();
}
pub fn update(&mut self,
sessions: &mut Sessions,
view_plugins: &mut ViewPlugins,
backend_plugins: &mut BackendPlugins) {
// Update minifb window to get current window size
self.win.update();
// Update menus first to find out size of self-drawn menus (if any)
self.update_menus(view_plugins, sessions, backend_plugins);
// Status bar needs full size of window
let win_size = self.win.get_size();
let width = win_size.0 as f32;
let height = (win_size.1 as f32) - self.statusbar.get_size() - self.custom_menu_height;
// Workspace needs area without menus and status bar
self.ws.update_rect(Rect::new(0.0, self.custom_menu_height, width, height));
let mut views_to_delete = Vec::new();
let mut has_shown_menu = 0u32;
let show_context_menu = self.update_mouse_state();
let mouse = self.get_mouse_pos();
let docks = self.ws.get_docks();
for dock in docks {
let view_handle = ViewHandle(dock.0);
let session = match view_plugins.get_view(view_handle)
.and_then(|v| sessions.get_session(v.session_handle)) {
None => continue,
Some(s) => s,
};
let state = Self::update_view(&mut self.ws,
view_plugins,
view_handle,
session,
show_context_menu,
mouse,
&self.overlay);
if state.should_close {
views_to_delete.push(view_handle);
}
has_shown_menu |= state.showed_popup;
}
if !views_to_delete.is_empty() {
Self::remove_views(self, view_plugins, &views_to_delete);
self.save_cur_workspace_state();
}
self.update_statusbar(sessions, backend_plugins, win_size);
self.process_key_presses(view_plugins);
// if now plugin has showed a menu we do it here
// TODO: Handle diffrent cases when attach menu on to plugin menu or not
self.render_popup(show_context_menu && has_shown_menu == 0, view_plugins);
// If we have a backend configuration running
self.update_backend_configure(sessions, backend_plugins);
}
/// Updates the statusbar at the bottom of the window to show which state the debugger currently is in
fn update_statusbar(&mut self,
sessions: &mut Sessions,
backend_plugins: &mut BackendPlugins,
size: (usize, usize)) {
let session = sessions.get_current();
ReaderWrapper::reset_reader(&mut session.reader);
for event in session.reader.get_event() {
match event {
EVENT_SET_STATUS => {
if let Ok(status) = session.reader.find_string("status") {
self.statusbar.status = status.to_owned();
}
}
_ => (),
}
}
if let Some(ref backend) = backend_plugins.get_backend(session.backend) {
let name = &backend.plugin_type.name;
self.statusbar.update(&name, size);
} else {
self.statusbar.update("", size);
}
}
pub fn layout_to_string(&mut self, view_plugins: &mut ViewPlugins) -> String {
let layout = WindowLayout::from_current_state(self.ws.clone(), view_plugins);
layout.to_string()
}
pub fn init_layout(&mut self,
layout_data: &str,
view_plugins: &mut ViewPlugins)
-> io::Result<()> {
let layout = match WindowLayout::from_string(layout_data) {
Ok(layout) => layout,
Err(error) => return Result::Err(io::Error::new(io::ErrorKind::InvalidData, error)),
};
self.ws = layout.workspace;
// TODO: should we check here that handles stored in Workspace and handles restored in
// ViewPlugins are the same?
WindowLayout::restore_view_plugins(view_plugins, &layout.infos);
self.initialize_workspace_state();
Ok(())
}
fn update_view(ws: &mut Workspace,
view_plugins: &mut ViewPlugins,
handle: ViewHandle,
session: &mut Session,
show_context_menu: bool,
mouse: (f32, f32),
overlay: &Option<(DockHandle, Rect)>)
-> WindowState {
let ws_container = match ws.root_area
.as_mut()
.and_then(|root| root.get_container_by_dock_handle_mut(DockHandle(handle.0))) {
None => {
panic!("Tried to update view {} but it is not in workspace",
handle.0)
}
Some(container) => container,
};
if ws_container.docks[ws_container.active_dock].0 != handle.0 {
// This view is in hidden tab
return WindowState {
showed_popup: 0,
should_close: false,
};
}
let tab_names: Vec<String> = ws_container.docks
.iter()
.map(|dock_handle| {
view_plugins.get_view(ViewHandle(dock_handle.0))
.map(|plugin| plugin.name.clone())
.unwrap_or("Not loaded".to_string())
})
.collect();
let instance = match view_plugins.get_view(handle) {
None => {
return WindowState {
showed_popup: 0,
should_close: false,
}
}
Some(instance) => instance,
};
Imgui::set_window_pos(ws_container.rect.x, ws_container.rect.y);
Imgui::set_window_size(ws_container.rect.width, ws_container.rect.height);
// TODO: should we avoid repeating window names? Add handle or something like this.
let open = Imgui::begin_window(&instance.name, true);
if tab_names.len() > 1
|
let ui = &instance.ui;
Imgui::init_state(ui.api);
let pos = ui.get_window_pos();
let size = ui.get_window_size();
Imgui::mark_show_popup(ui.api, is_inside(mouse, pos, size) && show_context_menu);
// Draw drag zone
if let &Some((handle, rect)) = overlay {
if handle.0 == handle.0 {
Imgui::render_frame(rect.x, rect.y, rect.width, rect.height, OVERLAY_COLOR);
}
}
// Make sure we move the cursor to the start of the stream here
ReaderWrapper::reset_reader(&mut session.reader);
unsafe {
let plugin_funcs = instance.plugin_type.plugin_funcs as *mut CViewCallbacks;
((*plugin_funcs).update.unwrap())(instance.plugin_data,
ui.api as *mut c_void,
session.reader.api as *mut c_void,
session.get_current_writer().api as *mut c_void);
}
let has_shown_menu = Imgui::has_showed_popup(ui.api);
if tab_names.len() > 1 {
Imgui::end_window_child();
}
Imgui::end_window();
WindowState {
showed_popup: has_shown_menu,
should_close: !open,
}
}
pub fn remove_views(&mut self, view_plugins: &mut ViewPlugins, views: &Vec<ViewHandle>) {
for view in views {
if let
|
{
Imgui::begin_window_child("tabs", 20.0);
let mut borders = Vec::with_capacity(tab_names.len());
// TODO: should repeated window names be avoided?
for (i, name) in tab_names.iter().enumerate() {
if Imgui::tab(name,
i == ws_container.active_dock,
i == tab_names.len() - 1) {
ws_container.active_dock = i;
}
borders.push(Imgui::tab_pos());
}
ws_container.update_tab_borders(&borders);
Imgui::end_window_child();
Imgui::separator();
Imgui::begin_window_child("body", 0.0);
}
|
conditional_block
|
mod.rs
|
32, f32))>,
pub statusbar: Statusbar,
pub custom_menu_height: f32,
/// Backend that is currently being configured.
pub config_backend: Option<BackendHandle>,
/// View currently being renamed
view_rename_state: ViewRenameState,
}
impl Window {
pub fn new(width: usize, height: usize) -> minifb::Result<Window> {
let options = WindowOptions {
resize: true,
scale: Scale::X1,
..WindowOptions::default()
};
let win = try!(minifb::Window::new("ProDBG", width, height, options));
let ws = Workspace::new(Rect::new(0.0, 0.0, width as f32, (height - 20) as f32));
let ws_states = VecDeque::with_capacity(WORKSPACE_UNDO_LIMIT);
let mut res = Window {
win: win,
menu: Menu::new(),
menu_id_offset: 1000,
mouse_state: MouseState::Default,
ws: ws,
ws_states: ws_states,
cur_state_index: 0usize,
removed_instances: HashMap::new(),
overlay: None,
context_menu_data: None,
statusbar: Statusbar::new(),
custom_menu_height: 0.0,
config_backend: None,
view_rename_state: ViewRenameState::None,
};
res.initialize_workspace_state();
Ok(res)
}
pub fn pre_update(&mut self) {
self.update_imgui_mouse();
self.update_imgui_keys();
}
pub fn update(&mut self,
sessions: &mut Sessions,
view_plugins: &mut ViewPlugins,
backend_plugins: &mut BackendPlugins) {
// Update minifb window to get current window size
self.win.update();
// Update menus first to find out size of self-drawn menus (if any)
self.update_menus(view_plugins, sessions, backend_plugins);
// Status bar needs full size of window
let win_size = self.win.get_size();
let width = win_size.0 as f32;
let height = (win_size.1 as f32) - self.statusbar.get_size() - self.custom_menu_height;
// Workspace needs area without menus and status bar
self.ws.update_rect(Rect::new(0.0, self.custom_menu_height, width, height));
let mut views_to_delete = Vec::new();
let mut has_shown_menu = 0u32;
let show_context_menu = self.update_mouse_state();
let mouse = self.get_mouse_pos();
let docks = self.ws.get_docks();
for dock in docks {
let view_handle = ViewHandle(dock.0);
let session = match view_plugins.get_view(view_handle)
.and_then(|v| sessions.get_session(v.session_handle)) {
None => continue,
Some(s) => s,
};
let state = Self::update_view(&mut self.ws,
view_plugins,
view_handle,
session,
show_context_menu,
mouse,
&self.overlay);
if state.should_close {
views_to_delete.push(view_handle);
}
has_shown_menu |= state.showed_popup;
}
if !views_to_delete.is_empty() {
Self::remove_views(self, view_plugins, &views_to_delete);
self.save_cur_workspace_state();
}
self.update_statusbar(sessions, backend_plugins, win_size);
self.process_key_presses(view_plugins);
// if now plugin has showed a menu we do it here
// TODO: Handle diffrent cases when attach menu on to plugin menu or not
self.render_popup(show_context_menu && has_shown_menu == 0, view_plugins);
// If we have a backend configuration running
self.update_backend_configure(sessions, backend_plugins);
}
/// Updates the statusbar at the bottom of the window to show which state the debugger currently is in
fn update_statusbar(&mut self,
sessions: &mut Sessions,
backend_plugins: &mut BackendPlugins,
size: (usize, usize)) {
let session = sessions.get_current();
ReaderWrapper::reset_reader(&mut session.reader);
for event in session.reader.get_event() {
match event {
EVENT_SET_STATUS => {
if let Ok(status) = session.reader.find_string("status") {
self.statusbar.status = status.to_owned();
}
}
_ => (),
}
}
if let Some(ref backend) = backend_plugins.get_backend(session.backend) {
let name = &backend.plugin_type.name;
self.statusbar.update(&name, size);
} else {
self.statusbar.update("", size);
}
}
pub fn layout_to_string(&mut self, view_plugins: &mut ViewPlugins) -> String {
let layout = WindowLayout::from_current_state(self.ws.clone(), view_plugins);
layout.to_string()
}
pub fn init_layout(&mut self,
layout_data: &str,
view_plugins: &mut ViewPlugins)
-> io::Result<()> {
let layout = match WindowLayout::from_string(layout_data) {
Ok(layout) => layout,
Err(error) => return Result::Err(io::Error::new(io::ErrorKind::InvalidData, error)),
};
self.ws = layout.workspace;
// TODO: should we check here that handles stored in Workspace and handles restored in
// ViewPlugins are the same?
WindowLayout::restore_view_plugins(view_plugins, &layout.infos);
self.initialize_workspace_state();
Ok(())
|
session: &mut Session,
show_context_menu: bool,
mouse: (f32, f32),
overlay: &Option<(DockHandle, Rect)>)
-> WindowState {
let ws_container = match ws.root_area
.as_mut()
.and_then(|root| root.get_container_by_dock_handle_mut(DockHandle(handle.0))) {
None => {
panic!("Tried to update view {} but it is not in workspace",
handle.0)
}
Some(container) => container,
};
if ws_container.docks[ws_container.active_dock].0 != handle.0 {
// This view is in hidden tab
return WindowState {
showed_popup: 0,
should_close: false,
};
}
let tab_names: Vec<String> = ws_container.docks
.iter()
.map(|dock_handle| {
view_plugins.get_view(ViewHandle(dock_handle.0))
.map(|plugin| plugin.name.clone())
.unwrap_or("Not loaded".to_string())
})
.collect();
let instance = match view_plugins.get_view(handle) {
None => {
return WindowState {
showed_popup: 0,
should_close: false,
}
}
Some(instance) => instance,
};
Imgui::set_window_pos(ws_container.rect.x, ws_container.rect.y);
Imgui::set_window_size(ws_container.rect.width, ws_container.rect.height);
// TODO: should we avoid repeating window names? Add handle or something like this.
let open = Imgui::begin_window(&instance.name, true);
if tab_names.len() > 1 {
Imgui::begin_window_child("tabs", 20.0);
let mut borders = Vec::with_capacity(tab_names.len());
// TODO: should repeated window names be avoided?
for (i, name) in tab_names.iter().enumerate() {
if Imgui::tab(name,
i == ws_container.active_dock,
i == tab_names.len() - 1) {
ws_container.active_dock = i;
}
borders.push(Imgui::tab_pos());
}
ws_container.update_tab_borders(&borders);
Imgui::end_window_child();
Imgui::separator();
Imgui::begin_window_child("body", 0.0);
}
let ui = &instance.ui;
Imgui::init_state(ui.api);
let pos = ui.get_window_pos();
let size = ui.get_window_size();
Imgui::mark_show_popup(ui.api, is_inside(mouse, pos, size) && show_context_menu);
// Draw drag zone
if let &Some((handle, rect)) = overlay {
if handle.0 == handle.0 {
Imgui::render_frame(rect.x, rect.y, rect.width, rect.height, OVERLAY_COLOR);
}
}
// Make sure we move the cursor to the start of the stream here
ReaderWrapper::reset_reader(&mut session.reader);
unsafe {
let plugin_funcs = instance.plugin_type.plugin_funcs as *mut CViewCallbacks;
((*plugin_funcs).update.unwrap())(instance.plugin_data,
ui.api as *mut c_void,
session.reader.api as *mut c_void,
session.get_current_writer().api as *mut c_void);
}
let has_shown_menu = Imgui::has_showed_popup(ui.api);
if tab_names.len() > 1 {
Imgui::end_window_child();
}
Imgui::end_window();
WindowState {
showed_popup: has_shown_menu,
should_close: !open,
}
}
pub fn remove_views(&mut self, view_plugins: &mut ViewPlugins, views: &Vec<ViewHandle>) {
for view in views {
if let Some
|
}
fn update_view(ws: &mut Workspace,
view_plugins: &mut ViewPlugins,
handle: ViewHandle,
|
random_line_split
|
mod.rs
|
self,
sessions: &mut Sessions,
view_plugins: &mut ViewPlugins,
backend_plugins: &mut BackendPlugins) {
// Update minifb window to get current window size
self.win.update();
// Update menus first to find out size of self-drawn menus (if any)
self.update_menus(view_plugins, sessions, backend_plugins);
// Status bar needs full size of window
let win_size = self.win.get_size();
let width = win_size.0 as f32;
let height = (win_size.1 as f32) - self.statusbar.get_size() - self.custom_menu_height;
// Workspace needs area without menus and status bar
self.ws.update_rect(Rect::new(0.0, self.custom_menu_height, width, height));
let mut views_to_delete = Vec::new();
let mut has_shown_menu = 0u32;
let show_context_menu = self.update_mouse_state();
let mouse = self.get_mouse_pos();
let docks = self.ws.get_docks();
for dock in docks {
let view_handle = ViewHandle(dock.0);
let session = match view_plugins.get_view(view_handle)
.and_then(|v| sessions.get_session(v.session_handle)) {
None => continue,
Some(s) => s,
};
let state = Self::update_view(&mut self.ws,
view_plugins,
view_handle,
session,
show_context_menu,
mouse,
&self.overlay);
if state.should_close {
views_to_delete.push(view_handle);
}
has_shown_menu |= state.showed_popup;
}
if !views_to_delete.is_empty() {
Self::remove_views(self, view_plugins, &views_to_delete);
self.save_cur_workspace_state();
}
self.update_statusbar(sessions, backend_plugins, win_size);
self.process_key_presses(view_plugins);
// if now plugin has showed a menu we do it here
// TODO: Handle diffrent cases when attach menu on to plugin menu or not
self.render_popup(show_context_menu && has_shown_menu == 0, view_plugins);
// If we have a backend configuration running
self.update_backend_configure(sessions, backend_plugins);
}
/// Updates the statusbar at the bottom of the window to show which state the debugger currently is in
fn update_statusbar(&mut self,
sessions: &mut Sessions,
backend_plugins: &mut BackendPlugins,
size: (usize, usize)) {
let session = sessions.get_current();
ReaderWrapper::reset_reader(&mut session.reader);
for event in session.reader.get_event() {
match event {
EVENT_SET_STATUS => {
if let Ok(status) = session.reader.find_string("status") {
self.statusbar.status = status.to_owned();
}
}
_ => (),
}
}
if let Some(ref backend) = backend_plugins.get_backend(session.backend) {
let name = &backend.plugin_type.name;
self.statusbar.update(&name, size);
} else {
self.statusbar.update("", size);
}
}
pub fn layout_to_string(&mut self, view_plugins: &mut ViewPlugins) -> String {
let layout = WindowLayout::from_current_state(self.ws.clone(), view_plugins);
layout.to_string()
}
pub fn init_layout(&mut self,
layout_data: &str,
view_plugins: &mut ViewPlugins)
-> io::Result<()> {
let layout = match WindowLayout::from_string(layout_data) {
Ok(layout) => layout,
Err(error) => return Result::Err(io::Error::new(io::ErrorKind::InvalidData, error)),
};
self.ws = layout.workspace;
// TODO: should we check here that handles stored in Workspace and handles restored in
// ViewPlugins are the same?
WindowLayout::restore_view_plugins(view_plugins, &layout.infos);
self.initialize_workspace_state();
Ok(())
}
fn update_view(ws: &mut Workspace,
view_plugins: &mut ViewPlugins,
handle: ViewHandle,
session: &mut Session,
show_context_menu: bool,
mouse: (f32, f32),
overlay: &Option<(DockHandle, Rect)>)
-> WindowState {
let ws_container = match ws.root_area
.as_mut()
.and_then(|root| root.get_container_by_dock_handle_mut(DockHandle(handle.0))) {
None => {
panic!("Tried to update view {} but it is not in workspace",
handle.0)
}
Some(container) => container,
};
if ws_container.docks[ws_container.active_dock].0 != handle.0 {
// This view is in hidden tab
return WindowState {
showed_popup: 0,
should_close: false,
};
}
let tab_names: Vec<String> = ws_container.docks
.iter()
.map(|dock_handle| {
view_plugins.get_view(ViewHandle(dock_handle.0))
.map(|plugin| plugin.name.clone())
.unwrap_or("Not loaded".to_string())
})
.collect();
let instance = match view_plugins.get_view(handle) {
None => {
return WindowState {
showed_popup: 0,
should_close: false,
}
}
Some(instance) => instance,
};
Imgui::set_window_pos(ws_container.rect.x, ws_container.rect.y);
Imgui::set_window_size(ws_container.rect.width, ws_container.rect.height);
// TODO: should we avoid repeating window names? Add handle or something like this.
let open = Imgui::begin_window(&instance.name, true);
if tab_names.len() > 1 {
Imgui::begin_window_child("tabs", 20.0);
let mut borders = Vec::with_capacity(tab_names.len());
// TODO: should repeated window names be avoided?
for (i, name) in tab_names.iter().enumerate() {
if Imgui::tab(name,
i == ws_container.active_dock,
i == tab_names.len() - 1) {
ws_container.active_dock = i;
}
borders.push(Imgui::tab_pos());
}
ws_container.update_tab_borders(&borders);
Imgui::end_window_child();
Imgui::separator();
Imgui::begin_window_child("body", 0.0);
}
let ui = &instance.ui;
Imgui::init_state(ui.api);
let pos = ui.get_window_pos();
let size = ui.get_window_size();
Imgui::mark_show_popup(ui.api, is_inside(mouse, pos, size) && show_context_menu);
// Draw drag zone
if let &Some((handle, rect)) = overlay {
if handle.0 == handle.0 {
Imgui::render_frame(rect.x, rect.y, rect.width, rect.height, OVERLAY_COLOR);
}
}
// Make sure we move the cursor to the start of the stream here
ReaderWrapper::reset_reader(&mut session.reader);
unsafe {
let plugin_funcs = instance.plugin_type.plugin_funcs as *mut CViewCallbacks;
((*plugin_funcs).update.unwrap())(instance.plugin_data,
ui.api as *mut c_void,
session.reader.api as *mut c_void,
session.get_current_writer().api as *mut c_void);
}
let has_shown_menu = Imgui::has_showed_popup(ui.api);
if tab_names.len() > 1 {
Imgui::end_window_child();
}
Imgui::end_window();
WindowState {
showed_popup: has_shown_menu,
should_close: !open,
}
}
pub fn remove_views(&mut self, view_plugins: &mut ViewPlugins, views: &Vec<ViewHandle>) {
for view in views {
if let Some(instance) = view_plugins.get_view(*view) {
self.removed_instances.insert(view.0, PluginInstanceInfo::new(instance));
}
view_plugins.destroy_instance(*view);
self.ws.delete_dock_by_handle(DockHandle(view.0));
}
}
fn update_backend_configure(&mut self,
sessions: &mut Sessions,
backend_plugins: &mut BackendPlugins) {
if self.config_backend == None {
return;
}
let backend = backend_plugins.get_backend(self.config_backend).unwrap();
unsafe {
let plugin_funcs = backend.plugin_type.plugin_funcs as *mut CBackendCallbacks;
if let Some(show_config) = (*plugin_funcs).show_config {
let ui = Imgui::get_ui();
ui.open_popup("config");
if ui.begin_popup_modal("config") {
show_config(backend.plugin_data, Imgui::get_ui_funs() as *mut c_void);
let ok_size = Some(Vec2 { x: 120.0, y: 0.0 });
let cancel_size = Some(Vec2 { x: 120.0, y: 0.0 });
if ui.button("Ok", ok_size) {
sessions.get_current().set_backend(self.config_backend);
self.config_backend = None;
ui.close_current_popup();
}
ui.same_line(0, -1);
if ui.button("Cancel", cancel_size) {
self.config_backend = None;
ui.close_current_popup();
}
ui.end_popup();
}
} else {
sessions.get_current().set_backend(self.config_backend);
self.config_backend = None;
}
}
}
fn
|
has_source_code_view
|
identifier_name
|
|
mod.rs
|
32, f32))>,
pub statusbar: Statusbar,
pub custom_menu_height: f32,
/// Backend that is currently being configured.
pub config_backend: Option<BackendHandle>,
/// View currently being renamed
view_rename_state: ViewRenameState,
}
impl Window {
pub fn new(width: usize, height: usize) -> minifb::Result<Window>
|
context_menu_data: None,
statusbar: Statusbar::new(),
custom_menu_height: 0.0,
config_backend: None,
view_rename_state: ViewRenameState::None,
};
res.initialize_workspace_state();
Ok(res)
}
pub fn pre_update(&mut self) {
self.update_imgui_mouse();
self.update_imgui_keys();
}
pub fn update(&mut self,
sessions: &mut Sessions,
view_plugins: &mut ViewPlugins,
backend_plugins: &mut BackendPlugins) {
// Update minifb window to get current window size
self.win.update();
// Update menus first to find out size of self-drawn menus (if any)
self.update_menus(view_plugins, sessions, backend_plugins);
// Status bar needs full size of window
let win_size = self.win.get_size();
let width = win_size.0 as f32;
let height = (win_size.1 as f32) - self.statusbar.get_size() - self.custom_menu_height;
// Workspace needs area without menus and status bar
self.ws.update_rect(Rect::new(0.0, self.custom_menu_height, width, height));
let mut views_to_delete = Vec::new();
let mut has_shown_menu = 0u32;
let show_context_menu = self.update_mouse_state();
let mouse = self.get_mouse_pos();
let docks = self.ws.get_docks();
for dock in docks {
let view_handle = ViewHandle(dock.0);
let session = match view_plugins.get_view(view_handle)
.and_then(|v| sessions.get_session(v.session_handle)) {
None => continue,
Some(s) => s,
};
let state = Self::update_view(&mut self.ws,
view_plugins,
view_handle,
session,
show_context_menu,
mouse,
&self.overlay);
if state.should_close {
views_to_delete.push(view_handle);
}
has_shown_menu |= state.showed_popup;
}
if !views_to_delete.is_empty() {
Self::remove_views(self, view_plugins, &views_to_delete);
self.save_cur_workspace_state();
}
self.update_statusbar(sessions, backend_plugins, win_size);
self.process_key_presses(view_plugins);
// if now plugin has showed a menu we do it here
// TODO: Handle diffrent cases when attach menu on to plugin menu or not
self.render_popup(show_context_menu && has_shown_menu == 0, view_plugins);
// If we have a backend configuration running
self.update_backend_configure(sessions, backend_plugins);
}
/// Updates the statusbar at the bottom of the window to show which state the debugger currently is in
fn update_statusbar(&mut self,
sessions: &mut Sessions,
backend_plugins: &mut BackendPlugins,
size: (usize, usize)) {
let session = sessions.get_current();
ReaderWrapper::reset_reader(&mut session.reader);
for event in session.reader.get_event() {
match event {
EVENT_SET_STATUS => {
if let Ok(status) = session.reader.find_string("status") {
self.statusbar.status = status.to_owned();
}
}
_ => (),
}
}
if let Some(ref backend) = backend_plugins.get_backend(session.backend) {
let name = &backend.plugin_type.name;
self.statusbar.update(&name, size);
} else {
self.statusbar.update("", size);
}
}
pub fn layout_to_string(&mut self, view_plugins: &mut ViewPlugins) -> String {
let layout = WindowLayout::from_current_state(self.ws.clone(), view_plugins);
layout.to_string()
}
pub fn init_layout(&mut self,
layout_data: &str,
view_plugins: &mut ViewPlugins)
-> io::Result<()> {
let layout = match WindowLayout::from_string(layout_data) {
Ok(layout) => layout,
Err(error) => return Result::Err(io::Error::new(io::ErrorKind::InvalidData, error)),
};
self.ws = layout.workspace;
// TODO: should we check here that handles stored in Workspace and handles restored in
// ViewPlugins are the same?
WindowLayout::restore_view_plugins(view_plugins, &layout.infos);
self.initialize_workspace_state();
Ok(())
}
fn update_view(ws: &mut Workspace,
view_plugins: &mut ViewPlugins,
handle: ViewHandle,
session: &mut Session,
show_context_menu: bool,
mouse: (f32, f32),
overlay: &Option<(DockHandle, Rect)>)
-> WindowState {
let ws_container = match ws.root_area
.as_mut()
.and_then(|root| root.get_container_by_dock_handle_mut(DockHandle(handle.0))) {
None => {
panic!("Tried to update view {} but it is not in workspace",
handle.0)
}
Some(container) => container,
};
if ws_container.docks[ws_container.active_dock].0 != handle.0 {
// This view is in hidden tab
return WindowState {
showed_popup: 0,
should_close: false,
};
}
let tab_names: Vec<String> = ws_container.docks
.iter()
.map(|dock_handle| {
view_plugins.get_view(ViewHandle(dock_handle.0))
.map(|plugin| plugin.name.clone())
.unwrap_or("Not loaded".to_string())
})
.collect();
let instance = match view_plugins.get_view(handle) {
None => {
return WindowState {
showed_popup: 0,
should_close: false,
}
}
Some(instance) => instance,
};
Imgui::set_window_pos(ws_container.rect.x, ws_container.rect.y);
Imgui::set_window_size(ws_container.rect.width, ws_container.rect.height);
// TODO: should we avoid repeating window names? Add handle or something like this.
let open = Imgui::begin_window(&instance.name, true);
if tab_names.len() > 1 {
Imgui::begin_window_child("tabs", 20.0);
let mut borders = Vec::with_capacity(tab_names.len());
// TODO: should repeated window names be avoided?
for (i, name) in tab_names.iter().enumerate() {
if Imgui::tab(name,
i == ws_container.active_dock,
i == tab_names.len() - 1) {
ws_container.active_dock = i;
}
borders.push(Imgui::tab_pos());
}
ws_container.update_tab_borders(&borders);
Imgui::end_window_child();
Imgui::separator();
Imgui::begin_window_child("body", 0.0);
}
let ui = &instance.ui;
Imgui::init_state(ui.api);
let pos = ui.get_window_pos();
let size = ui.get_window_size();
Imgui::mark_show_popup(ui.api, is_inside(mouse, pos, size) && show_context_menu);
// Draw drag zone
if let &Some((handle, rect)) = overlay {
if handle.0 == handle.0 {
Imgui::render_frame(rect.x, rect.y, rect.width, rect.height, OVERLAY_COLOR);
}
}
// Make sure we move the cursor to the start of the stream here
ReaderWrapper::reset_reader(&mut session.reader);
unsafe {
let plugin_funcs = instance.plugin_type.plugin_funcs as *mut CViewCallbacks;
((*plugin_funcs).update.unwrap())(instance.plugin_data,
ui.api as *mut c_void,
session.reader.api as *mut c_void,
session.get_current_writer().api as *mut c_void);
}
let has_shown_menu = Imgui::has_showed_popup(ui.api);
if tab_names.len() > 1 {
Imgui::end_window_child();
}
Imgui::end_window();
WindowState {
showed_popup: has_shown_menu,
should_close: !open,
}
}
pub fn remove_views(&mut self, view_plugins: &mut ViewPlugins, views: &Vec<ViewHandle>) {
for view in views {
if let Some
|
{
let options = WindowOptions {
resize: true,
scale: Scale::X1,
..WindowOptions::default()
};
let win = try!(minifb::Window::new("ProDBG", width, height, options));
let ws = Workspace::new(Rect::new(0.0, 0.0, width as f32, (height - 20) as f32));
let ws_states = VecDeque::with_capacity(WORKSPACE_UNDO_LIMIT);
let mut res = Window {
win: win,
menu: Menu::new(),
menu_id_offset: 1000,
mouse_state: MouseState::Default,
ws: ws,
ws_states: ws_states,
cur_state_index: 0usize,
removed_instances: HashMap::new(),
overlay: None,
|
identifier_body
|
stack.rs
|
list constructed from headers on each object instead of a stack of pointers.
//!
//! This implementation is generic in that it does not prescribe a method for mapping objects to
//! their containing slabs, but instead requires that an implementation of this functionality be
//! provided (see the `ConfigData` trait). The `aligned` module implements this functionality by
//! ensuring that slabs have an alignment equal to their size, and using this to compute a bit mask
//! for objects in the slab. The `large` module implements this functionality by storing object
//! pointers or page addresses in an allocator-global hash table.
//!
//! # Layout
//! The layout of stack-based slabs is somewhat confusing, and not readily obvious from the code.
//! This is due largely to the fact that slab size cannot be known at compile time, and must
//! instead be computed at runtime. Why this is a problem will become apparent shortly.
//!
//! The layout in memory of stack-based slabs is as follows:
//!
//! ```text
//! <header> <pre-stack padding> <stack> <post-stack padding> <array of objects>
//! ```
//!
//! The following requirements must be met with respect to memory layout:
//!
//! * The stack - which is an array of `usize` - must be aligned according to the alignment
//! required by `usize`
//! * The array of objects must be aligned according to the alignment requested by the user.
//!
//! The first requirement implies that there may need to be some padding between the header and the
//! stack. The second requirement implies that there may need to be some padding between the stack
//! and the array of objects.
//!
//! If the number of objects in a slab could be known statically, the stack could simply be an
//! array in the header. Instead, its size has to be computed dynamically, and thus cannot be a
//! field in the header (it could technically be `[*mut T]`, but this would make querying the
//! header's size more difficult).
//!
//! Instead, we use the `util::stack` module to implement a dynamically-sized stack, and to
//! dynamically compute the proper pre-stack padding required in order to give the stack the proper
//! alignment. We do the same for the post-stack padding in order to give the array of objects the
//! proper alignment.
use alloc::alloc;
use core::ptr::NonNull;
use core::{mem, ptr};
use init::InitSystem;
use object_alloc::UntypedObjectAlloc;
use util::color::{Color, ColorSettings};
use util::list::*;
use util::stack::Stack;
use SlabSystem;
/// Configuration to customize a stack-based slab implementation.
///
/// `ConfigData` completes the stack-based slab implementation by providing post-alloc and
/// pre-dealloc hooks and by providing a mechanism to look up an object's containing slab.
pub trait ConfigData
where
Self: Sized,
{
/// Perform per-slab post-allocation work.
///
/// `post_alloc` is called after a newly-allocated slab has been initialized. It is optional,
/// and defaults to a no-op.
#[allow(unused)]
fn post_alloc(&mut self, layout: &Layout, slab_size: usize, slab: NonNull<SlabHeader>) {}
/// Perform per-slab pre-deallocation work.
///
/// `pre_dealloc` is called before a slab is uninitialized and deallocated. It is optional, and
/// defaults to a no-op.
#[allow(unused)]
fn pre_dealloc(&mut self, layout: &Layout, slab_size: usize, slab: NonNull<SlabHeader>) {}
/// Look up an object's slab.
///
/// Given an object, `ptr_to_slab` locates the slab containing that object.
fn ptr_to_slab(&self, slab_size: usize, ptr: NonNull<u8>) -> NonNull<SlabHeader>;
}
pub struct System<A: UntypedObjectAlloc, C: ConfigData> {
pub data: C,
layout: Layout,
alloc: A,
}
impl<A: UntypedObjectAlloc, C: ConfigData> System<A, C> {
pub fn from_config_data(data: C, layout: Layout, alloc: A) -> System<A, C> {
System {
data: data,
layout: layout,
alloc: alloc,
}
}
}
impl<I: InitSystem, A: UntypedObjectAlloc, C: ConfigData> SlabSystem<I> for System<A, C> {
type Slab = SlabHeader;
fn alloc_slab(&mut self) -> Option<NonNull<SlabHeader>> {
unsafe {
let color = self
.layout
.color_settings
.next_color(self.layout.layout.align());
let slab = self.alloc.alloc()?.cast();
ptr::write(
slab.as_ptr(),
SlabHeader {
stack: Stack::new(),
color: color,
next: None,
prev: None,
},
);
let stack_data_ptr = self.layout.stack_begin(slab);
for i in 0..self.layout.num_obj {
let ptr = self.layout.nth_obj(slab, color, i);
(*slab.as_ptr())
.stack
.push(stack_data_ptr, I::pack(ptr, I::status_uninitialized()));
}
self.data
.post_alloc(&self.layout, self.alloc.layout().size(), slab);
Some(slab)
}
}
fn dealloc_slab(&mut self, slab: NonNull<SlabHeader>) {
unsafe {
debug_assert_eq!((*slab.as_ptr()).stack.size(), self.layout.num_obj);
self.data
.pre_dealloc(&self.layout, self.alloc.layout().size(), slab);
let stack_data_ptr = self.layout.stack_begin(slab);
for _ in 0..self.layout.num_obj {
let packed = (*slab.as_ptr()).stack.pop(stack_data_ptr);
I::drop(I::unpack_ptr(packed), I::unpack_status(packed));
}
self.alloc.dealloc(slab.cast());
}
}
fn is_full(&self, slab: NonNull<SlabHeader>) -> bool {
unsafe { (*slab.as_ptr()).stack.size() == self.layout.num_obj }
}
fn is_empty(&self, slab: NonNull<SlabHeader>) -> bool {
unsafe { (*slab.as_ptr()).stack.size() == 0 }
}
fn alloc(&self, slab: NonNull<SlabHeader>) -> (NonNull<u8>, I::Status) {
unsafe {
let stack_data_ptr = self.layout.stack_begin(slab);
let packed = (*slab.as_ptr()).stack.pop(stack_data_ptr);
(I::unpack_ptr(packed), I::unpack_status(packed))
}
}
fn dealloc(&self, obj: NonNull<u8>, init_status: I::Status) -> (NonNull<SlabHeader>, bool) {
unsafe {
let slab = self.data.ptr_to_slab(self.alloc.layout().size(), obj);
let was_empty = (*slab.as_ptr()).stack.size() == 0;
let stack_data_ptr = self.layout.stack_begin(slab);
(*slab.as_ptr())
.stack
.push(stack_data_ptr, I::pack(obj, init_status));
(slab, was_empty)
}
}
}
pub struct SlabHeader {
stack: Stack<usize>, // note: this is only the metadata; the real stack comes after this header
color: Color, // extra padding added before array beginning
next: Option<NonNull<SlabHeader>>,
prev: Option<NonNull<SlabHeader>>,
}
impl Linkable for SlabHeader {
fn
|
(&self) -> Option<NonNull<SlabHeader>> {
self.next
}
fn prev(&self) -> Option<NonNull<SlabHeader>> {
self.prev
}
fn set_next(&mut self, next: Option<NonNull<SlabHeader>>) {
self.next = next;
}
fn set_prev(&mut self, prev: Option<NonNull<SlabHeader>>) {
self.prev = prev;
}
}
impl SlabHeader {
pub fn get_color(&self) -> Color {
self.color
}
}
#[derive(Clone)]
pub struct Layout {
pub num_obj: usize,
pub layout: alloc::Layout,
pub stack_begin_offset: usize,
pub array_begin_offset: usize,
pub color_settings: ColorSettings,
}
impl Layout {
/// Determines whether an allocator can be constructed for T using the given slab size. If so,
/// it returns a constructed Layout for T using that slab size and the amount of unused space
/// left at the end of the slab (when no coloring is used).
pub fn for_slab_size(layout: alloc::Layout, slab_size: usize) -> Option<(Layout, usize)> {
let obj_size = layout.size();
let obj_align = layout.align();
let hdr_size = mem::size_of::<SlabHeader>();
// padding between the SlabHeader and the base of the pointer stack
let pre_stack_padding = Stack::<usize>::padding_after(hdr_size);
let stack_begin_offset = hdr_size + pre_stack_padding;
// Find the largest number of objects we can fit in the slab. array_begin_offset is the
// offset from the beginning of the slab of the array of objects.
let (mut num_obj, mut array_begin_offset) = (0, 0);
loop {
let candidate = num_obj + 1;
// total_hdr_size =
|
next
|
identifier_name
|
stack.rs
|
list constructed from headers on each object instead of a stack of pointers.
//!
//! This implementation is generic in that it does not prescribe a method for mapping objects to
//! their containing slabs, but instead requires that an implementation of this functionality be
//! provided (see the `ConfigData` trait). The `aligned` module implements this functionality by
//! ensuring that slabs have an alignment equal to their size, and using this to compute a bit mask
//! for objects in the slab. The `large` module implements this functionality by storing object
//! pointers or page addresses in an allocator-global hash table.
//!
//! # Layout
//! The layout of stack-based slabs is somewhat confusing, and not readily obvious from the code.
//! This is due largely to the fact that slab size cannot be known at compile time, and must
//! instead be computed at runtime. Why this is a problem will become apparent shortly.
//!
//! The layout in memory of stack-based slabs is as follows:
//!
//! ```text
//! <header> <pre-stack padding> <stack> <post-stack padding> <array of objects>
//! ```
//!
//! The following requirements must be met with respect to memory layout:
//!
//! * The stack - which is an array of `usize` - must be aligned according to the alignment
//! required by `usize`
//! * The array of objects must be aligned according to the alignment requested by the user.
//!
//! The first requirement implies that there may need to be some padding between the header and the
//! stack. The second requirement implies that there may need to be some padding between the stack
//! and the array of objects.
//!
//! If the number of objects in a slab could be known statically, the stack could simply be an
//! array in the header. Instead, its size has to be computed dynamically, and thus cannot be a
//! field in the header (it could technically be `[*mut T]`, but this would make querying the
//! header's size more difficult).
//!
//! Instead, we use the `util::stack` module to implement a dynamically-sized stack, and to
//! dynamically compute the proper pre-stack padding required in order to give the stack the proper
//! alignment. We do the same for the post-stack padding in order to give the array of objects the
//! proper alignment.
use alloc::alloc;
use core::ptr::NonNull;
use core::{mem, ptr};
use init::InitSystem;
use object_alloc::UntypedObjectAlloc;
use util::color::{Color, ColorSettings};
use util::list::*;
use util::stack::Stack;
use SlabSystem;
/// Configuration to customize a stack-based slab implementation.
///
/// `ConfigData` completes the stack-based slab implementation by providing post-alloc and
/// pre-dealloc hooks and by providing a mechanism to look up an object's containing slab.
pub trait ConfigData
where
Self: Sized,
{
/// Perform per-slab post-allocation work.
///
/// `post_alloc` is called after a newly-allocated slab has been initialized. It is optional,
/// and defaults to a no-op.
#[allow(unused)]
fn post_alloc(&mut self, layout: &Layout, slab_size: usize, slab: NonNull<SlabHeader>) {}
/// Perform per-slab pre-deallocation work.
///
/// `pre_dealloc` is called before a slab is uninitialized and deallocated. It is optional, and
/// defaults to a no-op.
#[allow(unused)]
fn pre_dealloc(&mut self, layout: &Layout, slab_size: usize, slab: NonNull<SlabHeader>) {}
/// Look up an object's slab.
///
/// Given an object, `ptr_to_slab` locates the slab containing that object.
fn ptr_to_slab(&self, slab_size: usize, ptr: NonNull<u8>) -> NonNull<SlabHeader>;
}
pub struct System<A: UntypedObjectAlloc, C: ConfigData> {
pub data: C,
layout: Layout,
alloc: A,
}
impl<A: UntypedObjectAlloc, C: ConfigData> System<A, C> {
pub fn from_config_data(data: C, layout: Layout, alloc: A) -> System<A, C> {
System {
data: data,
layout: layout,
alloc: alloc,
}
}
}
impl<I: InitSystem, A: UntypedObjectAlloc, C: ConfigData> SlabSystem<I> for System<A, C> {
type Slab = SlabHeader;
fn alloc_slab(&mut self) -> Option<NonNull<SlabHeader>> {
unsafe {
let color = self
.layout
.color_settings
.next_color(self.layout.layout.align());
let slab = self.alloc.alloc()?.cast();
ptr::write(
slab.as_ptr(),
SlabHeader {
stack: Stack::new(),
color: color,
next: None,
prev: None,
},
);
let stack_data_ptr = self.layout.stack_begin(slab);
for i in 0..self.layout.num_obj {
let ptr = self.layout.nth_obj(slab, color, i);
(*slab.as_ptr())
.stack
.push(stack_data_ptr, I::pack(ptr, I::status_uninitialized()));
}
self.data
.post_alloc(&self.layout, self.alloc.layout().size(), slab);
Some(slab)
}
}
fn dealloc_slab(&mut self, slab: NonNull<SlabHeader>) {
unsafe {
debug_assert_eq!((*slab.as_ptr()).stack.size(), self.layout.num_obj);
self.data
.pre_dealloc(&self.layout, self.alloc.layout().size(), slab);
let stack_data_ptr = self.layout.stack_begin(slab);
for _ in 0..self.layout.num_obj {
let packed = (*slab.as_ptr()).stack.pop(stack_data_ptr);
I::drop(I::unpack_ptr(packed), I::unpack_status(packed));
}
self.alloc.dealloc(slab.cast());
}
}
fn is_full(&self, slab: NonNull<SlabHeader>) -> bool {
unsafe { (*slab.as_ptr()).stack.size() == self.layout.num_obj }
}
fn is_empty(&self, slab: NonNull<SlabHeader>) -> bool {
unsafe { (*slab.as_ptr()).stack.size() == 0 }
}
fn alloc(&self, slab: NonNull<SlabHeader>) -> (NonNull<u8>, I::Status) {
unsafe {
let stack_data_ptr = self.layout.stack_begin(slab);
let packed = (*slab.as_ptr()).stack.pop(stack_data_ptr);
(I::unpack_ptr(packed), I::unpack_status(packed))
}
}
fn dealloc(&self, obj: NonNull<u8>, init_status: I::Status) -> (NonNull<SlabHeader>, bool) {
unsafe {
let slab = self.data.ptr_to_slab(self.alloc.layout().size(), obj);
let was_empty = (*slab.as_ptr()).stack.size() == 0;
let stack_data_ptr = self.layout.stack_begin(slab);
(*slab.as_ptr())
.stack
.push(stack_data_ptr, I::pack(obj, init_status));
(slab, was_empty)
}
}
}
pub struct SlabHeader {
stack: Stack<usize>, // note: this is only the metadata; the real stack comes after this header
color: Color, // extra padding added before array beginning
next: Option<NonNull<SlabHeader>>,
prev: Option<NonNull<SlabHeader>>,
}
impl Linkable for SlabHeader {
fn next(&self) -> Option<NonNull<SlabHeader>> {
self.next
}
fn prev(&self) -> Option<NonNull<SlabHeader>> {
self.prev
}
fn set_next(&mut self, next: Option<NonNull<SlabHeader>>) {
self.next = next;
}
fn set_prev(&mut self, prev: Option<NonNull<SlabHeader>>) {
self.prev = prev;
}
}
impl SlabHeader {
pub fn get_color(&self) -> Color {
self.color
}
}
#[derive(Clone)]
pub struct Layout {
pub num_obj: usize,
pub layout: alloc::Layout,
pub stack_begin_offset: usize,
pub array_begin_offset: usize,
pub color_settings: ColorSettings,
}
impl Layout {
/// Determines whether an allocator can be constructed for T using the given slab size. If so,
/// it returns a constructed Layout for T using that slab size and the amount of unused space
/// left at the end of the slab (when no coloring is used).
pub fn for_slab_size(layout: alloc::Layout, slab_size: usize) -> Option<(Layout, usize)> {
let obj_size = layout.size();
let obj_align = layout.align();
let hdr_size = mem::size_of::<SlabHeader>();
// padding between the SlabHeader and the base of the pointer stack
let pre_stack_padding = Stack::<usize>::padding_after(hdr_size);
|
loop {
let candidate = num_obj + 1;
// total_hdr_size = size
|
let stack_begin_offset = hdr_size + pre_stack_padding;
// Find the largest number of objects we can fit in the slab. array_begin_offset is the
// offset from the beginning of the slab of the array of objects.
let (mut num_obj, mut array_begin_offset) = (0, 0);
|
random_line_split
|
stack.rs
|
//! This is due largely to the fact that slab size cannot be known at compile time, and must
//! instead be computed at runtime. Why this is a problem will become apparent shortly.
//!
//! The layout in memory of stack-based slabs is as follows:
//!
//! ```text
//! <header> <pre-stack padding> <stack> <post-stack padding> <array of objects>
//! ```
//!
//! The following requirements must be met with respect to memory layout:
//!
//! * The stack - which is an array of `usize` - must be aligned according to the alignment
//! required by `usize`
//! * The array of objects must be aligned according to the alignment requested by the user.
//!
//! The first requirement implies that there may need to be some padding between the header and the
//! stack. The second requirement implies that there may need to be some padding between the stack
//! and the array of objects.
//!
//! If the number of objects in a slab could be known statically, the stack could simply be an
//! array in the header. Instead, its size has to be computed dynamically, and thus cannot be a
//! field in the header (it could technically be `[*mut T]`, but this would make querying the
//! header's size more difficult).
//!
//! Instead, we use the `util::stack` module to implement a dynamically-sized stack, and to
//! dynamically compute the proper pre-stack padding required in order to give the stack the proper
//! alignment. We do the same for the post-stack padding in order to give the array of objects the
//! proper alignment.
use alloc::alloc;
use core::ptr::NonNull;
use core::{mem, ptr};
use init::InitSystem;
use object_alloc::UntypedObjectAlloc;
use util::color::{Color, ColorSettings};
use util::list::*;
use util::stack::Stack;
use SlabSystem;
/// Configuration to customize a stack-based slab implementation.
///
/// `ConfigData` completes the stack-based slab implementation by providing post-alloc and
/// pre-dealloc hooks and by providing a mechanism to look up an object's containing slab.
pub trait ConfigData
where
Self: Sized,
{
/// Perform per-slab post-allocation work.
///
/// `post_alloc` is called after a newly-allocated slab has been initialized. It is optional,
/// and defaults to a no-op.
#[allow(unused)]
fn post_alloc(&mut self, layout: &Layout, slab_size: usize, slab: NonNull<SlabHeader>) {}
/// Perform per-slab pre-deallocation work.
///
/// `pre_dealloc` is called before a slab is uninitialized and deallocated. It is optional, and
/// defaults to a no-op.
#[allow(unused)]
fn pre_dealloc(&mut self, layout: &Layout, slab_size: usize, slab: NonNull<SlabHeader>) {}
/// Look up an object's slab.
///
/// Given an object, `ptr_to_slab` locates the slab containing that object.
fn ptr_to_slab(&self, slab_size: usize, ptr: NonNull<u8>) -> NonNull<SlabHeader>;
}
pub struct System<A: UntypedObjectAlloc, C: ConfigData> {
pub data: C,
layout: Layout,
alloc: A,
}
impl<A: UntypedObjectAlloc, C: ConfigData> System<A, C> {
pub fn from_config_data(data: C, layout: Layout, alloc: A) -> System<A, C> {
System {
data: data,
layout: layout,
alloc: alloc,
}
}
}
impl<I: InitSystem, A: UntypedObjectAlloc, C: ConfigData> SlabSystem<I> for System<A, C> {
type Slab = SlabHeader;
fn alloc_slab(&mut self) -> Option<NonNull<SlabHeader>> {
unsafe {
let color = self
.layout
.color_settings
.next_color(self.layout.layout.align());
let slab = self.alloc.alloc()?.cast();
ptr::write(
slab.as_ptr(),
SlabHeader {
stack: Stack::new(),
color: color,
next: None,
prev: None,
},
);
let stack_data_ptr = self.layout.stack_begin(slab);
for i in 0..self.layout.num_obj {
let ptr = self.layout.nth_obj(slab, color, i);
(*slab.as_ptr())
.stack
.push(stack_data_ptr, I::pack(ptr, I::status_uninitialized()));
}
self.data
.post_alloc(&self.layout, self.alloc.layout().size(), slab);
Some(slab)
}
}
fn dealloc_slab(&mut self, slab: NonNull<SlabHeader>) {
unsafe {
debug_assert_eq!((*slab.as_ptr()).stack.size(), self.layout.num_obj);
self.data
.pre_dealloc(&self.layout, self.alloc.layout().size(), slab);
let stack_data_ptr = self.layout.stack_begin(slab);
for _ in 0..self.layout.num_obj {
let packed = (*slab.as_ptr()).stack.pop(stack_data_ptr);
I::drop(I::unpack_ptr(packed), I::unpack_status(packed));
}
self.alloc.dealloc(slab.cast());
}
}
fn is_full(&self, slab: NonNull<SlabHeader>) -> bool {
unsafe { (*slab.as_ptr()).stack.size() == self.layout.num_obj }
}
fn is_empty(&self, slab: NonNull<SlabHeader>) -> bool {
unsafe { (*slab.as_ptr()).stack.size() == 0 }
}
fn alloc(&self, slab: NonNull<SlabHeader>) -> (NonNull<u8>, I::Status) {
unsafe {
let stack_data_ptr = self.layout.stack_begin(slab);
let packed = (*slab.as_ptr()).stack.pop(stack_data_ptr);
(I::unpack_ptr(packed), I::unpack_status(packed))
}
}
fn dealloc(&self, obj: NonNull<u8>, init_status: I::Status) -> (NonNull<SlabHeader>, bool) {
unsafe {
let slab = self.data.ptr_to_slab(self.alloc.layout().size(), obj);
let was_empty = (*slab.as_ptr()).stack.size() == 0;
let stack_data_ptr = self.layout.stack_begin(slab);
(*slab.as_ptr())
.stack
.push(stack_data_ptr, I::pack(obj, init_status));
(slab, was_empty)
}
}
}
pub struct SlabHeader {
stack: Stack<usize>, // note: this is only the metadata; the real stack comes after this header
color: Color, // extra padding added before array beginning
next: Option<NonNull<SlabHeader>>,
prev: Option<NonNull<SlabHeader>>,
}
impl Linkable for SlabHeader {
fn next(&self) -> Option<NonNull<SlabHeader>> {
self.next
}
fn prev(&self) -> Option<NonNull<SlabHeader>> {
self.prev
}
fn set_next(&mut self, next: Option<NonNull<SlabHeader>>) {
self.next = next;
}
fn set_prev(&mut self, prev: Option<NonNull<SlabHeader>>) {
self.prev = prev;
}
}
impl SlabHeader {
pub fn get_color(&self) -> Color {
self.color
}
}
#[derive(Clone)]
pub struct Layout {
pub num_obj: usize,
pub layout: alloc::Layout,
pub stack_begin_offset: usize,
pub array_begin_offset: usize,
pub color_settings: ColorSettings,
}
impl Layout {
/// Determines whether an allocator can be constructed for T using the given slab size. If so,
/// it returns a constructed Layout for T using that slab size and the amount of unused space
/// left at the end of the slab (when no coloring is used).
pub fn for_slab_size(layout: alloc::Layout, slab_size: usize) -> Option<(Layout, usize)> {
let obj_size = layout.size();
let obj_align = layout.align();
let hdr_size = mem::size_of::<SlabHeader>();
// padding between the SlabHeader and the base of the pointer stack
let pre_stack_padding = Stack::<usize>::padding_after(hdr_size);
let stack_begin_offset = hdr_size + pre_stack_padding;
// Find the largest number of objects we can fit in the slab. array_begin_offset is the
// offset from the beginning of the slab of the array of objects.
let (mut num_obj, mut array_begin_offset) = (0, 0);
loop {
let candidate = num_obj + 1;
// total_hdr_size = size of header, post-header padding, and stack
let total_hdr_size = stack_begin_offset + Stack::<usize>::bytes_for(candidate);
// Padding between the pointer stack and the array of objects. NOTE:
// The Layout alignment isn't used here, so we use 1 because it's
// guaranteed not to cause from_size_align to return None.
let post_stack_padding = alloc::Layout::from_size_align(total_hdr_size, 1)
.unwrap()
.padding_needed_for(obj_align);
if total_hdr_size + post_stack_padding + (candidate * obj_size) <= slab_size
|
{
num_obj = candidate;
array_begin_offset = total_hdr_size + post_stack_padding;
}
|
conditional_block
|
|
stack.rs
|
constructed from headers on each object instead of a stack of pointers.
//!
//! This implementation is generic in that it does not prescribe a method for mapping objects to
//! their containing slabs, but instead requires that an implementation of this functionality be
//! provided (see the `ConfigData` trait). The `aligned` module implements this functionality by
//! ensuring that slabs have an alignment equal to their size, and using this to compute a bit mask
//! for objects in the slab. The `large` module implements this functionality by storing object
//! pointers or page addresses in an allocator-global hash table.
//!
//! # Layout
//! The layout of stack-based slabs is somewhat confusing, and not readily obvious from the code.
//! This is due largely to the fact that slab size cannot be known at compile time, and must
//! instead be computed at runtime. Why this is a problem will become apparent shortly.
//!
//! The layout in memory of stack-based slabs is as follows:
//!
//! ```text
//! <header> <pre-stack padding> <stack> <post-stack padding> <array of objects>
//! ```
//!
//! The following requirements must be met with respect to memory layout:
//!
//! * The stack - which is an array of `usize` - must be aligned according to the alignment
//! required by `usize`
//! * The array of objects must be aligned according to the alignment requested by the user.
//!
//! The first requirement implies that there may need to be some padding between the header and the
//! stack. The second requirement implies that there may need to be some padding between the stack
//! and the array of objects.
//!
//! If the number of objects in a slab could be known statically, the stack could simply be an
//! array in the header. Instead, its size has to be computed dynamically, and thus cannot be a
//! field in the header (it could technically be `[*mut T]`, but this would make querying the
//! header's size more difficult).
//!
//! Instead, we use the `util::stack` module to implement a dynamically-sized stack, and to
//! dynamically compute the proper pre-stack padding required in order to give the stack the proper
//! alignment. We do the same for the post-stack padding in order to give the array of objects the
//! proper alignment.
use alloc::alloc;
use core::ptr::NonNull;
use core::{mem, ptr};
use init::InitSystem;
use object_alloc::UntypedObjectAlloc;
use util::color::{Color, ColorSettings};
use util::list::*;
use util::stack::Stack;
use SlabSystem;
/// Configuration to customize a stack-based slab implementation.
///
/// `ConfigData` completes the stack-based slab implementation by providing post-alloc and
/// pre-dealloc hooks and by providing a mechanism to look up an object's containing slab.
pub trait ConfigData
where
Self: Sized,
{
/// Perform per-slab post-allocation work.
///
/// `post_alloc` is called after a newly-allocated slab has been initialized. It is optional,
/// and defaults to a no-op.
#[allow(unused)]
fn post_alloc(&mut self, layout: &Layout, slab_size: usize, slab: NonNull<SlabHeader>) {}
/// Perform per-slab pre-deallocation work.
///
/// `pre_dealloc` is called before a slab is uninitialized and deallocated. It is optional, and
/// defaults to a no-op.
#[allow(unused)]
fn pre_dealloc(&mut self, layout: &Layout, slab_size: usize, slab: NonNull<SlabHeader>) {}
/// Look up an object's slab.
///
/// Given an object, `ptr_to_slab` locates the slab containing that object.
fn ptr_to_slab(&self, slab_size: usize, ptr: NonNull<u8>) -> NonNull<SlabHeader>;
}
pub struct System<A: UntypedObjectAlloc, C: ConfigData> {
pub data: C,
layout: Layout,
alloc: A,
}
impl<A: UntypedObjectAlloc, C: ConfigData> System<A, C> {
pub fn from_config_data(data: C, layout: Layout, alloc: A) -> System<A, C> {
System {
data: data,
layout: layout,
alloc: alloc,
}
}
}
impl<I: InitSystem, A: UntypedObjectAlloc, C: ConfigData> SlabSystem<I> for System<A, C> {
type Slab = SlabHeader;
fn alloc_slab(&mut self) -> Option<NonNull<SlabHeader>> {
unsafe {
let color = self
.layout
.color_settings
.next_color(self.layout.layout.align());
let slab = self.alloc.alloc()?.cast();
ptr::write(
slab.as_ptr(),
SlabHeader {
stack: Stack::new(),
color: color,
next: None,
prev: None,
},
);
let stack_data_ptr = self.layout.stack_begin(slab);
for i in 0..self.layout.num_obj {
let ptr = self.layout.nth_obj(slab, color, i);
(*slab.as_ptr())
.stack
.push(stack_data_ptr, I::pack(ptr, I::status_uninitialized()));
}
self.data
.post_alloc(&self.layout, self.alloc.layout().size(), slab);
Some(slab)
}
}
fn dealloc_slab(&mut self, slab: NonNull<SlabHeader>) {
unsafe {
debug_assert_eq!((*slab.as_ptr()).stack.size(), self.layout.num_obj);
self.data
.pre_dealloc(&self.layout, self.alloc.layout().size(), slab);
let stack_data_ptr = self.layout.stack_begin(slab);
for _ in 0..self.layout.num_obj {
let packed = (*slab.as_ptr()).stack.pop(stack_data_ptr);
I::drop(I::unpack_ptr(packed), I::unpack_status(packed));
}
self.alloc.dealloc(slab.cast());
}
}
fn is_full(&self, slab: NonNull<SlabHeader>) -> bool {
unsafe { (*slab.as_ptr()).stack.size() == self.layout.num_obj }
}
fn is_empty(&self, slab: NonNull<SlabHeader>) -> bool {
unsafe { (*slab.as_ptr()).stack.size() == 0 }
}
fn alloc(&self, slab: NonNull<SlabHeader>) -> (NonNull<u8>, I::Status) {
unsafe {
let stack_data_ptr = self.layout.stack_begin(slab);
let packed = (*slab.as_ptr()).stack.pop(stack_data_ptr);
(I::unpack_ptr(packed), I::unpack_status(packed))
}
}
fn dealloc(&self, obj: NonNull<u8>, init_status: I::Status) -> (NonNull<SlabHeader>, bool)
|
}
pub struct SlabHeader {
stack: Stack<usize>, // note: this is only the metadata; the real stack comes after this header
color: Color, // extra padding added before array beginning
next: Option<NonNull<SlabHeader>>,
prev: Option<NonNull<SlabHeader>>,
}
impl Linkable for SlabHeader {
fn next(&self) -> Option<NonNull<SlabHeader>> {
self.next
}
fn prev(&self) -> Option<NonNull<SlabHeader>> {
self.prev
}
fn set_next(&mut self, next: Option<NonNull<SlabHeader>>) {
self.next = next;
}
fn set_prev(&mut self, prev: Option<NonNull<SlabHeader>>) {
self.prev = prev;
}
}
impl SlabHeader {
pub fn get_color(&self) -> Color {
self.color
}
}
#[derive(Clone)]
pub struct Layout {
pub num_obj: usize,
pub layout: alloc::Layout,
pub stack_begin_offset: usize,
pub array_begin_offset: usize,
pub color_settings: ColorSettings,
}
impl Layout {
/// Determines whether an allocator can be constructed for T using the given slab size. If so,
/// it returns a constructed Layout for T using that slab size and the amount of unused space
/// left at the end of the slab (when no coloring is used).
pub fn for_slab_size(layout: alloc::Layout, slab_size: usize) -> Option<(Layout, usize)> {
let obj_size = layout.size();
let obj_align = layout.align();
let hdr_size = mem::size_of::<SlabHeader>();
// padding between the SlabHeader and the base of the pointer stack
let pre_stack_padding = Stack::<usize>::padding_after(hdr_size);
let stack_begin_offset = hdr_size + pre_stack_padding;
// Find the largest number of objects we can fit in the slab. array_begin_offset is the
// offset from the beginning of the slab of the array of objects.
let (mut num_obj, mut array_begin_offset) = (0, 0);
loop {
let candidate = num_obj + 1;
// total_hdr_size =
|
{
unsafe {
let slab = self.data.ptr_to_slab(self.alloc.layout().size(), obj);
let was_empty = (*slab.as_ptr()).stack.size() == 0;
let stack_data_ptr = self.layout.stack_begin(slab);
(*slab.as_ptr())
.stack
.push(stack_data_ptr, I::pack(obj, init_status));
(slab, was_empty)
}
}
|
identifier_body
|
Total_ALE.py
|
"""
Write boundary conditions, equations and create the files for solution.
"""
self.mesh = mesh
self.dt = Constant(dt)
self.theta = theta
self.t = 0.0
self.v_max = v_max
self.mu_f = mu_f
self.rho_f = rho_f
self.lambda_s = lambda_s
self.mu_s = mu_s
self.rho_s = rho_s
self.bndry = bndry
self.interface = interface
# bounding box tree
self.bb = BoundingBoxTree()
self.bb.build(self.mesh)
# Define finite elements
eV = VectorElement("CG", mesh.ufl_cell(), 2) # velocity element
eB = VectorElement("Bubble", mesh.ufl_cell(), mesh.geometry().dim()+1) # Bubble element
eU = VectorElement("CG", mesh.ufl_cell(), 2) # displacement element
eP = FiniteElement("DG", mesh.ufl_cell(), 1) # pressure element
eW = MixedElement([eV, eB, eU, eB, eP]) # final mixed element
W = FunctionSpace(self.mesh, eW) # mixed space
self.W = W
self.V = FunctionSpace(self.mesh, eV)
# Set boundary conditions
self.v_in = Expression(("t<2.0? 0.5*(1.0 - cos(0.5*pi*t))*v_max*4/(gW*gW)*(x[1]*(gW - x[1])): \
v_max*4/(gW*gW)*(x[1]*(gW - x[1]))", "0.0"),
degree = 2, v_max = Constant(self.v_max), gW = Constant(gW), t = self.t)
#info("Expression set.")
bc_v_in = DirichletBC(self.W.sub(0), self.v_in, bndry, _INFLOW)
bc_v_walls = DirichletBC(self.W.sub(0), Constant((0.0, 0.0)), bndry, _WALLS)
bc_v_circle = DirichletBC(self.W.sub(0), Constant((0.0, 0.0)), bndry, _CIRCLE)
bc_u_in = DirichletBC(self.W.sub(2), Constant((0.0, 0.0)), bndry, _INFLOW)
bc_u_circle = DirichletBC(self.W.sub(2), Constant((0.0, 0.0)), bndry, _CIRCLE)
bc_u_walls = DirichletBC(self.W.sub(2), Constant((0.0, 0.0)), bndry, _WALLS)
bc_u_out = DirichletBC(self.W.sub(2), Constant((0.0, 0.0)), bndry, _OUTFLOW)
self.bcs = [bc_v_in, bc_v_walls, bc_v_circle, bc_u_in, bc_u_walls, bc_u_circle, bc_u_out]
#info("Mesh BC.")
bc_mesh = DirichletBC(self.W.sub(2), Constant((0.0, 0.0)), interface, _FSI)
self.bcs_mesh = [bc_mesh]
#info("Normal and Circumradius.")
self.n = FacetNormal(self.mesh)
self.h = Circumradius(self.mesh)
I = Identity(self.W.mesh().geometry().dim())
# Define functions
self.w = Function(self.W) # solution to current time step
self.w0 = Function(self.W) # solution from previous time step
(v__, bv_, u__, bu_, p_) = TestFunctions(self.W)
# sum bubble elements with corresponding Lagrange elements
v_ = v__ + bv_
u_ = u__ + bu_
(v, bv, u, bu, self.p) = split(self.w)
self.v = v + bv
self.u = u + bu
(v0, bv0, u0, bu0, self.p0) = split(self.w0)
self.v0 = v0 + bv0
self.u0 = u0 + bu0
# define deformation gradient, Jacobian
self.FF = I + grad(self.u)
self.FF0 = I + grad(self.u0)
self.JJ = det(self.FF)
self.JJ0 = det(self.FF0)
# write ALE mesh movement
self.gamma = 9.0/8.0
h = CellVolume(self.mesh)**(self.gamma)
E = Constant(1.0)
E_mesh = E/h
nu_mesh = Constant(-0.02)
mu_mesh = E_mesh/(2*(1.0+nu_mesh))
lambda_mesh = (nu_mesh*E_mesh)/((1+nu_mesh)*(1-2*nu_mesh))
F_mesh = inner(mu_mesh*2*sym(grad(self.u)), grad(u_))*dx(0) \
+ lambda_mesh*inner(div(self.u), div(u_))*dx(0)
# define referential Grad and Div shortcuts
def Grad(f, F): return dot( grad(f), inv(F) )
def Div(f, F): return
|
# approximate time derivatives
du = (1.0/self.dt)*(self.u - self.u0)
dv = (1.0/self.dt)*(self.v - self.v0)
# compute velocuty part of Cauchy stress tensor for fluid
self.T_f = -self.p*I + 2*self.mu_f*sym(Grad(self.v, self.FF))
self.T_f0 = -self.p*I + 2*self.mu_f*sym(Grad(self.v0, self.FF0))
# Compute 1st Piola-Kirhhoff tensro for fluid
# - for computing surface integrals for forces in postprocessing
self.S_f = self.JJ *self.T_f*inv(self.FF).T
# write equations for fluid
a_fluid = inner(self.T_f , Grad(v_, self.FF))*self.JJ*dx(0) \
- inner(self.p, Div(v_, self.FF))*self.JJ*dx(0) \
+ inner(self.rho_f*Grad(self.v, self.FF )*(self.v - du), v_)*self.JJ*dx(0)
a_fluid0 = inner(self.T_f0, Grad(v_, self.FF0))*self.JJ0*dx(0) \
- inner(self.p, Div(v_, self.FF))*self.JJ*dx(0) \
+ inner(self.rho_f*Grad(self.v0, self.FF0)*(self.v0 - du), v_)*self.JJ0*dx(0)
b_fluid = inner(Div( self.v, self.FF ), p_)*self.JJ*dx(0)
b_fluid0 = inner(Div( self.v, self.FF ), p_)*self.JJ*dx(0)
self.F_fluid = (self.theta*self.JJ+(1.0 - self.theta)*self.JJ0)*self.rho_f*inner(dv, v_)*dx(0)\
+ self.theta*(a_fluid + b_fluid) + (1.0 - self.theta)*(a_fluid0 + b_fluid0) \
+ F_mesh
# compute 1st Piola-Kirchhoff tensor for solid (St. Vennant - Kirchhoff model)
B_s = self.FF.T *self.FF
B_s0 = self.FF0.T*self.FF0
S_s = self.FF *(0.5*self.lambda_s*tr(B_s - I)*I + self.mu_s*(B_s - I))
S_s0 = self.FF0*(0.5*self.lambda_s*tr(B_s0 - I)*I + self.mu_s*(B_s0 - I))
# write equation for solid
alpha = Constant(1.0) # Constant(1e10) #
self.F_solid = rho_s*inner(dv, v_)*dx(1) \
+ self.theta*inner(S_s , grad(v_))*dx(1) + (1.0 - self.theta)*inner(S_s0, grad(v_))*dx(1) \
+ alpha*inner(du - (self.theta*self.v + (1.0 - self.theta)*self.v0), u_)*dx(1)
dF_solid = derivative(self.F_solid, self.w)
dF_fluid = derivative(self.F_fluid, self.w)
self.problem = Problem(self.F_fluid, self.F_solid, dF_fluid, dF_solid, self.bcs_mesh, self.bcs)
self.solver = NewtonSolver()
# configure solver parameters
self.solver.parameters['relative_tolerance'] = 1e-6
self.solver.parameters['maximum_iterations'] = 15
self.solver.parameters['linear_solver'] = 'mumps'
# create files for saving
if my
|
tr( Grad(f, F) )
|
identifier_body
|
Total_ALE.py
|
\
+ alpha*inner(du - (self.theta*self.v + (1.0 - self.theta)*self.v0), u_)*dx(1)
dF_solid = derivative(self.F_solid, self.w)
dF_fluid = derivative(self.F_fluid, self.w)
self.problem = Problem(self.F_fluid, self.F_solid, dF_fluid, dF_solid, self.bcs_mesh, self.bcs)
self.solver = NewtonSolver()
# configure solver parameters
self.solver.parameters['relative_tolerance'] = 1e-6
self.solver.parameters['maximum_iterations'] = 15
self.solver.parameters['linear_solver'] = 'mumps'
# create files for saving
if my_rank == 0:
if not os.path.exists(result):
os.makedirs(result)
self.vfile = XDMFFile("%s/velocity.xdmf" % result)
self.ufile = XDMFFile("%s/displacement.xdmf" % result)
self.pfile = XDMFFile("%s/pressure.xdmf" % result)
self.sfile = XDMFFile("%s/stress.xdmf" % result)
self.vfile.parameters["flush_output"] = True
self.ufile.parameters["flush_output"] = True
self.pfile.parameters["flush_output"] = True
self.sfile.parameters["flush_output"] = True
with open(result+'/data.csv', 'w') as data_file:
writer = csv.writer(data_file, delimiter=';', lineterminator='\n')
writer.writerow(['time', 'mean pressure on outflow', 'pressure_jump',
'x-coordinate of end of beam', 'y-coordinate of end of beam',
'pressure difference',
'drag_circle', 'drag_fluid', 'drag_solid', 'drag_fullfluid',
'lift_circle', 'lift_fluid', 'lift_solid', 'lift_fullfluid'])
def solve(self, t, dt):
self.t = t
self.v_in.t = t
self.dt = Constant(dt)
self.solver.solve(self.problem, self.w.vector())
self.w0.assign(self.w)
def save(self, t):
(v, b1, u, b2, p) = self.w.split()
v.rename("v", "velocity")
u.rename("u", "displacement")
p.rename("p", "pressure")
self.vfile.write(v, t)
self.ufile.write(u, t)
self.pfile.write(p, t)
P = assemble(self.p*ds(_OUTFLOW))/gW
PI = assemble(abs(jump(self.p))*dS(_FSI))
# Compute drag and lift
force = dot(self.S_f, self.n)
D_C = -assemble(force[0]*dss(_FLUID_CYLINDER))
L_C = -assemble(force[1]*dss(_FLUID_CYLINDER))
w_ = Function(self.W)
Fbc = DirichletBC(self.W.sub(0), Constant((1.0, 0.0)), self.interface, _FSI)
Fbc.apply(w_.vector())
D_F = -assemble(action(self.F_fluid,w_))
w_ = Function(self.W)
Fbc = DirichletBC(self.W.sub(0), Constant((0.0, 1.0)), self.interface, _FSI)
Fbc.apply(w_.vector())
L_F = -assemble(action(self.F_fluid,w_))
w_ = Function(self.W)
Fbc = DirichletBC(self.W.sub(0), Constant((1.0, 0.0)), self.interface, _FSI)
Fbc.apply(w_.vector())
D_S = assemble(action(self.F_solid,w_))
w_ = Function(self.W)
Fbc = DirichletBC(self.W.sub(0), Constant((0.0, 1.0)), self.interface, _FSI)
Fbc.apply(w_.vector())
L_S = assemble(action(self.F_solid,w_))
w_ = Function(self.W)
Fbc1 = DirichletBC(self.W.sub(0), Constant((1.0, 0.0)), self.interface, _FLUID_CYLINDER)
Fbc2 = DirichletBC(self.W.sub(0), Constant((1.0, 0.0)), self.interface, _FSI)
Fbc1.apply(w_.vector())
Fbc2.apply(w_.vector())
D_FF = -assemble(action(self.F_fluid,w_))
w_ = Function(self.W)
Fbc1 = DirichletBC(self.W.sub(0), Constant((0.0, 1.0)), self.interface, _FLUID_CYLINDER)
Fbc2 = DirichletBC(self.W.sub(0), Constant((0.0, 1.0)), self.interface, _FSI)
Fbc1.apply(w_.vector())
Fbc2.apply(w_.vector())
L_FF = -assemble(action(self.F_fluid,w_))
# MPI trick to extract displacement of the end of the beam
self.w.set_allow_extrapolation(True)
pA_loc = self.p((A.x(), A.y()))
pB_loc = self.p((B.x(), B.y()))
pB_loc = self.p((B.x(), B.y()))
Ax_loc = self.u[0]((A.x(), A.y()))
Ay_loc = self.u[1]((A.x(), A.y()))
self.w.set_allow_extrapolation(False)
pi = 0
if self.bb.compute_first_collision(A) < 4294967295:
pi = 1
else:
pA_loc = 0.0
Ax_loc = 0.0
Ay_loc = 0.0
pA = MPI.sum(comm, pA_loc) / MPI.sum(comm, pi)
Ax = MPI.sum(comm, Ax_loc) / MPI.sum(comm, pi)
Ay = MPI.sum(comm, Ay_loc) / MPI.sum(comm, pi)
pi = 0
if self.bb.compute_first_collision(B) < 4294967295:
pi = 1
else:
pB_loc = 0.0
pB = MPI.sum(comm, pB_loc) / MPI.sum(comm, pi)
p_diff = pB - pA
# write computed quantities to a csv file
if my_rank == 0:
with open(result+'/data.csv', 'a') as data_file:
writer = csv.writer(data_file, delimiter=';', lineterminator='\n')
writer.writerow([t, P, PI, Ax, Ay, p_diff, D_C, D_F, D_S, D_FF, L_C, L_F, L_S, L_FF])
def get_benchmark_specification(benchmark = 'FSI1'):
"""
Method for obtaining the right problem-specific constants.
"""
if benchmark == 'FSI1':
rho_s = Constant(1e03)
nu_s = Constant(0.4)
mu_s = Constant(5e05)
rho_f = Constant(1e03)
nu_f = Constant(1e-03)
U = 0.2
T_end = 60.0
result = "results-FSI1/"
elif benchmark == 'FSI2':
rho_s = Constant(1e04)
nu_s = Constant(0.4)
mu_s = Constant(5e05)
rho_f = Constant(1e03)
nu_f = Constant(1e-03)
U = 1.0
T_end = 15.0
result = "results-FSI2/"
elif benchmark == 'FSI3':
rho_s = Constant(1e03)
nu_s = Constant(0.4)
mu_s = Constant(2e06)
rho_f = Constant(1e03)
nu_f = Constant(1e-03)
U = 2.0
T_end = 20.0
result = "results-FSI3/"
else:
raise ValueError('"{}" is a wrong name for problem specification.'.format(benchmark))
v_max = Constant(1.5*U) # mean velocity to maximum velocity
# (we have parabolic profile)
E_s = Constant(2*mu_s*(1+nu_s))
lambda_s = Constant((nu_s*E_s)/((1+nu_s)*(1-2*nu_s)))
mu_f = Constant(nu_f*rho_f)
return v_max, lambda_s, mu_s, rho_s, mu_f, rho_f, T_end, result
# set problem and its discretization
parser = OptionParser()
parser.add_option("--benchmark", dest="benchmark", default='FSI2')
parser.add_option("--mesh", dest="mesh_name", default='mesh_ALE_L1')
parser.add_option("--dt", dest="dt", default='0.001')
parser.add_option("--dt_scheme", dest="dt_scheme", default='CN') # BE BE_CN
(options, args) = parser.parse_args()
|
# name of benchmark
benchmark = options.benchmark
|
random_line_split
|
|
Total_ALE.py
|
2, p) = self.w.split()
v.rename("v", "velocity")
u.rename("u", "displacement")
p.rename("p", "pressure")
self.vfile.write(v, t)
self.ufile.write(u, t)
self.pfile.write(p, t)
P = assemble(self.p*ds(_OUTFLOW))/gW
PI = assemble(abs(jump(self.p))*dS(_FSI))
# Compute drag and lift
force = dot(self.S_f, self.n)
D_C = -assemble(force[0]*dss(_FLUID_CYLINDER))
L_C = -assemble(force[1]*dss(_FLUID_CYLINDER))
w_ = Function(self.W)
Fbc = DirichletBC(self.W.sub(0), Constant((1.0, 0.0)), self.interface, _FSI)
Fbc.apply(w_.vector())
D_F = -assemble(action(self.F_fluid,w_))
w_ = Function(self.W)
Fbc = DirichletBC(self.W.sub(0), Constant((0.0, 1.0)), self.interface, _FSI)
Fbc.apply(w_.vector())
L_F = -assemble(action(self.F_fluid,w_))
w_ = Function(self.W)
Fbc = DirichletBC(self.W.sub(0), Constant((1.0, 0.0)), self.interface, _FSI)
Fbc.apply(w_.vector())
D_S = assemble(action(self.F_solid,w_))
w_ = Function(self.W)
Fbc = DirichletBC(self.W.sub(0), Constant((0.0, 1.0)), self.interface, _FSI)
Fbc.apply(w_.vector())
L_S = assemble(action(self.F_solid,w_))
w_ = Function(self.W)
Fbc1 = DirichletBC(self.W.sub(0), Constant((1.0, 0.0)), self.interface, _FLUID_CYLINDER)
Fbc2 = DirichletBC(self.W.sub(0), Constant((1.0, 0.0)), self.interface, _FSI)
Fbc1.apply(w_.vector())
Fbc2.apply(w_.vector())
D_FF = -assemble(action(self.F_fluid,w_))
w_ = Function(self.W)
Fbc1 = DirichletBC(self.W.sub(0), Constant((0.0, 1.0)), self.interface, _FLUID_CYLINDER)
Fbc2 = DirichletBC(self.W.sub(0), Constant((0.0, 1.0)), self.interface, _FSI)
Fbc1.apply(w_.vector())
Fbc2.apply(w_.vector())
L_FF = -assemble(action(self.F_fluid,w_))
# MPI trick to extract displacement of the end of the beam
self.w.set_allow_extrapolation(True)
pA_loc = self.p((A.x(), A.y()))
pB_loc = self.p((B.x(), B.y()))
pB_loc = self.p((B.x(), B.y()))
Ax_loc = self.u[0]((A.x(), A.y()))
Ay_loc = self.u[1]((A.x(), A.y()))
self.w.set_allow_extrapolation(False)
pi = 0
if self.bb.compute_first_collision(A) < 4294967295:
pi = 1
else:
pA_loc = 0.0
Ax_loc = 0.0
Ay_loc = 0.0
pA = MPI.sum(comm, pA_loc) / MPI.sum(comm, pi)
Ax = MPI.sum(comm, Ax_loc) / MPI.sum(comm, pi)
Ay = MPI.sum(comm, Ay_loc) / MPI.sum(comm, pi)
pi = 0
if self.bb.compute_first_collision(B) < 4294967295:
pi = 1
else:
pB_loc = 0.0
pB = MPI.sum(comm, pB_loc) / MPI.sum(comm, pi)
p_diff = pB - pA
# write computed quantities to a csv file
if my_rank == 0:
with open(result+'/data.csv', 'a') as data_file:
writer = csv.writer(data_file, delimiter=';', lineterminator='\n')
writer.writerow([t, P, PI, Ax, Ay, p_diff, D_C, D_F, D_S, D_FF, L_C, L_F, L_S, L_FF])
def get_benchmark_specification(benchmark = 'FSI1'):
"""
Method for obtaining the right problem-specific constants.
"""
if benchmark == 'FSI1':
rho_s = Constant(1e03)
nu_s = Constant(0.4)
mu_s = Constant(5e05)
rho_f = Constant(1e03)
nu_f = Constant(1e-03)
U = 0.2
T_end = 60.0
result = "results-FSI1/"
elif benchmark == 'FSI2':
rho_s = Constant(1e04)
nu_s = Constant(0.4)
mu_s = Constant(5e05)
rho_f = Constant(1e03)
nu_f = Constant(1e-03)
U = 1.0
T_end = 15.0
result = "results-FSI2/"
elif benchmark == 'FSI3':
rho_s = Constant(1e03)
nu_s = Constant(0.4)
mu_s = Constant(2e06)
rho_f = Constant(1e03)
nu_f = Constant(1e-03)
U = 2.0
T_end = 20.0
result = "results-FSI3/"
else:
raise ValueError('"{}" is a wrong name for problem specification.'.format(benchmark))
v_max = Constant(1.5*U) # mean velocity to maximum velocity
# (we have parabolic profile)
E_s = Constant(2*mu_s*(1+nu_s))
lambda_s = Constant((nu_s*E_s)/((1+nu_s)*(1-2*nu_s)))
mu_f = Constant(nu_f*rho_f)
return v_max, lambda_s, mu_s, rho_s, mu_f, rho_f, T_end, result
# set problem and its discretization
parser = OptionParser()
parser.add_option("--benchmark", dest="benchmark", default='FSI2')
parser.add_option("--mesh", dest="mesh_name", default='mesh_ALE_L1')
parser.add_option("--dt", dest="dt", default='0.001')
parser.add_option("--dt_scheme", dest="dt_scheme", default='CN') # BE BE_CN
(options, args) = parser.parse_args()
# name of benchmark
benchmark = options.benchmark
# name of mesh
mesh_name = options.mesh_name
relative_path_to_mesh = 'meshes/'+mesh_name+'.h5'
# time step size
dt = options.dt
# time stepping scheme
dt_scheme = options.dt_scheme
# choose theta according to dt_scheme
if dt_scheme in ['BE', 'BE_CN']:
theta = Constant(1.0)
elif dt_scheme == 'CN':
theta = Constant(0.5)
else:
raise ValueError('Invalid argument for dt_scheme')
v_max, lambda_s, mu_s, rho_s, mu_f, rho_f, t_end, result = get_benchmark_specification(benchmark)
result = result + 'dt_' + str(dt) + '/' + dt_scheme + '/' + mesh_name[:-3] + '/' + mesh_name[-2:]
# load mesh with boundary and domain markers
sys.path.append('../meshes')
import marker
#(mesh, bndry, domains, interface, A, B) \
# = marker.give_marked_mesh(mesh_coarseness = mesh_coarseness, refinement = True, ALE = True)
(mesh, bndry, domains, interface, A, B) = marker.give_gmsh_mesh(relative_path_to_mesh)
# domain (used while building mesh) - needed for inflow condition
gW = 0.41
# boundary marks' names (already setted to the mesh) - needed for boundary conditions
_INFLOW = 1
_WALLS = 2
_CIRCLE = 3
_OUTFLOW = 4
# interface marks
_FSI = 1
_FLUID_CYLINDER = 2
dx = dx(domain=mesh, subdomain_data = domains)
ds = ds(domain=mesh, subdomain_data = bndry)
dss = ds(domain=mesh, subdomain_data = interface)
dS = dS(domain=mesh, subdomain_data = interface)
flow = Flow(mesh, bndry, interface, dt, theta, v_max, lambda_s, mu_s, rho_s, mu_f, rho_f, result)
t = 0.0
while t < 2.0:
if my_rank == 0:
info("
|
t = %.4f, t_end = %.1f" % (t, t_end))
f
|
conditional_block
|
|
Total_ALE.py
|
"""
Write boundary conditions, equations and create the files for solution.
"""
self.mesh = mesh
self.dt = Constant(dt)
self.theta = theta
self.t = 0.0
self.v_max = v_max
self.mu_f = mu_f
self.rho_f = rho_f
self.lambda_s = lambda_s
self.mu_s = mu_s
self.rho_s = rho_s
self.bndry = bndry
self.interface = interface
# bounding box tree
self.bb = BoundingBoxTree()
self.bb.build(self.mesh)
# Define finite elements
eV = VectorElement("CG", mesh.ufl_cell(), 2) # velocity element
eB = VectorElement("Bubble", mesh.ufl_cell(), mesh.geometry().dim()+1) # Bubble element
eU = VectorElement("CG", mesh.ufl_cell(), 2) # displacement element
eP = FiniteElement("DG", mesh.ufl_cell(), 1) # pressure element
eW = MixedElement([eV, eB, eU, eB, eP]) # final mixed element
W = FunctionSpace(self.mesh, eW) # mixed space
self.W = W
self.V = FunctionSpace(self.mesh, eV)
# Set boundary conditions
self.v_in = Expression(("t<2.0? 0.5*(1.0 - cos(0.5*pi*t))*v_max*4/(gW*gW)*(x[1]*(gW - x[1])): \
v_max*4/(gW*gW)*(x[1]*(gW - x[1]))", "0.0"),
degree = 2, v_max = Constant(self.v_max), gW = Constant(gW), t = self.t)
#info("Expression set.")
bc_v_in = DirichletBC(self.W.sub(0), self.v_in, bndry, _INFLOW)
bc_v_walls = DirichletBC(self.W.sub(0), Constant((0.0, 0.0)), bndry, _WALLS)
bc_v_circle = DirichletBC(self.W.sub(0), Constant((0.0, 0.0)), bndry, _CIRCLE)
bc_u_in = DirichletBC(self.W.sub(2), Constant((0.0, 0.0)), bndry, _INFLOW)
bc_u_circle = DirichletBC(self.W.sub(2), Constant((0.0, 0.0)), bndry, _CIRCLE)
bc_u_walls = DirichletBC(self.W.sub(2), Constant((0.0, 0.0)), bndry, _WALLS)
bc_u_out = DirichletBC(self.W.sub(2), Constant((0.0, 0.0)), bndry, _OUTFLOW)
self.bcs = [bc_v_in, bc_v_walls, bc_v_circle, bc_u_in, bc_u_walls, bc_u_circle, bc_u_out]
#info("Mesh BC.")
bc_mesh = DirichletBC(self.W.sub(2), Constant((0.0, 0.0)), interface, _FSI)
self.bcs_mesh = [bc_mesh]
#info("Normal and Circumradius.")
self.n = FacetNormal(self.mesh)
self.h = Circumradius(self.mesh)
I = Identity(self.W.mesh().geometry().dim())
# Define functions
self.w = Function(self.W) # solution to current time step
self.w0 = Function(self.W) # solution from previous time step
(v__, bv_, u__, bu_, p_) = TestFunctions(self.W)
# sum bubble elements with corresponding Lagrange elements
v_ = v__ + bv_
u_ = u__ + bu_
(v, bv, u, bu, self.p) = split(self.w)
self.v = v + bv
self.u = u + bu
(v0, bv0, u0, bu0, self.p0) = split(self.w0)
self.v0 = v0 + bv0
self.u0 = u0 + bu0
# define deformation gradient, Jacobian
self.FF = I + grad(self.u)
self.FF0 = I + grad(self.u0)
self.JJ = det(self.FF)
self.JJ0 = det(self.FF0)
# write ALE mesh movement
self.gamma = 9.0/8.0
h = CellVolume(self.mesh)**(self.gamma)
E = Constant(1.0)
E_mesh = E/h
nu_mesh = Constant(-0.02)
mu_mesh = E_mesh/(2*(1.0+nu_mesh))
lambda_mesh = (nu_mesh*E_mesh)/((1+nu_mesh)*(1-2*nu_mesh))
F_mesh = inner(mu_mesh*2*sym(grad(self.u)), grad(u_))*dx(0) \
+ lambda_mesh*inner(div(self.u), div(u_))*dx(0)
# define referential Grad and Div shortcuts
def Grad(f
|
: return dot( grad(f), inv(F) )
def Div(f, F): return tr( Grad(f, F) )
# approximate time derivatives
du = (1.0/self.dt)*(self.u - self.u0)
dv = (1.0/self.dt)*(self.v - self.v0)
# compute velocuty part of Cauchy stress tensor for fluid
self.T_f = -self.p*I + 2*self.mu_f*sym(Grad(self.v, self.FF))
self.T_f0 = -self.p*I + 2*self.mu_f*sym(Grad(self.v0, self.FF0))
# Compute 1st Piola-Kirhhoff tensro for fluid
# - for computing surface integrals for forces in postprocessing
self.S_f = self.JJ *self.T_f*inv(self.FF).T
# write equations for fluid
a_fluid = inner(self.T_f , Grad(v_, self.FF))*self.JJ*dx(0) \
- inner(self.p, Div(v_, self.FF))*self.JJ*dx(0) \
+ inner(self.rho_f*Grad(self.v, self.FF )*(self.v - du), v_)*self.JJ*dx(0)
a_fluid0 = inner(self.T_f0, Grad(v_, self.FF0))*self.JJ0*dx(0) \
- inner(self.p, Div(v_, self.FF))*self.JJ*dx(0) \
+ inner(self.rho_f*Grad(self.v0, self.FF0)*(self.v0 - du), v_)*self.JJ0*dx(0)
b_fluid = inner(Div( self.v, self.FF ), p_)*self.JJ*dx(0)
b_fluid0 = inner(Div( self.v, self.FF ), p_)*self.JJ*dx(0)
self.F_fluid = (self.theta*self.JJ+(1.0 - self.theta)*self.JJ0)*self.rho_f*inner(dv, v_)*dx(0)\
+ self.theta*(a_fluid + b_fluid) + (1.0 - self.theta)*(a_fluid0 + b_fluid0) \
+ F_mesh
# compute 1st Piola-Kirchhoff tensor for solid (St. Vennant - Kirchhoff model)
B_s = self.FF.T *self.FF
B_s0 = self.FF0.T*self.FF0
S_s = self.FF *(0.5*self.lambda_s*tr(B_s - I)*I + self.mu_s*(B_s - I))
S_s0 = self.FF0*(0.5*self.lambda_s*tr(B_s0 - I)*I + self.mu_s*(B_s0 - I))
# write equation for solid
alpha = Constant(1.0) # Constant(1e10) #
self.F_solid = rho_s*inner(dv, v_)*dx(1) \
+ self.theta*inner(S_s , grad(v_))*dx(1) + (1.0 - self.theta)*inner(S_s0, grad(v_))*dx(1) \
+ alpha*inner(du - (self.theta*self.v + (1.0 - self.theta)*self.v0), u_)*dx(1)
dF_solid = derivative(self.F_solid, self.w)
dF_fluid = derivative(self.F_fluid, self.w)
self.problem = Problem(self.F_fluid, self.F_solid, dF_fluid, dF_solid, self.bcs_mesh, self.bcs)
self.solver = NewtonSolver()
# configure solver parameters
self.solver.parameters['relative_tolerance'] = 1e-6
self.solver.parameters['maximum_iterations'] = 15
self.solver.parameters['linear_solver'] = 'mumps'
# create files for saving
if my
|
, F)
|
identifier_name
|
iscsi.go
|
); err != nil {
return errors.Wrapf(err, "failed to umount %s", mountPoint)
}
// logout portal
targetIqn := o.tgtTargetIqn(snID, snKey)
out, err := exec.CommandContext(ctx, "iscsiadm", "-m", "node", "-p", defaultPortal, "-T", targetIqn, "--logout").CombinedOutput()
if err != nil {
exiterr, ok := err.(*exec.ExitError)
if !ok || iscsi.Errno(exiterr.ExitCode()) != iscsi.ENOOBJSFOUND {
return errors.Wrapf(err, "failed to logout a portal on a target %s: %s", targetIqn, out)
}
}
// delete the portal
out, err = exec.CommandContext(ctx, "iscsiadm", "-m", "node", "-p", defaultPortal, "-T", targetIqn, "-o", "delete").CombinedOutput()
if err != nil {
exiterr, ok := err.(*exec.ExitError)
if !ok || iscsi.Errno(exiterr.ExitCode()) != iscsi.ENOOBJSFOUND {
return errors.Wrapf(err, "failed to delete a portal on a target %s: %s", targetIqn, out)
}
}
// delete the target
out, err = exec.CommandContext(ctx, "tgt-admin", "--delete", targetIqn).CombinedOutput()
if err != nil {
return errors.Wrapf(err, "failed to delete target %s: %s", targetIqn, out)
}
return nil
}
// attachAndMountBlockDevice
//
// TODO(fuweid): need to track the middle state if the process has been killed.
func (o *snapshotter) attachAndMountBlockDevice(ctx context.Context, snID string, snKey string, writable bool) (retErr error) {
if err := lookup(o.tgtTargetMountpoint(snID)); err == nil {
return nil
}
// If the target already exists, it won't be processed, see man TGT-ADMIN(8)
targetConfPath := o.tgtTargetConfPath(snID, snKey)
out, err := exec.CommandContext(ctx, "tgt-admin", "-e", "-c", targetConfPath).CombinedOutput()
if err != nil {
// read the init-debug.log for readable
debugLogPath := o.tgtOverlayBDInitDebuglogPath(snID)
if data, derr := ioutil.ReadFile(debugLogPath); derr == nil {
return errors.Wrapf(err, "failed to create target by tgt-admin: %s, more detail in %s", out, data)
}
return errors.Wrapf(err, "failed to create target by tgt-admin: %s", out)
}
targetIqn := o.tgtTargetIqn(snID, snKey)
defer func() {
if retErr != nil {
deferCtx, deferCancel := rollbackContext()
defer deferCancel()
out, err = exec.CommandContext(ctx, "tgt-admin", "--delete", targetIqn).CombinedOutput()
if err != nil {
log.G(deferCtx).WithError(err).Warnf("failed to rollback target by tgt-admin: %s", out)
}
}
}()
// Add a portal on a target
out, err = exec.CommandContext(ctx, "iscsiadm", "-m", "node", "-p", defaultPortal, "-T", targetIqn, "-o", "new").CombinedOutput()
if err != nil {
return errors.Wrapf(err, "failed to add a portal on a target %s: %s", targetIqn, out)
}
defer func() {
// rollback the portal
if retErr != nil {
deferCtx, deferCancel := rollbackContext()
defer deferCancel()
out, err = exec.CommandContext(deferCtx, "iscsiadm", "-m", "node", "-p", defaultPortal, "-T", targetIqn, "-o", "delete").CombinedOutput()
if err != nil {
log.G(deferCtx).WithError(err).Warnf("failed to rollback a portal on a target %s: %s", targetIqn, out)
}
}
}()
// Login a portal on a target
out, err = exec.CommandContext(ctx, "iscsiadm", "-m", "node", "-p", defaultPortal, "-T", targetIqn, "--login").CombinedOutput()
if err != nil {
exiterr, ok := err.(*exec.ExitError)
if !ok || iscsi.Errno(exiterr.ExitCode()) != iscsi.ESESSEXISTS {
return errors.Wrapf(err, "failed to login a portal on a target %s: %s", targetIqn, out)
}
}
defer func() {
// NOTE(fuweid): Basically, do login only once. The rollback doesn't impact other running portal.
if retErr != nil {
deferCtx, deferCancel := rollbackContext()
defer deferCancel()
out, err = exec.CommandContext(deferCtx, "iscsiadm", "-m", "node", "-p", defaultPortal, "-T", targetIqn, "--logout").CombinedOutput()
if err != nil {
log.G(deferCtx).WithError(err).Warnf("failed to rollback to logout on a target %s: %s", targetIqn, out)
}
}
}()
// Find the session and hostNumber mapping
hostToSessionID, err := iscsi.GetISCSIHostSessionMapForTarget(targetIqn, defaultPortal)
if err != nil {
return errors.Wrapf(err, "failed to get hostNumber->SessionID mapping for %s", targetIqn)
}
if len(hostToSessionID) != 1 {
return errors.Errorf("unexpected hostNumber->SessionID mapping result %v for %s", hostToSessionID, targetIqn)
}
// The device doesn't show up instantly. Need retry here.
var lastErr error = nil
var mountPoint = o.tgtTargetMountpoint(snID)
for i := 1; i <= maxAttachAttempts; i++ {
for hostNumber, sessionIDs := range hostToSessionID {
if len(sessionIDs) != 1 {
return errors.Errorf("unexpected hostNumber->SessionID mapping result %v for %s", hostToSessionID, targetIqn)
}
// Assume that both channelID and targetID are zero.
devices, err := iscsi.GetDevicesForTarget(targetIqn, hostNumber, sessionIDs[0], 0, 0)
if err != nil {
return err
}
if len(devices) != 1 {
lastErr = errors.Errorf("unexpected devices %v for %s", devices, targetIqn)
break
}
var mflag uintptr = unix.MS_RDONLY
if writable {
mflag = 0
}
// TODO(fuweid): how to support multiple filesystem?
if err := unix.Mount(devices[0], mountPoint, "ext4", mflag, ""); err != nil {
return errors.Wrapf(err, "failed to mount the device %s on %s", devices[0], mountPoint)
}
lastErr = nil
}
if lastErr == nil {
break
}
time.Sleep(1 * time.Second)
}
return lastErr
}
// constructOverlayBDSpec generates the config spec for OverlayBD backing store.
func (o *snapshotter) constructOverlayBDSpec(ctx context.Context, key string, writable bool) error
|
return err
}
configJSON.Lowers = parentConfJSON.Lowers
}
switch stype {
case storageTypeRemoteBlock:
if writable {
return errors.Errorf("remote block device is readonly, not support writable")
}
blobSize, err := strconv.Atoi(info.Labels[labelKeyOverlayBDBlobSize])
if err != nil {
return errors.Wrapf(err, "failed to parse value of label %s of snapshot %s", labelKeyOverlayBDBlobSize, key)
}
blobDigest := info.Labels[labelKeyOverlayBDBlobDigest]
blobPrefixURL, err := o.constructImageBlobURL(info.Labels[labelKeyImageRef])
if err != nil {
return errors.Wrapf(err, "failed to construct image blob prefix url for snapshot %s", key)
}
configJSON.RepoBlobURL = blobPrefixURL
configJSON.Lowers = append(configJSON.Lowers, OverlayBDBSConfigLower{
Digest: blobDigest,
Size: int64(blobSize),
|
{
id, info, _, err := storage.GetInfo(ctx, key)
if err != nil {
return errors.Wrapf(err, "failed to get info for snapshot %s", key)
}
stype, err := o.identifySnapshotStorageType(id, info)
if err != nil {
return errors.Wrapf(err, "failed to identify storage of snapshot %s", key)
}
configJSON := OverlayBDBSConfig{
Lowers: []OverlayBDBSConfigLower{},
ResultFile: o.tgtOverlayBDInitDebuglogPath(id),
}
// load the parent's config and reuse the lowerdir
if info.Parent != "" {
parentConfJSON, err := o.loadBackingStoreConfig(ctx, info.Parent)
if err != nil {
|
identifier_body
|
iscsi.go
|
maxAttachAttempts = 10
defaultRollbackTimeout = 30 * time.Second
)
// OverlayBDBSConfig is the config of OverlayBD backing store in open-iscsi target.
type OverlayBDBSConfig struct {
RepoBlobURL string `json:"repoBlobUrl"`
Lowers []OverlayBDBSConfigLower `json:"lowers"`
Upper OverlayBDBSConfigUpper `json:"upper"`
ResultFile string `json:"resultFile"`
}
// OverlayBDBSConfigLower
type OverlayBDBSConfigLower struct {
File string `json:"file,omitempty"`
Digest string `json:"digest,omitempty"`
Size int64 `json:"size,omitempty"`
Dir string `json:"dir,omitempty"`
}
type OverlayBDBSConfigUpper struct {
Index string `json:"index,omitempty"`
Data string `json:"data,omitempty"`
}
// unmountAndDetachBlockDevice
func (o *snapshotter) unmountAndDetachBlockDevice(ctx context.Context, snID string, snKey string) error {
mountPoint := o.tgtTargetMountpoint(snID)
if err := mount.UnmountAll(mountPoint, 0); err != nil {
return errors.Wrapf(err, "failed to umount %s", mountPoint)
}
// logout portal
targetIqn := o.tgtTargetIqn(snID, snKey)
out, err := exec.CommandContext(ctx, "iscsiadm", "-m", "node", "-p", defaultPortal, "-T", targetIqn, "--logout").CombinedOutput()
if err != nil {
exiterr, ok := err.(*exec.ExitError)
if !ok || iscsi.Errno(exiterr.ExitCode()) != iscsi.ENOOBJSFOUND {
return errors.Wrapf(err, "failed to logout a portal on a target %s: %s", targetIqn, out)
}
}
// delete the portal
out, err = exec.CommandContext(ctx, "iscsiadm", "-m", "node", "-p", defaultPortal, "-T", targetIqn, "-o", "delete").CombinedOutput()
if err != nil {
exiterr, ok := err.(*exec.ExitError)
if !ok || iscsi.Errno(exiterr.ExitCode()) != iscsi.ENOOBJSFOUND {
return errors.Wrapf(err, "failed to delete a portal on a target %s: %s", targetIqn, out)
}
}
// delete the target
out, err = exec.CommandContext(ctx, "tgt-admin", "--delete", targetIqn).CombinedOutput()
if err != nil {
return errors.Wrapf(err, "failed to delete target %s: %s", targetIqn, out)
}
return nil
}
// attachAndMountBlockDevice
//
// TODO(fuweid): need to track the middle state if the process has been killed.
func (o *snapshotter) attachAndMountBlockDevice(ctx context.Context, snID string, snKey string, writable bool) (retErr error) {
if err := lookup(o.tgtTargetMountpoint(snID)); err == nil {
return nil
}
// If the target already exists, it won't be processed, see man TGT-ADMIN(8)
targetConfPath := o.tgtTargetConfPath(snID, snKey)
out, err := exec.CommandContext(ctx, "tgt-admin", "-e", "-c", targetConfPath).CombinedOutput()
if err != nil {
// read the init-debug.log for readable
debugLogPath := o.tgtOverlayBDInitDebuglogPath(snID)
if data, derr := ioutil.ReadFile(debugLogPath); derr == nil {
return errors.Wrapf(err, "failed to create target by tgt-admin: %s, more detail in %s", out, data)
}
return errors.Wrapf(err, "failed to create target by tgt-admin: %s", out)
}
targetIqn := o.tgtTargetIqn(snID, snKey)
defer func() {
if retErr != nil {
deferCtx, deferCancel := rollbackContext()
defer deferCancel()
out, err = exec.CommandContext(ctx, "tgt-admin", "--delete", targetIqn).CombinedOutput()
if err != nil {
log.G(deferCtx).WithError(err).Warnf("failed to rollback target by tgt-admin: %s", out)
}
}
}()
// Add a portal on a target
out, err = exec.CommandContext(ctx, "iscsiadm", "-m", "node", "-p", defaultPortal, "-T", targetIqn, "-o", "new").CombinedOutput()
if err != nil {
return errors.Wrapf(err, "failed to add a portal on a target %s: %s", targetIqn, out)
}
defer func() {
// rollback the portal
if retErr != nil {
deferCtx, deferCancel := rollbackContext()
defer deferCancel()
out, err = exec.CommandContext(deferCtx, "iscsiadm", "-m", "node", "-p", defaultPortal, "-T", targetIqn, "-o", "delete").CombinedOutput()
if err != nil {
log.G(deferCtx).WithError(err).Warnf("failed to rollback a portal on a target %s: %s", targetIqn, out)
}
}
}()
// Login a portal on a target
out, err = exec.CommandContext(ctx, "iscsiadm", "-m", "node", "-p", defaultPortal, "-T", targetIqn, "--login").CombinedOutput()
if err != nil {
exiterr, ok := err.(*exec.ExitError)
if !ok || iscsi.Errno(exiterr.ExitCode()) != iscsi.ESESSEXISTS {
return errors.Wrapf(err, "failed to login a portal on a target %s: %s", targetIqn, out)
}
}
defer func() {
// NOTE(fuweid): Basically, do login only once. The rollback doesn't impact other running portal.
if retErr != nil {
deferCtx, deferCancel := rollbackContext()
defer deferCancel()
out, err = exec.CommandContext(deferCtx, "iscsiadm", "-m", "node", "-p", defaultPortal, "-T", targetIqn, "--logout").CombinedOutput()
if err != nil {
log.G(deferCtx).WithError(err).Warnf("failed to rollback to logout on a target %s: %s", targetIqn, out)
}
}
}()
// Find the session and hostNumber mapping
hostToSessionID, err := iscsi.GetISCSIHostSessionMapForTarget(targetIqn, defaultPortal)
if err != nil {
return errors.Wrapf(err, "failed to get hostNumber->SessionID mapping for %s", targetIqn)
}
if len(hostToSessionID) != 1 {
return errors.Errorf("unexpected hostNumber->SessionID mapping result %v for %s", hostToSessionID, targetIqn)
}
// The device doesn't show up instantly. Need retry here.
var lastErr error = nil
var mountPoint = o.tgtTargetMountpoint(snID)
for i := 1; i <= maxAttachAttempts; i++ {
for hostNumber, sessionIDs := range hostToSessionID {
if len(sessionIDs) != 1 {
return errors.Errorf("unexpected hostNumber->SessionID mapping result %v for %s", hostToSessionID, targetIqn)
}
// Assume that both channelID and targetID are zero.
devices, err := iscsi.GetDevicesForTarget(targetIqn, hostNumber, sessionIDs[0], 0, 0)
if err != nil {
return err
}
if len(devices) != 1 {
lastErr = errors.Errorf("unexpected devices %v for %s", devices, targetIqn)
break
}
var mflag uintptr = unix.MS_RDONLY
if writable {
mflag = 0
}
// TODO(fuweid): how to support multiple filesystem?
if err := unix.Mount(devices[0], mountPoint, "ext4", mflag, ""); err != nil {
return errors.Wrapf(err, "failed to mount the device %s on %s", devices[0], mountPoint)
}
lastErr = nil
}
if lastErr == nil {
break
}
time.Sleep(1 * time.Second)
}
return lastErr
}
// constructOverlayBDSpec generates the config spec for OverlayBD backing store.
func (o *snapshotter) constructOverlayBDSpec(ctx context.Context, key string, writable bool) error {
id, info, _, err := storage.GetInfo(ctx, key)
if err != nil {
return errors.Wrapf(err, "failed to get info for snapshot %s", key)
}
stype, err := o.identifySnapshotStorageType(id, info)
if err != nil {
return errors.Wrapf(err, "failed to identify storage of snapshot %s", key)
}
configJSON := OverlayBDBSConfig{
Lowers:
|
defaultInitiatorAddress = "127.0.0.1"
defaultInitiatorPort = "3260"
defaultPortal = defaultInitiatorAddress + ":" + defaultInitiatorPort
|
random_line_split
|
|
iscsi.go
|
); err != nil {
return errors.Wrapf(err, "failed to umount %s", mountPoint)
}
// logout portal
targetIqn := o.tgtTargetIqn(snID, snKey)
out, err := exec.CommandContext(ctx, "iscsiadm", "-m", "node", "-p", defaultPortal, "-T", targetIqn, "--logout").CombinedOutput()
if err != nil {
exiterr, ok := err.(*exec.ExitError)
if !ok || iscsi.Errno(exiterr.ExitCode()) != iscsi.ENOOBJSFOUND {
return errors.Wrapf(err, "failed to logout a portal on a target %s: %s", targetIqn, out)
}
}
// delete the portal
out, err = exec.CommandContext(ctx, "iscsiadm", "-m", "node", "-p", defaultPortal, "-T", targetIqn, "-o", "delete").CombinedOutput()
if err != nil {
exiterr, ok := err.(*exec.ExitError)
if !ok || iscsi.Errno(exiterr.ExitCode()) != iscsi.ENOOBJSFOUND {
return errors.Wrapf(err, "failed to delete a portal on a target %s: %s", targetIqn, out)
}
}
// delete the target
out, err = exec.CommandContext(ctx, "tgt-admin", "--delete", targetIqn).CombinedOutput()
if err != nil {
return errors.Wrapf(err, "failed to delete target %s: %s", targetIqn, out)
}
return nil
}
// attachAndMountBlockDevice
//
// TODO(fuweid): need to track the middle state if the process has been killed.
func (o *snapshotter) attachAndMountBlockDevice(ctx context.Context, snID string, snKey string, writable bool) (retErr error) {
if err := lookup(o.tgtTargetMountpoint(snID)); err == nil {
return nil
}
// If the target already exists, it won't be processed, see man TGT-ADMIN(8)
targetConfPath := o.tgtTargetConfPath(snID, snKey)
out, err := exec.CommandContext(ctx, "tgt-admin", "-e", "-c", targetConfPath).CombinedOutput()
if err != nil {
// read the init-debug.log for readable
debugLogPath := o.tgtOverlayBDInitDebuglogPath(snID)
if data, derr := ioutil.ReadFile(debugLogPath); derr == nil {
return errors.Wrapf(err, "failed to create target by tgt-admin: %s, more detail in %s", out, data)
}
return errors.Wrapf(err, "failed to create target by tgt-admin: %s", out)
}
targetIqn := o.tgtTargetIqn(snID, snKey)
defer func() {
if retErr != nil {
deferCtx, deferCancel := rollbackContext()
defer deferCancel()
out, err = exec.CommandContext(ctx, "tgt-admin", "--delete", targetIqn).CombinedOutput()
if err != nil {
log.G(deferCtx).WithError(err).Warnf("failed to rollback target by tgt-admin: %s", out)
}
}
}()
// Add a portal on a target
out, err = exec.CommandContext(ctx, "iscsiadm", "-m", "node", "-p", defaultPortal, "-T", targetIqn, "-o", "new").CombinedOutput()
if err != nil {
return errors.Wrapf(err, "failed to add a portal on a target %s: %s", targetIqn, out)
}
defer func() {
// rollback the portal
if retErr != nil {
deferCtx, deferCancel := rollbackContext()
defer deferCancel()
out, err = exec.CommandContext(deferCtx, "iscsiadm", "-m", "node", "-p", defaultPortal, "-T", targetIqn, "-o", "delete").CombinedOutput()
if err != nil {
log.G(deferCtx).WithError(err).Warnf("failed to rollback a portal on a target %s: %s", targetIqn, out)
}
}
}()
// Login a portal on a target
out, err = exec.CommandContext(ctx, "iscsiadm", "-m", "node", "-p", defaultPortal, "-T", targetIqn, "--login").CombinedOutput()
if err != nil {
exiterr, ok := err.(*exec.ExitError)
if !ok || iscsi.Errno(exiterr.ExitCode()) != iscsi.ESESSEXISTS
|
}
defer func() {
// NOTE(fuweid): Basically, do login only once. The rollback doesn't impact other running portal.
if retErr != nil {
deferCtx, deferCancel := rollbackContext()
defer deferCancel()
out, err = exec.CommandContext(deferCtx, "iscsiadm", "-m", "node", "-p", defaultPortal, "-T", targetIqn, "--logout").CombinedOutput()
if err != nil {
log.G(deferCtx).WithError(err).Warnf("failed to rollback to logout on a target %s: %s", targetIqn, out)
}
}
}()
// Find the session and hostNumber mapping
hostToSessionID, err := iscsi.GetISCSIHostSessionMapForTarget(targetIqn, defaultPortal)
if err != nil {
return errors.Wrapf(err, "failed to get hostNumber->SessionID mapping for %s", targetIqn)
}
if len(hostToSessionID) != 1 {
return errors.Errorf("unexpected hostNumber->SessionID mapping result %v for %s", hostToSessionID, targetIqn)
}
// The device doesn't show up instantly. Need retry here.
var lastErr error = nil
var mountPoint = o.tgtTargetMountpoint(snID)
for i := 1; i <= maxAttachAttempts; i++ {
for hostNumber, sessionIDs := range hostToSessionID {
if len(sessionIDs) != 1 {
return errors.Errorf("unexpected hostNumber->SessionID mapping result %v for %s", hostToSessionID, targetIqn)
}
// Assume that both channelID and targetID are zero.
devices, err := iscsi.GetDevicesForTarget(targetIqn, hostNumber, sessionIDs[0], 0, 0)
if err != nil {
return err
}
if len(devices) != 1 {
lastErr = errors.Errorf("unexpected devices %v for %s", devices, targetIqn)
break
}
var mflag uintptr = unix.MS_RDONLY
if writable {
mflag = 0
}
// TODO(fuweid): how to support multiple filesystem?
if err := unix.Mount(devices[0], mountPoint, "ext4", mflag, ""); err != nil {
return errors.Wrapf(err, "failed to mount the device %s on %s", devices[0], mountPoint)
}
lastErr = nil
}
if lastErr == nil {
break
}
time.Sleep(1 * time.Second)
}
return lastErr
}
// constructOverlayBDSpec generates the config spec for OverlayBD backing store.
func (o *snapshotter) constructOverlayBDSpec(ctx context.Context, key string, writable bool) error {
id, info, _, err := storage.GetInfo(ctx, key)
if err != nil {
return errors.Wrapf(err, "failed to get info for snapshot %s", key)
}
stype, err := o.identifySnapshotStorageType(id, info)
if err != nil {
return errors.Wrapf(err, "failed to identify storage of snapshot %s", key)
}
configJSON := OverlayBDBSConfig{
Lowers: []OverlayBDBSConfigLower{},
ResultFile: o.tgtOverlayBDInitDebuglogPath(id),
}
// load the parent's config and reuse the lowerdir
if info.Parent != "" {
parentConfJSON, err := o.loadBackingStoreConfig(ctx, info.Parent)
if err != nil {
return err
}
configJSON.Lowers = parentConfJSON.Lowers
}
switch stype {
case storageTypeRemoteBlock:
if writable {
return errors.Errorf("remote block device is readonly, not support writable")
}
blobSize, err := strconv.Atoi(info.Labels[labelKeyOverlayBDBlobSize])
if err != nil {
return errors.Wrapf(err, "failed to parse value of label %s of snapshot %s", labelKeyOverlayBDBlobSize, key)
}
blobDigest := info.Labels[labelKeyOverlayBDBlobDigest]
blobPrefixURL, err := o.constructImageBlobURL(info.Labels[labelKeyImageRef])
if err != nil {
return errors.Wrapf(err, "failed to construct image blob prefix url for snapshot %s", key)
}
configJSON.RepoBlobURL = blobPrefixURL
configJSON.Lowers = append(configJSON.Lowers, OverlayBDBSConfigLower{
Digest: blobDigest,
Size: int64(blobSize),
|
{
return errors.Wrapf(err, "failed to login a portal on a target %s: %s", targetIqn, out)
}
|
conditional_block
|
iscsi.go
|
s", targetIqn)
}
if len(hostToSessionID) != 1 {
return errors.Errorf("unexpected hostNumber->SessionID mapping result %v for %s", hostToSessionID, targetIqn)
}
// The device doesn't show up instantly. Need retry here.
var lastErr error = nil
var mountPoint = o.tgtTargetMountpoint(snID)
for i := 1; i <= maxAttachAttempts; i++ {
for hostNumber, sessionIDs := range hostToSessionID {
if len(sessionIDs) != 1 {
return errors.Errorf("unexpected hostNumber->SessionID mapping result %v for %s", hostToSessionID, targetIqn)
}
// Assume that both channelID and targetID are zero.
devices, err := iscsi.GetDevicesForTarget(targetIqn, hostNumber, sessionIDs[0], 0, 0)
if err != nil {
return err
}
if len(devices) != 1 {
lastErr = errors.Errorf("unexpected devices %v for %s", devices, targetIqn)
break
}
var mflag uintptr = unix.MS_RDONLY
if writable {
mflag = 0
}
// TODO(fuweid): how to support multiple filesystem?
if err := unix.Mount(devices[0], mountPoint, "ext4", mflag, ""); err != nil {
return errors.Wrapf(err, "failed to mount the device %s on %s", devices[0], mountPoint)
}
lastErr = nil
}
if lastErr == nil {
break
}
time.Sleep(1 * time.Second)
}
return lastErr
}
// constructOverlayBDSpec generates the config spec for OverlayBD backing store.
func (o *snapshotter) constructOverlayBDSpec(ctx context.Context, key string, writable bool) error {
id, info, _, err := storage.GetInfo(ctx, key)
if err != nil {
return errors.Wrapf(err, "failed to get info for snapshot %s", key)
}
stype, err := o.identifySnapshotStorageType(id, info)
if err != nil {
return errors.Wrapf(err, "failed to identify storage of snapshot %s", key)
}
configJSON := OverlayBDBSConfig{
Lowers: []OverlayBDBSConfigLower{},
ResultFile: o.tgtOverlayBDInitDebuglogPath(id),
}
// load the parent's config and reuse the lowerdir
if info.Parent != "" {
parentConfJSON, err := o.loadBackingStoreConfig(ctx, info.Parent)
if err != nil {
return err
}
configJSON.Lowers = parentConfJSON.Lowers
}
switch stype {
case storageTypeRemoteBlock:
if writable {
return errors.Errorf("remote block device is readonly, not support writable")
}
blobSize, err := strconv.Atoi(info.Labels[labelKeyOverlayBDBlobSize])
if err != nil {
return errors.Wrapf(err, "failed to parse value of label %s of snapshot %s", labelKeyOverlayBDBlobSize, key)
}
blobDigest := info.Labels[labelKeyOverlayBDBlobDigest]
blobPrefixURL, err := o.constructImageBlobURL(info.Labels[labelKeyImageRef])
if err != nil {
return errors.Wrapf(err, "failed to construct image blob prefix url for snapshot %s", key)
}
configJSON.RepoBlobURL = blobPrefixURL
configJSON.Lowers = append(configJSON.Lowers, OverlayBDBSConfigLower{
Digest: blobDigest,
Size: int64(blobSize),
Dir: o.upperPath(id),
})
case storageTypeLocalBlock:
if writable {
return errors.Errorf("local block device is readonly, not support writable")
}
configJSON.Lowers = append(configJSON.Lowers, OverlayBDBSConfigLower{
Dir: o.upperPath(id),
})
default:
if !writable || info.Parent == "" {
return errors.Errorf("unexpect storage %v of snapshot %v during construct overlaybd spec(writable=%v, parent=%s)", stype, key, writable, info.Parent)
}
if err := o.prepareWritableOverlaybd(ctx, id); err != nil {
return err
}
configJSON.Upper = OverlayBDBSConfigUpper{
Index: o.tgtOverlayBDWritableIndexPath(id),
Data: o.tgtOverlayBDWritableDataPath(id),
}
}
return o.atomicWriteBackingStoreAndTargetConfig(ctx, id, key, configJSON)
}
// loadBackingStoreConfig loads OverlayBD backing store config.
func (o *snapshotter) loadBackingStoreConfig(ctx context.Context, snKey string) (*OverlayBDBSConfig, error) {
id, _, _, err := storage.GetInfo(ctx, snKey)
if err != nil {
return nil, errors.Wrapf(err, "failed to get info of snapshot %s", snKey)
}
confPath := o.tgtOverlayBDConfPath(id)
data, err := ioutil.ReadFile(confPath)
if err != nil {
return nil, errors.Wrapf(err, "failed to read config(path=%s) of snapshot %s", confPath, snKey)
}
var configJSON OverlayBDBSConfig
if err := json.Unmarshal(data, &configJSON); err != nil {
return nil, errors.Wrapf(err, "failed to unmarshal data(%s)", string(data))
}
return &configJSON, nil
}
// constructImageBlobURL returns the https://host/v2/<name>/blobs/.
//
// TODO(fuweid): How to know the existing url schema?
func (o *snapshotter) constructImageBlobURL(ref string) (string, error) {
refspec, err := reference.Parse(ref)
if err != nil {
return "", errors.Wrapf(err, "invalid repo url %s", ref)
}
host := refspec.Hostname()
repo := strings.TrimPrefix(refspec.Locator, host+"/")
return "https://" + path.Join(host, "v2", repo) + "/blobs", nil
}
// atomicWriteBackingStoreAndTargetConfig
func (o *snapshotter) atomicWriteBackingStoreAndTargetConfig(ctx context.Context, snID string, snKey string, configJSON OverlayBDBSConfig) error {
data, err := json.Marshal(configJSON)
if err != nil {
return errors.Wrapf(err, "failed to marshal %+v configJSON into JSON", configJSON)
}
confPath := o.tgtOverlayBDConfPath(snID)
if err := continuity.AtomicWriteFile(confPath, data, 0600); err != nil {
return errors.Wrapf(err, "failed to commit the OverlayBD config on %s", confPath)
}
confDataStr := generateTargetConfInXML(o.tgtTargetIqn(snID, snKey), confPath)
targetConfPath := o.tgtTargetConfPath(snID, snKey)
return errors.Wrapf(continuity.AtomicWriteFile(targetConfPath, []byte(confDataStr), 0600),
"failed to commit the target config on %s", targetConfPath)
}
// prepareWritableOverlaybd
func (o *snapshotter) prepareWritableOverlaybd(ctx context.Context, snID string) error {
binpath := filepath.Join(o.config.OverlayBDUtilBinDir, "overlaybd-create")
// TODO(fuweid): 256GB can be configurable?
out, err := exec.CommandContext(ctx, binpath,
o.tgtOverlayBDWritableDataPath(snID),
o.tgtOverlayBDWritableIndexPath(snID), "256").CombinedOutput()
if err != nil {
return errors.Wrapf(err, "failed to prepare writable overlaybd: %s", out)
}
return nil
}
// commitWritableOverlaybd
func (o *snapshotter) commitWritableOverlaybd(ctx context.Context, snID string) (retErr error) {
binpath := filepath.Join(o.config.OverlayBDUtilBinDir, "overlaybd-commit")
tmpPath := filepath.Join(o.root, "snapshots", snID, "block", ".commit-before-zfile")
out, err := exec.CommandContext(ctx, binpath,
o.tgtOverlayBDWritableDataPath(snID),
o.tgtOverlayBDWritableIndexPath(snID), tmpPath).CombinedOutput()
if err != nil {
return errors.Wrapf(err, "failed to commit writable overlaybd: %s", out)
}
defer func() {
os.Remove(tmpPath)
}()
binpath = filepath.Join(o.config.OverlayBDUtilBinDir, "overlaybd-zfile")
out, err = exec.CommandContext(ctx, binpath, tmpPath, o.magicFilePath(snID)).CombinedOutput()
if err != nil {
return errors.Wrapf(err, "failed to create zfile: %s", out)
}
return nil
}
// generateTargetConfInXML
func generateTargetConfInXML(targetIqn string, configPath string) string {
const fmtTargetConf = `<target %q>
<backing-store %s>
bs-type %s
</backing-store>
initiator-address %s
</target>
`
return fmt.Sprintf(fmtTargetConf, targetIqn, configPath, tgtBackingStoreOverlayBD, defaultInitiatorAddress)
}
// TODO: use device number to check?
func
|
lookup
|
identifier_name
|
|
cdk.py
|
apiGatewayResource = apiGateway.root.add_resource('vaquita')
apiGatewayLandingPageResource = apiGatewayResource.add_resource('web')
apiGatewayGetSignedUrlResource = apiGatewayResource.add_resource('signedUrl')
apiGatewayImageSearchResource = apiGatewayResource.add_resource('search')
### landing page function
getLandingPageFunction = _lambda.Function(self, "VAQUITA_GET_LANDING_PAGE",
function_name="VAQUITA_GET_LANDING_PAGE",
runtime=_lambda.Runtime.PYTHON_3_7,
handler="main.handler",
code=_lambda.Code.asset("./src/landingPage"))
getLandingPageIntegration = _apigw.LambdaIntegration(
getLandingPageFunction,
proxy=True,
integration_responses=[{
'statusCode': '200',
'responseParameters': {
'method.response.header.Access-Control-Allow-Origin': "'*'",
}
}])
apiGatewayLandingPageResource.add_method('GET', getLandingPageIntegration,
method_responses=[{
'statusCode': '200',
'responseParameters': {
'method.response.header.Access-Control-Allow-Origin': True,
}
}])
### cognito
required_attribute = _cognito.StandardAttribute(required=True)
usersPool = _cognito.UserPool(self, "VAQUITA_USERS_POOL",
auto_verify=_cognito.AutoVerifiedAttrs(email=True), #required for self sign-up
standard_attributes=_cognito.StandardAttributes(email=required_attribute), #required for self sign-up
self_sign_up_enabled=True)
userPoolAppClient = _cognito.CfnUserPoolClient(self, "VAQUITA_USERS_POOL_APP_CLIENT",
supported_identity_providers=["COGNITO"],
allowed_o_auth_flows=["implicit"],
allowed_o_auth_scopes=["phone", "email", "openid", "profile"],
user_pool_id=usersPool.user_pool_id,
callback_ur_ls=[apiGatewayLandingPageResource.url],
allowed_o_auth_flows_user_pool_client=True,
explicit_auth_flows=["ALLOW_REFRESH_TOKEN_AUTH"])
userPoolDomain = _cognito.UserPoolDomain(self, "VAQUITA_USERS_POOL_DOMAIN",
user_pool=usersPool,
cognito_domain=_cognito.CognitoDomainOptions(domain_prefix="vaquita"))
### get signed URL function
getSignedUrlFunction = _lambda.Function(self, "VAQUITA_GET_SIGNED_URL",
function_name="VAQUITA_GET_SIGNED_URL",
environment={"VAQUITA_IMAGES_BUCKET": imagesS3Bucket.bucket_name},
runtime=_lambda.Runtime.PYTHON_3_7,
handler="main.handler",
code=_lambda.Code.asset("./src/getSignedUrl"))
getSignedUrlIntegration = _apigw.LambdaIntegration(
getSignedUrlFunction,
proxy=True,
integration_responses=[{
'statusCode': '200',
'responseParameters': {
'method.response.header.Access-Control-Allow-Origin': "'*'",
}
}])
apiGatewayGetSignedUrlAuthorizer = _apigw.CfnAuthorizer(self, "VAQUITA_API_GATEWAY_GET_SIGNED_URL_AUTHORIZER",
rest_api_id=apiGatewayGetSignedUrlResource.rest_api.rest_api_id,
name="VAQUITA_API_GATEWAY_GET_SIGNED_URL_AUTHORIZER",
type="COGNITO_USER_POOLS", #_apigw.AuthorizationType.COGNITO,
identity_source="method.request.header.Authorization",
provider_arns=[usersPool.user_pool_arn])
apiGatewayGetSignedUrlResource.add_method('GET', getSignedUrlIntegration,
authorization_type=_apigw.AuthorizationType.COGNITO,
method_responses=[{
'statusCode': '200',
'responseParameters': {
'method.response.header.Access-Control-Allow-Origin': True,
}
}]
).node.find_child('Resource').add_property_override('AuthorizerId', apiGatewayGetSignedUrlAuthorizer.ref)
imagesS3Bucket.grant_put(getSignedUrlFunction, objects_key_pattern="new/*")
### image massage function
imageMassageFunction = _lambda.Function(self, "VAQUITA_IMAGE_MASSAGE",
function_name="VAQUITA_IMAGE_MASSAGE",
timeout=core.Duration.seconds(6),
runtime=_lambda.Runtime.PYTHON_3_7,
environment={"VAQUITA_IMAGE_MASSAGE": imageQueue.queue_name},
handler="main.handler",
code=_lambda.Code.asset("./src/imageMassage"))
imagesS3Bucket.grant_write(imageMassageFunction, "processed/*")
imagesS3Bucket.grant_delete(imageMassageFunction, "new/*")
imagesS3Bucket.grant_read(imageMassageFunction, "new/*")
newImageAddedNotification = _s3notification.LambdaDestination(imageMassageFunction)
imagesS3Bucket.add_event_notification(_s3.EventType.OBJECT_CREATED,
newImageAddedNotification,
_s3.NotificationKeyFilter(prefix="new/")
)
imageQueue.grant_send_messages(imageMassageFunction)
### image analyzer function
imageAnalyzerFunction = _lambda.Function(self, "VAQUITA_IMAGE_ANALYSIS",
function_name="VAQUITA_IMAGE_ANALYSIS",
runtime=_lambda.Runtime.PYTHON_3_7,
timeout=core.Duration.seconds(10),
environment={
"VAQUITA_IMAGES_BUCKET": imagesS3Bucket.bucket_name,
"REGION": core.Aws.REGION,
},
handler="main.handler",
code=_lambda.Code.asset("./src/imageAnalysis"))
imageAnalyzerFunction.add_event_source(_lambda_event_source.SqsEventSource(queue=imageQueue, batch_size=10))
imageQueue.grant_consume_messages(imageMassageFunction)
lambda_rekognition_access = _iam.PolicyStatement(
effect=_iam.Effect.ALLOW,
actions=["rekognition:DetectLabels", "rekognition:DetectModerationLabels"],
resources=["*"]
)
imageAnalyzerFunction.add_to_role_policy(lambda_rekognition_access)
imagesS3Bucket.grant_read(imageAnalyzerFunction, "processed/*")
### API gateway finalizing
self.add_cors_options(apiGatewayGetSignedUrlResource)
self.add_cors_options(apiGatewayLandingPageResource)
self.add_cors_options(apiGatewayImageSearchResource)
### secret manager
database_secret = _secrets_manager.Secret(self, "VAQUITA_DATABASE_SECRET",
secret_name="rds-db-credentials/vaquita-rds-secret",
generate_secret_string=_secrets_manager.SecretStringGenerator(
generate_string_key='password',
secret_string_template='{"username": "dba"}',
require_each_included_type=True
)
)
database = _rds.CfnDBCluster(self, "VAQUITA_DATABASE",
engine=_rds.DatabaseClusterEngine.aurora_mysql(version=_rds.AuroraMysqlEngineVersion.VER_5_7_12).engine_type,
engine_mode="serverless",
# availability_zones=vpc.availability_zones,
database_name="images_labels",
enable_http_endpoint=True,
deletion_protection=False,
# enable_cloudwatch_logs_exports=["error"],
master_username=database_secret.secret_value_from_json("username").to_string(),
master_user_password=database_secret.secret_value_from_json("password").to_string(),
scaling_configuration=_rds.CfnDBCluster.ScalingConfigurationProperty(
auto_pause=True,
min_capacity=2,
max_capacity=8,
seconds_until_auto_pause=1800
),
)
database_cluster_arn = "arn:aws:rds:{}:{}:cluster:{}".format(core.Aws.REGION, core.Aws.ACCOUNT_ID, database.ref)
### secret manager
secret_target = _secrets_manager.CfnSecretTargetAttachment(self,"VAQUITA_DATABASE_SECRET_TARGET",
target_type="AWS::RDS::DBCluster",
target_id=database.ref,
secret_id=database_secret.secret_arn
)
secret_target.node.add_dependency(database)
### database function
image_data_function_role = _iam.Role(self, "VAQUITA_IMAGE_DATA_FUNCTION_ROLE",
role_name="VAQUITA_IMAGE_DATA_FUNCTION_ROLE",
assumed_by=_iam.ServicePrincipal("lambda.amazonaws.com"),
managed_policies=[
_iam.ManagedPolicy.from_aws_managed_policy_name("service-role/AWSLambdaVPCAccessExecutionRole"),
_iam.ManagedPolicy.from_aws_managed_policy_name("service-role/AWSLambdaBasicExecutionRole"),
_iam.ManagedPolicy.from_aws_managed_policy_name("AmazonRDSDataFullAccess")
]
)
image_data_function = _lambda.Function(self, "VAQUITA_IMAGE_DATA",
function_name="VAQUITA_IMAGE_DATA",
runtime=_lambda.Runtime.PYTHON_3_7
|
super().__init__(scope, id, **kwargs)
### S3 core
imagesS3Bucket = _s3.Bucket(self, "VAQUITA_IMAGES")
imagesS3Bucket.add_cors_rule(
allowed_methods=[_s3.HttpMethods.POST],
allowed_origins=["*"] # add API gateway web resource URL
)
### SQS core
imageDeadletterQueue = _sqs.Queue(self, "VAQUITA_IMAGES_DEADLETTER_QUEUE")
imageQueue = _sqs.Queue(self, "VAQUITA_IMAGES_QUEUE",
dead_letter_queue={
"max_receive_count": 3,
"queue": imageDeadletterQueue
})
### api gateway core
apiGateway = _apigw.RestApi(self, 'VAQUITA_API_GATEWAY', rest_api_name='VaquitaApiGateway')
|
identifier_body
|
|
cdk.py
|
Integration = _apigw.LambdaIntegration(
getSignedUrlFunction,
proxy=True,
integration_responses=[{
'statusCode': '200',
'responseParameters': {
'method.response.header.Access-Control-Allow-Origin': "'*'",
}
}])
apiGatewayGetSignedUrlAuthorizer = _apigw.CfnAuthorizer(self, "VAQUITA_API_GATEWAY_GET_SIGNED_URL_AUTHORIZER",
rest_api_id=apiGatewayGetSignedUrlResource.rest_api.rest_api_id,
name="VAQUITA_API_GATEWAY_GET_SIGNED_URL_AUTHORIZER",
type="COGNITO_USER_POOLS", #_apigw.AuthorizationType.COGNITO,
identity_source="method.request.header.Authorization",
provider_arns=[usersPool.user_pool_arn])
apiGatewayGetSignedUrlResource.add_method('GET', getSignedUrlIntegration,
authorization_type=_apigw.AuthorizationType.COGNITO,
method_responses=[{
'statusCode': '200',
'responseParameters': {
'method.response.header.Access-Control-Allow-Origin': True,
}
}]
).node.find_child('Resource').add_property_override('AuthorizerId', apiGatewayGetSignedUrlAuthorizer.ref)
imagesS3Bucket.grant_put(getSignedUrlFunction, objects_key_pattern="new/*")
### image massage function
imageMassageFunction = _lambda.Function(self, "VAQUITA_IMAGE_MASSAGE",
function_name="VAQUITA_IMAGE_MASSAGE",
timeout=core.Duration.seconds(6),
runtime=_lambda.Runtime.PYTHON_3_7,
environment={"VAQUITA_IMAGE_MASSAGE": imageQueue.queue_name},
handler="main.handler",
code=_lambda.Code.asset("./src/imageMassage"))
imagesS3Bucket.grant_write(imageMassageFunction, "processed/*")
imagesS3Bucket.grant_delete(imageMassageFunction, "new/*")
imagesS3Bucket.grant_read(imageMassageFunction, "new/*")
newImageAddedNotification = _s3notification.LambdaDestination(imageMassageFunction)
imagesS3Bucket.add_event_notification(_s3.EventType.OBJECT_CREATED,
newImageAddedNotification,
_s3.NotificationKeyFilter(prefix="new/")
)
imageQueue.grant_send_messages(imageMassageFunction)
### image analyzer function
imageAnalyzerFunction = _lambda.Function(self, "VAQUITA_IMAGE_ANALYSIS",
function_name="VAQUITA_IMAGE_ANALYSIS",
runtime=_lambda.Runtime.PYTHON_3_7,
timeout=core.Duration.seconds(10),
environment={
"VAQUITA_IMAGES_BUCKET": imagesS3Bucket.bucket_name,
"REGION": core.Aws.REGION,
},
handler="main.handler",
code=_lambda.Code.asset("./src/imageAnalysis"))
imageAnalyzerFunction.add_event_source(_lambda_event_source.SqsEventSource(queue=imageQueue, batch_size=10))
imageQueue.grant_consume_messages(imageMassageFunction)
lambda_rekognition_access = _iam.PolicyStatement(
effect=_iam.Effect.ALLOW,
actions=["rekognition:DetectLabels", "rekognition:DetectModerationLabels"],
resources=["*"]
)
imageAnalyzerFunction.add_to_role_policy(lambda_rekognition_access)
imagesS3Bucket.grant_read(imageAnalyzerFunction, "processed/*")
### API gateway finalizing
self.add_cors_options(apiGatewayGetSignedUrlResource)
self.add_cors_options(apiGatewayLandingPageResource)
self.add_cors_options(apiGatewayImageSearchResource)
### secret manager
database_secret = _secrets_manager.Secret(self, "VAQUITA_DATABASE_SECRET",
secret_name="rds-db-credentials/vaquita-rds-secret",
generate_secret_string=_secrets_manager.SecretStringGenerator(
generate_string_key='password',
secret_string_template='{"username": "dba"}',
require_each_included_type=True
)
)
database = _rds.CfnDBCluster(self, "VAQUITA_DATABASE",
engine=_rds.DatabaseClusterEngine.aurora_mysql(version=_rds.AuroraMysqlEngineVersion.VER_5_7_12).engine_type,
engine_mode="serverless",
# availability_zones=vpc.availability_zones,
database_name="images_labels",
enable_http_endpoint=True,
deletion_protection=False,
# enable_cloudwatch_logs_exports=["error"],
master_username=database_secret.secret_value_from_json("username").to_string(),
master_user_password=database_secret.secret_value_from_json("password").to_string(),
scaling_configuration=_rds.CfnDBCluster.ScalingConfigurationProperty(
auto_pause=True,
min_capacity=2,
max_capacity=8,
seconds_until_auto_pause=1800
),
)
database_cluster_arn = "arn:aws:rds:{}:{}:cluster:{}".format(core.Aws.REGION, core.Aws.ACCOUNT_ID, database.ref)
### secret manager
secret_target = _secrets_manager.CfnSecretTargetAttachment(self,"VAQUITA_DATABASE_SECRET_TARGET",
target_type="AWS::RDS::DBCluster",
target_id=database.ref,
secret_id=database_secret.secret_arn
)
secret_target.node.add_dependency(database)
### database function
image_data_function_role = _iam.Role(self, "VAQUITA_IMAGE_DATA_FUNCTION_ROLE",
role_name="VAQUITA_IMAGE_DATA_FUNCTION_ROLE",
assumed_by=_iam.ServicePrincipal("lambda.amazonaws.com"),
managed_policies=[
_iam.ManagedPolicy.from_aws_managed_policy_name("service-role/AWSLambdaVPCAccessExecutionRole"),
_iam.ManagedPolicy.from_aws_managed_policy_name("service-role/AWSLambdaBasicExecutionRole"),
_iam.ManagedPolicy.from_aws_managed_policy_name("AmazonRDSDataFullAccess")
]
)
image_data_function = _lambda.Function(self, "VAQUITA_IMAGE_DATA",
function_name="VAQUITA_IMAGE_DATA",
runtime=_lambda.Runtime.PYTHON_3_7,
timeout=core.Duration.seconds(5),
role=image_data_function_role,
# vpc=vpc,
# vpc_subnets=_ec2.SubnetSelection(subnet_type=_ec2.SubnetType.ISOLATED),
environment={
"CLUSTER_ARN": database_cluster_arn,
"CREDENTIALS_ARN": database_secret.secret_arn,
"DB_NAME": database.database_name,
"REGION": core.Aws.REGION
},
handler="main.handler",
code=_lambda.Code.asset("./src/imageData")
)
imageSearchIntegration = _apigw.LambdaIntegration(
image_data_function,
proxy=True,
integration_responses=[{
'statusCode': '200',
'responseParameters': {
'method.response.header.Access-Control-Allow-Origin': "'*'",
}
}])
apiGatewayImageSearchAuthorizer = _apigw.CfnAuthorizer(self, "VAQUITA_API_GATEWAY_IMAGE_SEARCH_AUTHORIZER",
rest_api_id=apiGatewayImageSearchResource.rest_api.rest_api_id,
name="VAQUITA_API_GATEWAY_IMAGE_SEARCH_AUTHORIZER",
type="COGNITO_USER_POOLS", #_apigw.AuthorizationType.COGNITO,
identity_source="method.request.header.Authorization",
provider_arns=[usersPool.user_pool_arn])
apiGatewayImageSearchResource.add_method('POST', imageSearchIntegration,
authorization_type=_apigw.AuthorizationType.COGNITO,
method_responses=[{
'statusCode': '200',
'responseParameters': {
'method.response.header.Access-Control-Allow-Origin': True,
}
}]
).node.find_child('Resource').add_property_override('AuthorizerId', apiGatewayImageSearchAuthorizer.ref)
lambda_access_search = _iam.PolicyStatement(
effect=_iam.Effect.ALLOW,
actions=["translate:TranslateText"],
resources=["*"] #tbc [elasticSearch.attr_arn]
)
image_data_function.add_to_role_policy(lambda_access_search)
### custom resource
lambda_provider = Provider(self, 'VAQUITA_IMAGE_DATA_PROVIDER',
on_event_handler=image_data_function
)
CustomResource(self, 'VAQUITA_IMAGE_DATA_RESOURCE',
service_token=lambda_provider.service_token,
pascal_case_properties=False,
resource_type="Custom::SchemaCreation",
properties={
"source": "Cloudformation"
}
)
### event bridge
event_bus = _events.EventBus(self, "VAQUITA_IMAGE_CONTENT_BUS")
event_rule = _events.Rule(self, "VAQUITA_IMAGE_CONTENT_RULE",
rule_name="VAQUITA_IMAGE_CONTENT_RULE",
description="The event from image analyzer to store the data",
event_bus=event_bus,
event_pattern=_events.EventPattern(resources=[imageAnalyzerFunction.function_arn]),
)
event_rule.add_target(_event_targets.LambdaFunction(image_data_function))
event_bus.grant_put_events(imageAnalyzerFunction)
imageAnalyzerFunction.add_environment("EVENT_BUS", event_bus.event_bus_name)
### outputs
core.CfnOutput(self, 'CognitoHostedUILogin',
value='https://{}.auth.{}.amazoncognito.com/login?client_id={}&response_type=token&scope={}&redirect_uri={}'.format(userPoolDomain.domain_name, core.Aws.REGION, userPoolAppClient.ref, '+'.join(userPoolAppClient.allowed_o_auth_scopes), apiGatewayLandingPageResource.url),
description='The Cognito Hosted UI Login Page'
)
def
|
add_cors_options
|
identifier_name
|
|
cdk.py
|
3,
"queue": imageDeadletterQueue
})
### api gateway core
apiGateway = _apigw.RestApi(self, 'VAQUITA_API_GATEWAY', rest_api_name='VaquitaApiGateway')
apiGatewayResource = apiGateway.root.add_resource('vaquita')
apiGatewayLandingPageResource = apiGatewayResource.add_resource('web')
apiGatewayGetSignedUrlResource = apiGatewayResource.add_resource('signedUrl')
apiGatewayImageSearchResource = apiGatewayResource.add_resource('search')
### landing page function
getLandingPageFunction = _lambda.Function(self, "VAQUITA_GET_LANDING_PAGE",
function_name="VAQUITA_GET_LANDING_PAGE",
runtime=_lambda.Runtime.PYTHON_3_7,
handler="main.handler",
code=_lambda.Code.asset("./src/landingPage"))
getLandingPageIntegration = _apigw.LambdaIntegration(
getLandingPageFunction,
proxy=True,
integration_responses=[{
'statusCode': '200',
'responseParameters': {
'method.response.header.Access-Control-Allow-Origin': "'*'",
}
}])
apiGatewayLandingPageResource.add_method('GET', getLandingPageIntegration,
method_responses=[{
'statusCode': '200',
'responseParameters': {
'method.response.header.Access-Control-Allow-Origin': True,
}
}])
### cognito
required_attribute = _cognito.StandardAttribute(required=True)
usersPool = _cognito.UserPool(self, "VAQUITA_USERS_POOL",
auto_verify=_cognito.AutoVerifiedAttrs(email=True), #required for self sign-up
standard_attributes=_cognito.StandardAttributes(email=required_attribute), #required for self sign-up
self_sign_up_enabled=True)
userPoolAppClient = _cognito.CfnUserPoolClient(self, "VAQUITA_USERS_POOL_APP_CLIENT",
|
allowed_o_auth_flows_user_pool_client=True,
explicit_auth_flows=["ALLOW_REFRESH_TOKEN_AUTH"])
userPoolDomain = _cognito.UserPoolDomain(self, "VAQUITA_USERS_POOL_DOMAIN",
user_pool=usersPool,
cognito_domain=_cognito.CognitoDomainOptions(domain_prefix="vaquita"))
### get signed URL function
getSignedUrlFunction = _lambda.Function(self, "VAQUITA_GET_SIGNED_URL",
function_name="VAQUITA_GET_SIGNED_URL",
environment={"VAQUITA_IMAGES_BUCKET": imagesS3Bucket.bucket_name},
runtime=_lambda.Runtime.PYTHON_3_7,
handler="main.handler",
code=_lambda.Code.asset("./src/getSignedUrl"))
getSignedUrlIntegration = _apigw.LambdaIntegration(
getSignedUrlFunction,
proxy=True,
integration_responses=[{
'statusCode': '200',
'responseParameters': {
'method.response.header.Access-Control-Allow-Origin': "'*'",
}
}])
apiGatewayGetSignedUrlAuthorizer = _apigw.CfnAuthorizer(self, "VAQUITA_API_GATEWAY_GET_SIGNED_URL_AUTHORIZER",
rest_api_id=apiGatewayGetSignedUrlResource.rest_api.rest_api_id,
name="VAQUITA_API_GATEWAY_GET_SIGNED_URL_AUTHORIZER",
type="COGNITO_USER_POOLS", #_apigw.AuthorizationType.COGNITO,
identity_source="method.request.header.Authorization",
provider_arns=[usersPool.user_pool_arn])
apiGatewayGetSignedUrlResource.add_method('GET', getSignedUrlIntegration,
authorization_type=_apigw.AuthorizationType.COGNITO,
method_responses=[{
'statusCode': '200',
'responseParameters': {
'method.response.header.Access-Control-Allow-Origin': True,
}
}]
).node.find_child('Resource').add_property_override('AuthorizerId', apiGatewayGetSignedUrlAuthorizer.ref)
imagesS3Bucket.grant_put(getSignedUrlFunction, objects_key_pattern="new/*")
### image massage function
imageMassageFunction = _lambda.Function(self, "VAQUITA_IMAGE_MASSAGE",
function_name="VAQUITA_IMAGE_MASSAGE",
timeout=core.Duration.seconds(6),
runtime=_lambda.Runtime.PYTHON_3_7,
environment={"VAQUITA_IMAGE_MASSAGE": imageQueue.queue_name},
handler="main.handler",
code=_lambda.Code.asset("./src/imageMassage"))
imagesS3Bucket.grant_write(imageMassageFunction, "processed/*")
imagesS3Bucket.grant_delete(imageMassageFunction, "new/*")
imagesS3Bucket.grant_read(imageMassageFunction, "new/*")
newImageAddedNotification = _s3notification.LambdaDestination(imageMassageFunction)
imagesS3Bucket.add_event_notification(_s3.EventType.OBJECT_CREATED,
newImageAddedNotification,
_s3.NotificationKeyFilter(prefix="new/")
)
imageQueue.grant_send_messages(imageMassageFunction)
### image analyzer function
imageAnalyzerFunction = _lambda.Function(self, "VAQUITA_IMAGE_ANALYSIS",
function_name="VAQUITA_IMAGE_ANALYSIS",
runtime=_lambda.Runtime.PYTHON_3_7,
timeout=core.Duration.seconds(10),
environment={
"VAQUITA_IMAGES_BUCKET": imagesS3Bucket.bucket_name,
"REGION": core.Aws.REGION,
},
handler="main.handler",
code=_lambda.Code.asset("./src/imageAnalysis"))
imageAnalyzerFunction.add_event_source(_lambda_event_source.SqsEventSource(queue=imageQueue, batch_size=10))
imageQueue.grant_consume_messages(imageMassageFunction)
lambda_rekognition_access = _iam.PolicyStatement(
effect=_iam.Effect.ALLOW,
actions=["rekognition:DetectLabels", "rekognition:DetectModerationLabels"],
resources=["*"]
)
imageAnalyzerFunction.add_to_role_policy(lambda_rekognition_access)
imagesS3Bucket.grant_read(imageAnalyzerFunction, "processed/*")
### API gateway finalizing
self.add_cors_options(apiGatewayGetSignedUrlResource)
self.add_cors_options(apiGatewayLandingPageResource)
self.add_cors_options(apiGatewayImageSearchResource)
### secret manager
database_secret = _secrets_manager.Secret(self, "VAQUITA_DATABASE_SECRET",
secret_name="rds-db-credentials/vaquita-rds-secret",
generate_secret_string=_secrets_manager.SecretStringGenerator(
generate_string_key='password',
secret_string_template='{"username": "dba"}',
require_each_included_type=True
)
)
database = _rds.CfnDBCluster(self, "VAQUITA_DATABASE",
engine=_rds.DatabaseClusterEngine.aurora_mysql(version=_rds.AuroraMysqlEngineVersion.VER_5_7_12).engine_type,
engine_mode="serverless",
# availability_zones=vpc.availability_zones,
database_name="images_labels",
enable_http_endpoint=True,
deletion_protection=False,
# enable_cloudwatch_logs_exports=["error"],
master_username=database_secret.secret_value_from_json("username").to_string(),
master_user_password=database_secret.secret_value_from_json("password").to_string(),
scaling_configuration=_rds.CfnDBCluster.ScalingConfigurationProperty(
auto_pause=True,
min_capacity=2,
max_capacity=8,
seconds_until_auto_pause=1800
),
)
database_cluster_arn = "arn:aws:rds:{}:{}:cluster:{}".format(core.Aws.REGION, core.Aws.ACCOUNT_ID, database.ref)
### secret manager
secret_target = _secrets_manager.CfnSecretTargetAttachment(self,"VAQUITA_DATABASE_SECRET_TARGET",
target_type="AWS::RDS::DBCluster",
target_id=database.ref,
secret_id=database_secret.secret_arn
)
secret_target.node.add_dependency(database)
### database function
image_data_function_role = _iam.Role(self, "VAQUITA_IMAGE_DATA_FUNCTION_ROLE",
role_name="VAQUITA_IMAGE_DATA_FUNCTION_ROLE",
assumed_by=_iam.ServicePrincipal("lambda.amazonaws.com"),
managed_policies=[
_iam.ManagedPolicy.from_aws_managed_policy_name("service-role/AWSLambdaVPCAccessExecutionRole"),
_iam.ManagedPolicy.from_aws_managed_policy_name("service-role/AWSLambdaBasicExecutionRole"),
_iam.ManagedPolicy.from_aws_managed_policy_name("AmazonRDSDataFullAccess")
]
)
image_data_function = _lambda.Function(self, "VAQUITA_IMAGE_DATA",
function_name="VAQUITA_IMAGE_DATA",
runtime=_lambda.Runtime.PYTHON_3_7,
timeout=core.Duration.seconds(5),
role=image_data_function_role,
# vpc=vpc,
# vpc_subnets=_ec2.SubnetSelection(subnet_type=_ec2.SubnetType.ISOLATED),
environment={
"CLUSTER_ARN": database_cluster_arn,
"CREDENTIALS_ARN": database_secret.secret_arn,
"DB_NAME": database.database_name,
"REGION": core.Aws.REGION
},
handler="main.handler",
code=_lambda.Code.asset("./src/imageData")
)
imageSearchIntegration = _apigw.LambdaIntegration
|
supported_identity_providers=["COGNITO"],
allowed_o_auth_flows=["implicit"],
allowed_o_auth_scopes=["phone", "email", "openid", "profile"],
user_pool_id=usersPool.user_pool_id,
callback_ur_ls=[apiGatewayLandingPageResource.url],
|
random_line_split
|
Tablemodels.py
|
.database = db
self.connect = connection
self.cursor = cursor
self.valueDic = {}
self.rows = []
self.fields = []
self.fieldTypeDic = {}
self.fieldTypes = {'DOUBLE': 'D',
'FLOAT':'F',
'INTEGER': 'N',
'BOOL': 'C',
'REAL': 'F',
'TEXT':'C',
'POLYGON':5,
'POINT':1,
'LINESTRING':3,
}
def sqlCommands(self):
'sql for retrieval and insertion'
self.sqlFieldInfo = 'PRAGMA table_info({0})'
self.sqlSelect = 'SELECT {0} FROM {1} '
self.sqlInsert = 'INSERT INTO {0}({1}) VALUES ({2})'
self.sqlCreate = "SELECT sql FROM sqlite_master WHERE type = 'table' AND name = '{0}'"
self.sqlUpdate = "UPDATE {0} "
self.sqlSet = "SET {0} = ? "
self.sqlSelectByID = 'PK_UID BETWEEN ? AND ?'
self.sqlWhere = 'WHERE {0}'
def fieldsGen(self):
'get table fields and create name:type dictionary'
fields = self.sqlFieldInfo, self.name
self.curX(fields)
retFields = self.curAll()
for field in retFields:
fieldval = field[1]
typeval = field[2]
self.fieldTypeDic[fieldval] = typeval
self.fields.append(fieldval)
def getFieldsByType(self, type):
'get name fo fields for a given field type'
typeResults = find_key(self.fieldDic, type)
return typeResults
def find_key(self,dic, val):
"""return the key of dictionary dic given the value"""
return [k for k, v in dic.iteritems() if v == val][0]
def changeFieldOrder(self):
'adjust recorded order of fields'
return
def valuesGen(self,params,min=0,max=None):
'get selected rows of data'
self.rows=[]
if max == None:
params = (fields, self.name)
select = self.sqlSelect, params
else:
params = (fields, self.name)
args= min, max
select = self.sqlSelect+ self.sqlSelectByID, params, args
self.curX(select)
valResults = self.curAll()
for COUNTER, field in enumerate(self.fieldDic):
self.valueDic[field] = []
for COUNTY,value in enumerate(valResults):
dic = {}
dic[COUNTY] = value[COUNTER]
self.valueDic[field].append(dic)
self.rows.append(value))
def tableDicGen(self):
'create tkintertable representation of selected data '
dataDic = {}
valueRange = len(self.rows)
for i in range(1,valueRange+1):
dataDic[str(i)] = {}
for COUNTROW, row in enumerate(self.rows):
for COUNTERX, field in enumerate(self.fields):
for value in row:
dataDic[COUNTROW][field] = value
self.tableDic = dataDic
def csvRowsGen(self,sep=','):
'create CSV representation of selected data'
for COUNTER, column in enumerate(self.fields):
header = ''
if COUNTER != len(self.field)-1:
header += column + sep
else:
header += column +'\n'
self.csvFields = header
self.csvRows = []
for row in self.rows:
line = ''
for val in row:
if COUNTER != len(row)-1:
line += val + sep
else:
line += val + '\n'
self.csvRows.append(line)
def xlsDicGen(self):
'create xls representation of selected data'
self.xlsFields - self.fields
self.xlsRows = self.rows
def insert(self, params, args):
'insert into table'
insertsql = self.sqlInsert, params, args
self.curX(insertsql)
self.connect.commit()
def constraintGen(self,params):
where = self.sqlWhere.format(params)
def update(self, params, args, where=None):
newparams = ''
for param in params:
if count == len(params)-1:
newparams = self.sqlSet.format(param) + ','
else:
newparams = self.sqlSet.format(param)
if where:
updatesql = self.sqlUpdate.format(self.name) + self.constraintGen(where), args
else
updatesql = self.sqlUpdate.format(self.name) + newparams,args
self.curX(updatesql)
self.connect.commit()
def curX(self,sql,args=()):
'execute sql statements'
statement = sql[0]
params = sql[1]
self.cursor.execute(statement.format(*params), *args)
return
def curIn(self,sql):
'execute sql statements'
self.curX(sql)
self.connect.commit()
return
def curAll(self,):
results = self.cursor.fetchall()
return results
def curOne(self,):
self.cursor.fetchone()
return
class GeoTableModel(TableModel):
def geoSQL(self):
self.sqlTransform = 'Transform(Geometry,{0}')
self.sqlGeomFromText = 'ST_GeomFromText({0})'
self.sqlPointFromText = 'PointFromText({0},{1})'
self.sqlLineFromText = 'LineFromText({0},{0})'
self.sqlNumInteriorRings = 'NumInteriorRing({0})'
self.sqlGeometryRow = 'GeometryN({0})'
self.sqlGeomAsText = 'ST_GeomAsText({0})'
self.sqlEnvelope = 'Envelope(Geometry)'
self.sqlSrid = 'SRID(Geometry)'
self.sqlArea = 'ST_Area(Geometry)'
self.sqlIntersect = 'Intersects(Geometry,{0}) '
self.sqlWithin = 'Within(Geometry,{0}) '
self.sqlContains = 'Contains(Geometry,{0}) '
self.sqlIntersection = 'Intersection(Geometry,{0}) '
self.sqlCrosses = 'Crosses(Geometry,{0})'
self.sqlBuildMBR = 'BuildMBR({0}) '
self.sqlMBRMaxX = 'MBRMaxX(Geometry) '
self.sqlMBRMaxY = 'MBRMaxY(Geometry) '
self.sqlMBRMinX = 'MBRMinX(Geometry) '
self.sqlMBRMinY = 'MBRMinY(Geometry) '
self.sqlCentroid = 'Centroid(Geometry)'
self.sqlGeomType = 'GeometryType(Geometry'
self.sqlX = 'X(Geometry)'
self.sqlY = 'Y(Geometry)'
self.sqlBuffer = 'Buffer(Geometry, {0})
self.sqlMBRContains = 'MbrContains(Geomtry,{0})'
self.sqlMBRWithin = 'MbrWithin(Geomtry,{0})'
self.sqlMBRIntersects = 'MbrIntersects(Geomtry,{0})'
self.sqlMBROverlaps = 'MbrOverlaps(Geomtry,{0})'
def getGeoType(self):
type = self.sqlSelect, (self.sqlGeomType, self.name)
self.curX(type)
self.type = self.curOne()[0]
def geometryAsText(self):
def getGeoms(self,rows):
def getMBR(self,rows):
def srid(self):
def sridScrape(self, tablename):
sql = "SELECT SRID FROM geom_cols_ref_sys WHERE f_table_name = '%s'" % tablename
self.cursorspatial.execute(sql)
srid = self.cursorspatial.fetchone()[0]
return srid
def parseGeo(self, geometry):
if geometry.find('POINT')!= -1:
geom = geometry.split('(')[1].replace(')','')
geomlist = map(float,geom.split())
else:
|
return partsList
def reverseParseGeo(self, shpReader):
geomtype = find_key(self.fieldTypes, shpReader.shapeType )
if geomtype == 'POINT':
WKTlist = []
WKTtemplate = 'POINT(%f %f)'
shapes = shpReader.shapes()
for shape in shapes:
pnt = shape.points[0]
WKT = WKTtemplate % (pnt[0], pnt[1])
WKTlist.append(WKT)
elif geomtype == 'POLYGON':
WKTtemplate = 'POLYGON(('
WKTlist = []
shapes = shpReader.shapes()
for shape in shapes:
WKT = W
|
partsList = []
geom = geometry.split('((')[1].replace('))','')
partSplit = geom.split('), (')
for part in partSplit:
geomlist = []
geomsplit = part.split(', ')
for COUNTER,geoms in enumerate(geomsplit):
xy = map(float,geoms.split())
geomlist.append(xy)
partsList.append(geomlist)
|
conditional_block
|
Tablemodels.py
|
dictionary'
fields = self.sqlFieldInfo, self.name
self.curX(fields)
retFields = self.curAll()
for field in retFields:
fieldval = field[1]
typeval = field[2]
self.fieldTypeDic[fieldval] = typeval
self.fields.append(fieldval)
def getFieldsByType(self, type):
'get name fo fields for a given field type'
typeResults = find_key(self.fieldDic, type)
return typeResults
def find_key(self,dic, val):
"""return the key of dictionary dic given the value"""
return [k for k, v in dic.iteritems() if v == val][0]
def changeFieldOrder(self):
'adjust recorded order of fields'
return
def valuesGen(self,params,min=0,max=None):
'get selected rows of data'
self.rows=[]
if max == None:
params = (fields, self.name)
select = self.sqlSelect, params
else:
params = (fields, self.name)
args= min, max
select = self.sqlSelect+ self.sqlSelectByID, params, args
self.curX(select)
valResults = self.curAll()
for COUNTER, field in enumerate(self.fieldDic):
self.valueDic[field] = []
for COUNTY,value in enumerate(valResults):
dic = {}
dic[COUNTY] = value[COUNTER]
self.valueDic[field].append(dic)
self.rows.append(value))
def tableDicGen(self):
'create tkintertable representation of selected data '
dataDic = {}
valueRange = len(self.rows)
for i in range(1,valueRange+1):
dataDic[str(i)] = {}
for COUNTROW, row in enumerate(self.rows):
for COUNTERX, field in enumerate(self.fields):
for value in row:
dataDic[COUNTROW][field] = value
self.tableDic = dataDic
def csvRowsGen(self,sep=','):
'create CSV representation of selected data'
for COUNTER, column in enumerate(self.fields):
header = ''
if COUNTER != len(self.field)-1:
header += column + sep
else:
header += column +'\n'
self.csvFields = header
self.csvRows = []
for row in self.rows:
line = ''
for val in row:
if COUNTER != len(row)-1:
line += val + sep
else:
line += val + '\n'
self.csvRows.append(line)
def xlsDicGen(self):
'create xls representation of selected data'
self.xlsFields - self.fields
self.xlsRows = self.rows
def insert(self, params, args):
'insert into table'
insertsql = self.sqlInsert, params, args
self.curX(insertsql)
self.connect.commit()
def constraintGen(self,params):
where = self.sqlWhere.format(params)
def update(self, params, args, where=None):
newparams = ''
for param in params:
if count == len(params)-1:
newparams = self.sqlSet.format(param) + ','
else:
newparams = self.sqlSet.format(param)
if where:
updatesql = self.sqlUpdate.format(self.name) + self.constraintGen(where), args
else
updatesql = self.sqlUpdate.format(self.name) + newparams,args
self.curX(updatesql)
self.connect.commit()
def curX(self,sql,args=()):
'execute sql statements'
statement = sql[0]
params = sql[1]
self.cursor.execute(statement.format(*params), *args)
return
def curIn(self,sql):
'execute sql statements'
self.curX(sql)
self.connect.commit()
return
def curAll(self,):
results = self.cursor.fetchall()
return results
def curOne(self,):
self.cursor.fetchone()
return
class GeoTableModel(TableModel):
def geoSQL(self):
self.sqlTransform = 'Transform(Geometry,{0}')
self.sqlGeomFromText = 'ST_GeomFromText({0})'
self.sqlPointFromText = 'PointFromText({0},{1})'
self.sqlLineFromText = 'LineFromText({0},{0})'
self.sqlNumInteriorRings = 'NumInteriorRing({0})'
self.sqlGeometryRow = 'GeometryN({0})'
self.sqlGeomAsText = 'ST_GeomAsText({0})'
self.sqlEnvelope = 'Envelope(Geometry)'
self.sqlSrid = 'SRID(Geometry)'
self.sqlArea = 'ST_Area(Geometry)'
self.sqlIntersect = 'Intersects(Geometry,{0}) '
self.sqlWithin = 'Within(Geometry,{0}) '
self.sqlContains = 'Contains(Geometry,{0}) '
self.sqlIntersection = 'Intersection(Geometry,{0}) '
self.sqlCrosses = 'Crosses(Geometry,{0})'
self.sqlBuildMBR = 'BuildMBR({0}) '
self.sqlMBRMaxX = 'MBRMaxX(Geometry) '
self.sqlMBRMaxY = 'MBRMaxY(Geometry) '
self.sqlMBRMinX = 'MBRMinX(Geometry) '
self.sqlMBRMinY = 'MBRMinY(Geometry) '
self.sqlCentroid = 'Centroid(Geometry)'
self.sqlGeomType = 'GeometryType(Geometry'
self.sqlX = 'X(Geometry)'
self.sqlY = 'Y(Geometry)'
self.sqlBuffer = 'Buffer(Geometry, {0})
self.sqlMBRContains = 'MbrContains(Geomtry,{0})'
self.sqlMBRWithin = 'MbrWithin(Geomtry,{0})'
self.sqlMBRIntersects = 'MbrIntersects(Geomtry,{0})'
self.sqlMBROverlaps = 'MbrOverlaps(Geomtry,{0})'
def getGeoType(self):
type = self.sqlSelect, (self.sqlGeomType, self.name)
self.curX(type)
self.type = self.curOne()[0]
def geometryAsText(self):
def getGeoms(self,rows):
def getMBR(self,rows):
def srid(self):
def sridScrape(self, tablename):
sql = "SELECT SRID FROM geom_cols_ref_sys WHERE f_table_name = '%s'" % tablename
self.cursorspatial.execute(sql)
srid = self.cursorspatial.fetchone()[0]
return srid
def parseGeo(self, geometry):
if geometry.find('POINT')!= -1:
geom = geometry.split('(')[1].replace(')','')
geomlist = map(float,geom.split())
else:
partsList = []
geom = geometry.split('((')[1].replace('))','')
partSplit = geom.split('), (')
for part in partSplit:
geomlist = []
geomsplit = part.split(', ')
for COUNTER,geoms in enumerate(geomsplit):
xy = map(float,geoms.split())
geomlist.append(xy)
partsList.append(geomlist)
return partsList
def reverseParseGeo(self, shpReader):
geomtype = find_key(self.fieldTypes, shpReader.shapeType )
if geomtype == 'POINT':
WKTlist = []
WKTtemplate = 'POINT(%f %f)'
shapes = shpReader.shapes()
for shape in shapes:
pnt = shape.points[0]
WKT = WKTtemplate % (pnt[0], pnt[1])
WKTlist.append(WKT)
elif geomtype == 'POLYGON':
WKTtemplate = 'POLYGON(('
WKTlist = []
shapes = shpReader.shapes()
for shape in shapes:
WKT = WKTtemplate
points =shape.points
firstCoords = points[0]
countVal = 0
for COUNTER,coords in enumerate(points):
if COUNTER != len(shape.points)-1:
if coords == firstCoords and COUNTER != countVal:
WKT += str(coords[0]) + ' '+ str(coords[1])+ '), ('
firstCoords = points[COUNTER+1]
countVal = COUNTER +1
else:
WKT += str(coords[0]) + ' '+ str(coords[1])+ ', '
else:
WKT += str(coords[0]) + ' '+ str(coords[1])+ '))'
WKTlist.append(WKT)
elif geomtype == 'LINESTRING':
WKTtemplate = 'LINESTRING('
WKTlist = []
shapes = shpReader.shapes()
for shape in shapes:
WKT = WKTtemplate
for COUNTER,coords in enumerate(shape.points):
if COUNTER != len(shape.points)-1:
WKT += str(coords[0]) + ' '+ str(coords[1])+ ', '
else:
|
WKT += str(coords[0]) + ' '+ str(coords[1])+ ')'
|
random_line_split
|
|
Tablemodels.py
|
.database = db
self.connect = connection
self.cursor = cursor
self.valueDic = {}
self.rows = []
self.fields = []
self.fieldTypeDic = {}
self.fieldTypes = {'DOUBLE': 'D',
'FLOAT':'F',
'INTEGER': 'N',
'BOOL': 'C',
'REAL': 'F',
'TEXT':'C',
'POLYGON':5,
'POINT':1,
'LINESTRING':3,
}
def sqlCommands(self):
'sql for retrieval and insertion'
self.sqlFieldInfo = 'PRAGMA table_info({0})'
self.sqlSelect = 'SELECT {0} FROM {1} '
self.sqlInsert = 'INSERT INTO {0}({1}) VALUES ({2})'
self.sqlCreate = "SELECT sql FROM sqlite_master WHERE type = 'table' AND name = '{0}'"
self.sqlUpdate = "UPDATE {0} "
self.sqlSet = "SET {0} = ? "
self.sqlSelectByID = 'PK_UID BETWEEN ? AND ?'
self.sqlWhere = 'WHERE {0}'
def fieldsGen(self):
'get table fields and create name:type dictionary'
fields = self.sqlFieldInfo, self.name
self.curX(fields)
retFields = self.curAll()
for field in retFields:
fieldval = field[1]
typeval = field[2]
self.fieldTypeDic[fieldval] = typeval
self.fields.append(fieldval)
def getFieldsByType(self, type):
'get name fo fields for a given field type'
typeResults = find_key(self.fieldDic, type)
return typeResults
def find_key(self,dic, val):
"""return the key of dictionary dic given the value"""
return [k for k, v in dic.iteritems() if v == val][0]
def changeFieldOrder(self):
'adjust recorded order of fields'
return
def valuesGen(self,params,min=0,max=None):
'get selected rows of data'
self.rows=[]
if max == None:
params = (fields, self.name)
select = self.sqlSelect, params
else:
params = (fields, self.name)
args= min, max
select = self.sqlSelect+ self.sqlSelectByID, params, args
self.curX(select)
valResults = self.curAll()
for COUNTER, field in enumerate(self.fieldDic):
self.valueDic[field] = []
for COUNTY,value in enumerate(valResults):
dic = {}
dic[COUNTY] = value[COUNTER]
self.valueDic[field].append(dic)
self.rows.append(value))
def tableDicGen(self):
'create tkintertable representation of selected data '
dataDic = {}
valueRange = len(self.rows)
for i in range(1,valueRange+1):
dataDic[str(i)] = {}
for COUNTROW, row in enumerate(self.rows):
for COUNTERX, field in enumerate(self.fields):
for value in row:
dataDic[COUNTROW][field] = value
self.tableDic = dataDic
def csvRowsGen(self,sep=','):
'create CSV representation of selected data'
for COUNTER, column in enumerate(self.fields):
header = ''
if COUNTER != len(self.field)-1:
header += column + sep
else:
header += column +'\n'
self.csvFields = header
self.csvRows = []
for row in self.rows:
line = ''
for val in row:
if COUNTER != len(row)-1:
line += val + sep
else:
line += val + '\n'
self.csvRows.append(line)
def xlsDicGen(self):
'create xls representation of selected data'
self.xlsFields - self.fields
self.xlsRows = self.rows
def insert(self, params, args):
'insert into table'
insertsql = self.sqlInsert, params, args
self.curX(insertsql)
self.connect.commit()
def constraintGen(self,params):
where = self.sqlWhere.format(params)
def
|
(self, params, args, where=None):
newparams = ''
for param in params:
if count == len(params)-1:
newparams = self.sqlSet.format(param) + ','
else:
newparams = self.sqlSet.format(param)
if where:
updatesql = self.sqlUpdate.format(self.name) + self.constraintGen(where), args
else
updatesql = self.sqlUpdate.format(self.name) + newparams,args
self.curX(updatesql)
self.connect.commit()
def curX(self,sql,args=()):
'execute sql statements'
statement = sql[0]
params = sql[1]
self.cursor.execute(statement.format(*params), *args)
return
def curIn(self,sql):
'execute sql statements'
self.curX(sql)
self.connect.commit()
return
def curAll(self,):
results = self.cursor.fetchall()
return results
def curOne(self,):
self.cursor.fetchone()
return
class GeoTableModel(TableModel):
def geoSQL(self):
self.sqlTransform = 'Transform(Geometry,{0}')
self.sqlGeomFromText = 'ST_GeomFromText({0})'
self.sqlPointFromText = 'PointFromText({0},{1})'
self.sqlLineFromText = 'LineFromText({0},{0})'
self.sqlNumInteriorRings = 'NumInteriorRing({0})'
self.sqlGeometryRow = 'GeometryN({0})'
self.sqlGeomAsText = 'ST_GeomAsText({0})'
self.sqlEnvelope = 'Envelope(Geometry)'
self.sqlSrid = 'SRID(Geometry)'
self.sqlArea = 'ST_Area(Geometry)'
self.sqlIntersect = 'Intersects(Geometry,{0}) '
self.sqlWithin = 'Within(Geometry,{0}) '
self.sqlContains = 'Contains(Geometry,{0}) '
self.sqlIntersection = 'Intersection(Geometry,{0}) '
self.sqlCrosses = 'Crosses(Geometry,{0})'
self.sqlBuildMBR = 'BuildMBR({0}) '
self.sqlMBRMaxX = 'MBRMaxX(Geometry) '
self.sqlMBRMaxY = 'MBRMaxY(Geometry) '
self.sqlMBRMinX = 'MBRMinX(Geometry) '
self.sqlMBRMinY = 'MBRMinY(Geometry) '
self.sqlCentroid = 'Centroid(Geometry)'
self.sqlGeomType = 'GeometryType(Geometry'
self.sqlX = 'X(Geometry)'
self.sqlY = 'Y(Geometry)'
self.sqlBuffer = 'Buffer(Geometry, {0})
self.sqlMBRContains = 'MbrContains(Geomtry,{0})'
self.sqlMBRWithin = 'MbrWithin(Geomtry,{0})'
self.sqlMBRIntersects = 'MbrIntersects(Geomtry,{0})'
self.sqlMBROverlaps = 'MbrOverlaps(Geomtry,{0})'
def getGeoType(self):
type = self.sqlSelect, (self.sqlGeomType, self.name)
self.curX(type)
self.type = self.curOne()[0]
def geometryAsText(self):
def getGeoms(self,rows):
def getMBR(self,rows):
def srid(self):
def sridScrape(self, tablename):
sql = "SELECT SRID FROM geom_cols_ref_sys WHERE f_table_name = '%s'" % tablename
self.cursorspatial.execute(sql)
srid = self.cursorspatial.fetchone()[0]
return srid
def parseGeo(self, geometry):
if geometry.find('POINT')!= -1:
geom = geometry.split('(')[1].replace(')','')
geomlist = map(float,geom.split())
else:
partsList = []
geom = geometry.split('((')[1].replace('))','')
partSplit = geom.split('), (')
for part in partSplit:
geomlist = []
geomsplit = part.split(', ')
for COUNTER,geoms in enumerate(geomsplit):
xy = map(float,geoms.split())
geomlist.append(xy)
partsList.append(geomlist)
return partsList
def reverseParseGeo(self, shpReader):
geomtype = find_key(self.fieldTypes, shpReader.shapeType )
if geomtype == 'POINT':
WKTlist = []
WKTtemplate = 'POINT(%f %f)'
shapes = shpReader.shapes()
for shape in shapes:
pnt = shape.points[0]
WKT = WKTtemplate % (pnt[0], pnt[1])
WKTlist.append(WKT)
elif geomtype == 'POLYGON':
WKTtemplate = 'POLYGON(('
WKTlist = []
shapes = shpReader.shapes()
for shape in shapes:
WKT = W
|
update
|
identifier_name
|
Tablemodels.py
|
.database = db
self.connect = connection
self.cursor = cursor
self.valueDic = {}
self.rows = []
self.fields = []
self.fieldTypeDic = {}
self.fieldTypes = {'DOUBLE': 'D',
'FLOAT':'F',
'INTEGER': 'N',
'BOOL': 'C',
'REAL': 'F',
'TEXT':'C',
'POLYGON':5,
'POINT':1,
'LINESTRING':3,
}
def sqlCommands(self):
'sql for retrieval and insertion'
self.sqlFieldInfo = 'PRAGMA table_info({0})'
self.sqlSelect = 'SELECT {0} FROM {1} '
self.sqlInsert = 'INSERT INTO {0}({1}) VALUES ({2})'
self.sqlCreate = "SELECT sql FROM sqlite_master WHERE type = 'table' AND name = '{0}'"
self.sqlUpdate = "UPDATE {0} "
self.sqlSet = "SET {0} = ? "
self.sqlSelectByID = 'PK_UID BETWEEN ? AND ?'
self.sqlWhere = 'WHERE {0}'
def fieldsGen(self):
'get table fields and create name:type dictionary'
fields = self.sqlFieldInfo, self.name
self.curX(fields)
retFields = self.curAll()
for field in retFields:
fieldval = field[1]
typeval = field[2]
self.fieldTypeDic[fieldval] = typeval
self.fields.append(fieldval)
def getFieldsByType(self, type):
'get name fo fields for a given field type'
typeResults = find_key(self.fieldDic, type)
return typeResults
def find_key(self,dic, val):
"""return the key of dictionary dic given the value"""
return [k for k, v in dic.iteritems() if v == val][0]
def changeFieldOrder(self):
|
def valuesGen(self,params,min=0,max=None):
'get selected rows of data'
self.rows=[]
if max == None:
params = (fields, self.name)
select = self.sqlSelect, params
else:
params = (fields, self.name)
args= min, max
select = self.sqlSelect+ self.sqlSelectByID, params, args
self.curX(select)
valResults = self.curAll()
for COUNTER, field in enumerate(self.fieldDic):
self.valueDic[field] = []
for COUNTY,value in enumerate(valResults):
dic = {}
dic[COUNTY] = value[COUNTER]
self.valueDic[field].append(dic)
self.rows.append(value))
def tableDicGen(self):
'create tkintertable representation of selected data '
dataDic = {}
valueRange = len(self.rows)
for i in range(1,valueRange+1):
dataDic[str(i)] = {}
for COUNTROW, row in enumerate(self.rows):
for COUNTERX, field in enumerate(self.fields):
for value in row:
dataDic[COUNTROW][field] = value
self.tableDic = dataDic
def csvRowsGen(self,sep=','):
'create CSV representation of selected data'
for COUNTER, column in enumerate(self.fields):
header = ''
if COUNTER != len(self.field)-1:
header += column + sep
else:
header += column +'\n'
self.csvFields = header
self.csvRows = []
for row in self.rows:
line = ''
for val in row:
if COUNTER != len(row)-1:
line += val + sep
else:
line += val + '\n'
self.csvRows.append(line)
def xlsDicGen(self):
'create xls representation of selected data'
self.xlsFields - self.fields
self.xlsRows = self.rows
def insert(self, params, args):
'insert into table'
insertsql = self.sqlInsert, params, args
self.curX(insertsql)
self.connect.commit()
def constraintGen(self,params):
where = self.sqlWhere.format(params)
def update(self, params, args, where=None):
newparams = ''
for param in params:
if count == len(params)-1:
newparams = self.sqlSet.format(param) + ','
else:
newparams = self.sqlSet.format(param)
if where:
updatesql = self.sqlUpdate.format(self.name) + self.constraintGen(where), args
else
updatesql = self.sqlUpdate.format(self.name) + newparams,args
self.curX(updatesql)
self.connect.commit()
def curX(self,sql,args=()):
'execute sql statements'
statement = sql[0]
params = sql[1]
self.cursor.execute(statement.format(*params), *args)
return
def curIn(self,sql):
'execute sql statements'
self.curX(sql)
self.connect.commit()
return
def curAll(self,):
results = self.cursor.fetchall()
return results
def curOne(self,):
self.cursor.fetchone()
return
class GeoTableModel(TableModel):
def geoSQL(self):
self.sqlTransform = 'Transform(Geometry,{0}')
self.sqlGeomFromText = 'ST_GeomFromText({0})'
self.sqlPointFromText = 'PointFromText({0},{1})'
self.sqlLineFromText = 'LineFromText({0},{0})'
self.sqlNumInteriorRings = 'NumInteriorRing({0})'
self.sqlGeometryRow = 'GeometryN({0})'
self.sqlGeomAsText = 'ST_GeomAsText({0})'
self.sqlEnvelope = 'Envelope(Geometry)'
self.sqlSrid = 'SRID(Geometry)'
self.sqlArea = 'ST_Area(Geometry)'
self.sqlIntersect = 'Intersects(Geometry,{0}) '
self.sqlWithin = 'Within(Geometry,{0}) '
self.sqlContains = 'Contains(Geometry,{0}) '
self.sqlIntersection = 'Intersection(Geometry,{0}) '
self.sqlCrosses = 'Crosses(Geometry,{0})'
self.sqlBuildMBR = 'BuildMBR({0}) '
self.sqlMBRMaxX = 'MBRMaxX(Geometry) '
self.sqlMBRMaxY = 'MBRMaxY(Geometry) '
self.sqlMBRMinX = 'MBRMinX(Geometry) '
self.sqlMBRMinY = 'MBRMinY(Geometry) '
self.sqlCentroid = 'Centroid(Geometry)'
self.sqlGeomType = 'GeometryType(Geometry'
self.sqlX = 'X(Geometry)'
self.sqlY = 'Y(Geometry)'
self.sqlBuffer = 'Buffer(Geometry, {0})
self.sqlMBRContains = 'MbrContains(Geomtry,{0})'
self.sqlMBRWithin = 'MbrWithin(Geomtry,{0})'
self.sqlMBRIntersects = 'MbrIntersects(Geomtry,{0})'
self.sqlMBROverlaps = 'MbrOverlaps(Geomtry,{0})'
def getGeoType(self):
type = self.sqlSelect, (self.sqlGeomType, self.name)
self.curX(type)
self.type = self.curOne()[0]
def geometryAsText(self):
def getGeoms(self,rows):
def getMBR(self,rows):
def srid(self):
def sridScrape(self, tablename):
sql = "SELECT SRID FROM geom_cols_ref_sys WHERE f_table_name = '%s'" % tablename
self.cursorspatial.execute(sql)
srid = self.cursorspatial.fetchone()[0]
return srid
def parseGeo(self, geometry):
if geometry.find('POINT')!= -1:
geom = geometry.split('(')[1].replace(')','')
geomlist = map(float,geom.split())
else:
partsList = []
geom = geometry.split('((')[1].replace('))','')
partSplit = geom.split('), (')
for part in partSplit:
geomlist = []
geomsplit = part.split(', ')
for COUNTER,geoms in enumerate(geomsplit):
xy = map(float,geoms.split())
geomlist.append(xy)
partsList.append(geomlist)
return partsList
def reverseParseGeo(self, shpReader):
geomtype = find_key(self.fieldTypes, shpReader.shapeType )
if geomtype == 'POINT':
WKTlist = []
WKTtemplate = 'POINT(%f %f)'
shapes = shpReader.shapes()
for shape in shapes:
pnt = shape.points[0]
WKT = WKTtemplate % (pnt[0], pnt[1])
WKTlist.append(WKT)
elif geomtype == 'POLYGON':
WKTtemplate = 'POLYGON(('
WKTlist = []
shapes = shpReader.shapes()
for shape in shapes:
WKT = WKT
|
'adjust recorded order of fields'
return
|
identifier_body
|
refract.py
|
.220, 6.000, 6.840,
7.740, 8.690, 9.690, 10.720, 11.830, 0.000,
0.020, 0.011, 0.011, 0.089, 0.079, 0.079 ]
a3 = [ 8.40E-3, 8.50E-3, 8.60E-3, 8.70E-3, 8.90E-3, 9.20E-3,
9.40E-3, 9.70E-3, 10.00E-3, 10.20E-3, 10.50E-3, 10.79E-3,
11.10E-3, 16.46E-3, 11.44E-3, 11.81E-3, 12.21E-3, 12.66E-3,
14.49E-3, 13.19E-3, 13.60E-3, 13.82E-3, 12.97E-3, 12.48E-3,
12.07E-3, 11.71E-3, 14.68E-3, 11.39E-3, 11.08E-3, 10.78E-3,
10.50E-3, 10.20E-3, 10.00E-3, 9.70E-3, 9.40E-3, 9.20E-3,
8.90E-3, 8.70E-3, 8.60E-3, 8.50E-3, 8.40E-3, 15.92E-3,
19.20E-3, 19.16E-3, 19.20E-3, 18.10E-3, 18.10E-3, 18.10E-3 ]
a4 = [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.6, 0.6, 0.6, 0.6, 0.6, 0.6 ]
a5 = [ 5.60E-3, 5.60E-3, 5.60E-3, 5.50E-3, 5.60E-3, 5.50E-3,
5.70E-3, 5.30E-3, 5.40E-3, 4.80E-3, 4.80E-3, 4.17E-3,
3.75E-3, 7.74E-3, 2.97E-3, 2.12E-3, 0.94E-3, -0.55E-3,
5.97E-3, -2.44E-3, 3.44E-3, -4.13E-3, 1.32E-3, -0.36E-3,
-1.59E-3, -2.66E-3, -4.77E-3, -3.34E-3, -4.17E-3, -4.48E-3,
-5.10E-3, -5.10E-3, -5.70E-3, -5.50E-3, -5.90E-3, -5.60E-3,
-5.80E-3, -5.70E-3, -5.60E-3, -5.60E-3, -5.60E-3, -0.44E-3,
0.00E00, 0.00E00, 0.00E00, 0.00E00, 0.00E00, 0.00E00 ]
|
0.8, 0.1, 0.5, 0.7, -1.0, 5.8,
2.9, 2.3, 0.9, 2.2, 2.0, 2.0,
1.8, 1.9, 1.8, 1.8, 1.7, 1.8,
1.7, 1.7, 1.7, 1.7, 1.7, 0.9,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0 ]
# Convert to the units of Liebe.
theta = 300.0 / T
e = 0.001 * Pvap
p = 0.001 * Pdry
f = nu * 1e-9
ap = 1.4e-10 * (1.0 - 1.2e-5 * f ** 1.5)
gamma0 = 5.6e-3 * (p + 1.1 * e) * theta ** 0.8
nr = 2.588 * p * theta + 3.07e-4 * (1.0 / (1.0 + (f / gamma0) ** 2) - 1.0) * p * theta * theta
ni = (2.0 * 3.07e-4 / (gamma0 * (1.0 + (f / gamma0) ** 2) * (1.0 + (f / 60.0) ** 2)) +
ap * p * theta ** 2.5) * f * p * theta * theta
# Sum the contributions of the lines.
for i in xrange(0, len(nu0)):
S = a1[i] * p * theta ** 3 * math.exp(a2[i] * (1.0 - theta))
gamma = a3[i] * (p * theta ** (0.8 - a4[i])) + 1.1 * e * theta
delta = a5[i] * p * theta ** a6[i]
x = (nu0[i] - f) * (nu0[i] - f) + gamma * gamma
y = (nu0[i] + f) * (nu0[i] + f) + gamma * gamma
z = (nu0[i] + gamma * gamma / nu0[i])
nr = nr + S * ((z - f)
|
a6 = [ 1.7, 1.7, 1.7, 1.7, 1.8, 1.8,
1.8, 1.9, 1.8, 2.0, 1.9, 2.1,
2.1, 0.9, 2.3, 2.5, 3.7, -3.1,
|
random_line_split
|
refract.py
|
504.482692,
556.936002, 620.700807, 658.006500, 752.033227, 841.073593, 859.865000,
899.407000, 902.555000, 906.205524, 916.171582, 970.315022, 987.926764 ]
b1 = [ 0.1090, 0.0011, 0.0007, 2.3000, 0.0464, 1.5400,
0.0010, 11.9000, 0.0044, 0.0637, 0.9210, 0.1940,
10.6000, 0.3300, 1.2800, 0.2530, 0.0374, 0.0125,
510.0000, 5.0900, 0.2740, 250.0000, 0.0130, 0.1330,
0.0550, 0.0380, 0.1830, 8.5600, 9.1600, 138.000 ]
b2 = [ 2.143, 8.730, 8.347, 0.653, 6.156, 1.515,
9.802, 1.018, 7.318, 5.015, 3.561, 5.015,
1.370, 3.561, 2.342, 2.814, 6.693, 6.693,
0.114, 2.150, 7.767, 0.336, 8.113, 7.989,
7.845, 8.360, 5.039, 1.369, 1.842, 0.178 ]
b3 = [ 27.84E-3, 27.60E-3, 27.00E-3, 28.35E-3, 21.40E-3, 27.00E-3,
26.50E-3, 27.60E-3, 19.00E-3, 13.70E-3, 16.40E-3, 14.40E-3,
23.80E-3, 18.20E-3, 19.80E-3, 24.90E-3, 11.50E-3, 11.90E-3,
30.00E-3, 22.30E-3, 30.00E-3, 28.60E-3, 14.10E-3, 28.60E-3,
28.60E-3, 26.40E-3, 23.40E-3, 25.30E-3, 24.00E-3, 28.60E-3 ]
# Convert to the units of Liebe.
theta = 300.0 / T
e = 0.001 * Pvap
p = 0.001 * Pdry
f = nu * 1e-9
nr = 2.39 * e * theta + 41.6 * e * theta * theta + 6.47e-6 * f ** 2.05 * e * theta ** 2.4
ni = (0.915 * 1.40e-6 * p + 5.41e-5 * e * theta * theta * theta) * f * e * theta ** 2.5
# Sum the contributions of the lines.
for i in xrange(0, len(mnu0)):
S = b1[i] * e * theta ** 3.5 * math.exp(b2[i] * (1.0 - theta))
gamma = b3[i] * (p * theta ** 0.8 + 4.80 * e * theta)
x = (mnu0[i] - f) * (mnu0[i] - f) + gamma * gamma
y = (mnu0[i] + f) * (mnu0[i] + f) + gamma * gamma
z = (mnu0[i] + gamma * gamma / mnu0[i])
nr = nr + S * ((z - f) / x + (z + f) / y - 2 / mnu0[i])
ni = ni + S * ((1.0 / x + 1.0 / y) * gamma * f / mnu0[i])
# Return the result.
return complex(nr, ni)
def pvapsat(T):
# From Miriad; Determine the saturation pressure of water vapour.
# Input:
# T = temperature (K)
#
# Output:
# vapour saturation pressure (Pa)
if (T > 215):
theta = 300.0 / T
return 1e5 / (41.51 * (theta ** -5) * (10 **(9.384 * theta - 10.0)))
else:
return 0.0
def refract(t, pdry, pvap, z, n, nu, T0, el):
# From Miriad; Compute refractive index for an atmosphere.
# Determine the sky brightness and excess path lengths for a parallel
# slab atmosphere. Liebe's model (1985) is used to determine the complex
# refractive index of air.
#
# Input:
# n = the number of atmospheric layers.
# t = temperature of the layers. T[0] is the temperature at the lowest
# layer (K)
# Pdry = partial pressure of the dry components (Pa)
# Pvap = partial pressure of the water vapour components (Pa)
# z = height of the layer.
# nu = frequency of interest (Hz)
# T0 = astronomical brightness temperature (K)
# el = elevation angle of the source above the atmosphere (rad)
#
# Output:
# { 'Tb' = brightness temperature (K),
# 'tau' = opacity (nepers)
# 'Ldry' = excess path, dry component (m)
# 'Lvap' = excess path, water vapour component (m) }
# Some constants.
|
HMKS = 6.6260755e-34 # Planck constant, J.s
KMKS = 1.380658e-23 # Boltzmann constant, J/K
CMKS = 299792458 # Speed of light, m/s
tau = 0.0
Tb = HMKS * nu / (KMKS * (math.exp(HMKS * nu / (KMKS * T0)) - 1))
Ldry = 0.0
Lvap = 0.0
snell = math.sin(el)
for i in xrange(n, 0, -1):
if (i == 1):
dz = 0.5 * (z[1] - z[0])
elif (i == n):
dz = 0.5 * (z[n] - z[n - 1])
else:
dz = 0.5 * (z[i + 1] - z[i - 1])
Ndry = refdry(nu, t[i], pdry[i], pvap[i])
Nvap = refvap(nu, t[i], pdry[i], pvap[i])
nr = 1 + (Ndry.real + Nvap.real) * 1e-6
|
identifier_body
|
|
refract.py
|
f / gamma0) ** 2) - 1.0) * p * theta * theta
ni = (2.0 * 3.07e-4 / (gamma0 * (1.0 + (f / gamma0) ** 2) * (1.0 + (f / 60.0) ** 2)) +
ap * p * theta ** 2.5) * f * p * theta * theta
# Sum the contributions of the lines.
for i in xrange(0, len(nu0)):
S = a1[i] * p * theta ** 3 * math.exp(a2[i] * (1.0 - theta))
gamma = a3[i] * (p * theta ** (0.8 - a4[i])) + 1.1 * e * theta
delta = a5[i] * p * theta ** a6[i]
x = (nu0[i] - f) * (nu0[i] - f) + gamma * gamma
y = (nu0[i] + f) * (nu0[i] + f) + gamma * gamma
z = (nu0[i] + gamma * gamma / nu0[i])
nr = nr + S * ((z - f) / x + (z + f) / y - 2 / nu0[i] + delta *
(1.0 / x - 1.0 / y) * gamma * f / nu0[i])
ni = ni + S * ((1.0 / x + 1.0 / y) * gamma * f / nu0[i] - delta *
((nu0[i] - f) / x + (nu0[i] + f) / y) * f / nu0[i])
# Return the result.
return complex(nr, ni)
def refvap(nu, T, Pdry, Pvap):
# From Miriad; Determine the complex refractivity of the water vapour monomers.
#
# Inputs:
# nu = observating frequency (Hz)
# T = temperature (K)
# Pdry = partial pressure of dry components (Pa)
# Pvap = partial pressure of water vapour (Pa)
# Table of the microwave water lines.
mnu0 = [ 22.235080, 67.813960, 119.995940, 183.310117, 321.225644, 325.152919,
336.187000, 380.197372, 390.134508, 437.346667, 439.150812, 443.018295,
448.001075, 470.888947, 474.689127, 488.491133, 503.568532, 504.482692,
556.936002, 620.700807, 658.006500, 752.033227, 841.073593, 859.865000,
899.407000, 902.555000, 906.205524, 916.171582, 970.315022, 987.926764 ]
b1 = [ 0.1090, 0.0011, 0.0007, 2.3000, 0.0464, 1.5400,
0.0010, 11.9000, 0.0044, 0.0637, 0.9210, 0.1940,
10.6000, 0.3300, 1.2800, 0.2530, 0.0374, 0.0125,
510.0000, 5.0900, 0.2740, 250.0000, 0.0130, 0.1330,
0.0550, 0.0380, 0.1830, 8.5600, 9.1600, 138.000 ]
b2 = [ 2.143, 8.730, 8.347, 0.653, 6.156, 1.515,
9.802, 1.018, 7.318, 5.015, 3.561, 5.015,
1.370, 3.561, 2.342, 2.814, 6.693, 6.693,
0.114, 2.150, 7.767, 0.336, 8.113, 7.989,
7.845, 8.360, 5.039, 1.369, 1.842, 0.178 ]
b3 = [ 27.84E-3, 27.60E-3, 27.00E-3, 28.35E-3, 21.40E-3, 27.00E-3,
26.50E-3, 27.60E-3, 19.00E-3, 13.70E-3, 16.40E-3, 14.40E-3,
23.80E-3, 18.20E-3, 19.80E-3, 24.90E-3, 11.50E-3, 11.90E-3,
30.00E-3, 22.30E-3, 30.00E-3, 28.60E-3, 14.10E-3, 28.60E-3,
28.60E-3, 26.40E-3, 23.40E-3, 25.30E-3, 24.00E-3, 28.60E-3 ]
# Convert to the units of Liebe.
theta = 300.0 / T
e = 0.001 * Pvap
p = 0.001 * Pdry
f = nu * 1e-9
nr = 2.39 * e * theta + 41.6 * e * theta * theta + 6.47e-6 * f ** 2.05 * e * theta ** 2.4
ni = (0.915 * 1.40e-6 * p + 5.41e-5 * e * theta * theta * theta) * f * e * theta ** 2.5
# Sum the contributions of the lines.
for i in xrange(0, len(mnu0)):
|
S = b1[i] * e * theta ** 3.5 * math.exp(b2[i] * (1.0 - theta))
gamma = b3[i] * (p * theta ** 0.8 + 4.80 * e * theta)
x = (mnu0[i] - f) * (mnu0[i] - f) + gamma * gamma
y = (mnu0[i] + f) * (mnu0[i] + f) + gamma * gamma
z = (mnu0[i] + gamma * gamma / mnu0[i])
nr = nr + S * ((z - f) / x + (z + f) / y - 2 / mnu0[i])
ni = ni + S * ((1.0 / x + 1.0 / y) * gamma * f / mnu0[i])
|
conditional_block
|
|
refract.py
|
] * (1.0 - theta))
gamma = a3[i] * (p * theta ** (0.8 - a4[i])) + 1.1 * e * theta
delta = a5[i] * p * theta ** a6[i]
x = (nu0[i] - f) * (nu0[i] - f) + gamma * gamma
y = (nu0[i] + f) * (nu0[i] + f) + gamma * gamma
z = (nu0[i] + gamma * gamma / nu0[i])
nr = nr + S * ((z - f) / x + (z + f) / y - 2 / nu0[i] + delta *
(1.0 / x - 1.0 / y) * gamma * f / nu0[i])
ni = ni + S * ((1.0 / x + 1.0 / y) * gamma * f / nu0[i] - delta *
((nu0[i] - f) / x + (nu0[i] + f) / y) * f / nu0[i])
# Return the result.
return complex(nr, ni)
def refvap(nu, T, Pdry, Pvap):
# From Miriad; Determine the complex refractivity of the water vapour monomers.
#
# Inputs:
# nu = observating frequency (Hz)
# T = temperature (K)
# Pdry = partial pressure of dry components (Pa)
# Pvap = partial pressure of water vapour (Pa)
# Table of the microwave water lines.
mnu0 = [ 22.235080, 67.813960, 119.995940, 183.310117, 321.225644, 325.152919,
336.187000, 380.197372, 390.134508, 437.346667, 439.150812, 443.018295,
448.001075, 470.888947, 474.689127, 488.491133, 503.568532, 504.482692,
556.936002, 620.700807, 658.006500, 752.033227, 841.073593, 859.865000,
899.407000, 902.555000, 906.205524, 916.171582, 970.315022, 987.926764 ]
b1 = [ 0.1090, 0.0011, 0.0007, 2.3000, 0.0464, 1.5400,
0.0010, 11.9000, 0.0044, 0.0637, 0.9210, 0.1940,
10.6000, 0.3300, 1.2800, 0.2530, 0.0374, 0.0125,
510.0000, 5.0900, 0.2740, 250.0000, 0.0130, 0.1330,
0.0550, 0.0380, 0.1830, 8.5600, 9.1600, 138.000 ]
b2 = [ 2.143, 8.730, 8.347, 0.653, 6.156, 1.515,
9.802, 1.018, 7.318, 5.015, 3.561, 5.015,
1.370, 3.561, 2.342, 2.814, 6.693, 6.693,
0.114, 2.150, 7.767, 0.336, 8.113, 7.989,
7.845, 8.360, 5.039, 1.369, 1.842, 0.178 ]
b3 = [ 27.84E-3, 27.60E-3, 27.00E-3, 28.35E-3, 21.40E-3, 27.00E-3,
26.50E-3, 27.60E-3, 19.00E-3, 13.70E-3, 16.40E-3, 14.40E-3,
23.80E-3, 18.20E-3, 19.80E-3, 24.90E-3, 11.50E-3, 11.90E-3,
30.00E-3, 22.30E-3, 30.00E-3, 28.60E-3, 14.10E-3, 28.60E-3,
28.60E-3, 26.40E-3, 23.40E-3, 25.30E-3, 24.00E-3, 28.60E-3 ]
# Convert to the units of Liebe.
theta = 300.0 / T
e = 0.001 * Pvap
p = 0.001 * Pdry
f = nu * 1e-9
nr = 2.39 * e * theta + 41.6 * e * theta * theta + 6.47e-6 * f ** 2.05 * e * theta ** 2.4
ni = (0.915 * 1.40e-6 * p + 5.41e-5 * e * theta * theta * theta) * f * e * theta ** 2.5
# Sum the contributions of the lines.
for i in xrange(0, len(mnu0)):
S = b1[i] * e * theta ** 3.5 * math.exp(b2[i] * (1.0 - theta))
gamma = b3[i] * (p * theta ** 0.8 + 4.80 * e * theta)
x = (mnu0[i] - f) * (mnu0[i] - f) + gamma * gamma
y = (mnu0[i] + f) * (mnu0[i] + f) + gamma * gamma
z = (mnu0[i] + gamma * gamma / mnu0[i])
nr = nr + S * ((z - f) / x + (z + f) / y - 2 / mnu0[i])
ni = ni + S * ((1.0 / x + 1.0 / y) * gamma * f / mnu0[i])
# Return the result.
return complex(nr, ni)
def pvapsat(T):
# From Miriad; Determine the saturation pressure of water vapour.
# Input:
# T = temperature (K)
#
# Output:
# vapour saturation pressure (Pa)
if (T > 215):
theta = 300.0 / T
return 1e5 / (41.51 * (theta ** -5) * (10 **(9.384 * theta - 10.0)))
else:
return 0.0
def
|
refract
|
identifier_name
|
|
admin.js
|
;
}
}
},
playerUpdate: function(user, x){
if (Meteor.isServer){
var result = Meteor.users.update({username: user}, {$set: {profile: x}});
return result;
}
},
grabRecord: function(user){
if (Meteor.isServer){
var result = Meteor.users.find({username: user}).fetch()[0].profile;
console.log("server grabbed user record: " + user);
// if (Meteor.users.find({username:user}).fetch()[0].shadow.duel.new.length===0){
// flag2="duel"; // what does this do
// }
return result;
}
},
shadowReboot: function(user){
if (Meteor.isServer){
console.log("Monthday test: " + monthday + "(shadowreboot");
var result = Meteor.users.find({username: user}).fetch()[0].shadow;
result.loginsWeb++;
var result2 = Meteor.users.update({username: user}, {$set: {shadow: result}});
console.log("shadowreboot success");
}
},
checkUser: function(user,job,input){
if (Meteor.isServer){
var stub = Meteor.users.find({username: user}).fetch()[0].shadow
if (job==="start"){
var result={duel:false,duelghost:[],drink:false}
if (stub.duelflag===true){
console.log(user + " was in a duel");
result.duel=true;
for (i=0;i<stub.duel.new.length;i++){
// if user has been in more than one duel, this pushes their challengers' names & W/L in array
result.duelghost.push(stub.duel.new[i]);
}
}
if (stub.tavernflag===true){
result.drink=true;
}
logging(user);
return result;
} else if (job==="getdrink"){
var temp="";
for(i=0;i<stub.drinks.recd.length;i++){
temp += stub.drinks.recd[i].type.name + " from " + stub.drinks.recd[i].sender + "<br>";
}
if (stub.drinks.recd.length>1){
var out = "You have " + stub.drinks.recd.length + " drinks awaiting you:<br>" + temp;
} else {
var out = "You have a drink awaiting you:<br>" + "<ul>" + temp + "</ul>";
}
return out;
} else if (job==="grabdrink"){
var temp = {drink:undefined,position:undefined};
var res;
function findsender(element,index,array)
|
stub.drinks.recd.forEach(findsender);
if (typeof(res)==="number"){
temp.position=res;
temp.drink=stub.drinks.recd[res];
return temp;
}
else return false;
} else if (job==="movedrink"){
var recd = Meteor.users.find({username:user}).fetch()[0].shadow.drinks.recd[input];
Meteor.users.update({username:user},{$push:{"shadow.drinks.sent":recd}});
var mover = Meteor.users.find({username:user}).fetch()[0].shadow.drinks.recd
var spliced = mover.splice(input,1);
Meteor.users.update({username:user},{$set:{"shadow.drinks.recd":mover}});
if (Meteor.users.find({username:user}).fetch()[0].shadow.drinks.recd.length===0){
Meteor.users.update({username:user},{$set:{"shadow.tavernflag":false}});
return false;
} else return true;
} else if (job==="senddrink"){
var recipient = input.to;
var topush = {sender:user, type: input.type, msg: input.msg}
Meteor.users.update({username:recipient},{$push:{"shadow.drinks.recd":topush}});
Meteor.users.update({username:recipient},{$set:{"shadow.tavernflag":true}});
Meteor.users.update({username:user},{$push:{"shadow.drinks.sent":topush}});
console.log(user + " sent a drink to " + recipient);
// Shadow.update({username:recipient},{$push:{"profile.drinks.recd":topush}});
// Shadow.update({username:recipient},{$set:{"profile.tavernflag":true}});
// Shadow.update({username:user},{$push:{"profile.drinks.sent":topush}});
// returns the # of affected docs, so maybe try consoling that out
} else if (job==="checkdupdrink"){
var recipient=input;
var temp;
function checkdups(x,y,z){
if(x.sender===user){
temp=true;
}
}
Meteor.users.find({username:recipient}).fetch()[0].shadow.drinks.recd.forEach(checkdups)
// Shadow.find({username:recipient}).fetch()[0].profile.drinks.recd.forEach(checkdups)
if (temp) return true;
// true means recipient already has a drink from the sender
else return false;
// go for it!
}
}
},
userduel: function(player,target,type){
if (Meteor.isServer) {
if (type==="notify"){
Meteor.users.update({username:player},{$set:{"shadow.duelflag":false}});
// Shadow.update({username:player},{$set:{"profile.duelflag":false}});
var temp = Meteor.users.find({username:player}).fetch()[0].shadow.duel.new;
for (i=0;i<temp.length;i++){
Meteor.users.update({username:player},{$push:{"shadow.duel.old":temp[i]}});
// Shadow.update({username:player},{$push:{"profile.duel.old":temp[i]}});
}
Meteor.users.update({username:player},{$set:{"shadow.duel.new":[]}});
// Shadow.update({username:player},{$set:{"profile.duel.new":[]}});
} else {
var name = target.username;
var pname = player.username;
duelmodel.opp = pname;
if (type==="win"){
// player won, target lost, so push a duelmodel to target's acct with a big fat L
var temp = Math.round(target.gold*0.5);
duelmodel.result = "l";
Meteor.users.update({username: name}, {$set: {"profile.gold": temp}});
console.log("DUEL: " + pname + " challenged " + name + " and won");
} else if (type==="lose"){
var temp = Math.round(player.gold*0.5);
temp += target.gold;
duelmodel.result = "w";
Meteor.users.update({username: name}, {$set: {"profile.gold": temp}});
console.log("DUEL: " + pname + " challenged " + name + " and lost");
}
Meteor.users.update({username:name},{$set:{"shadow.duelflag":true}});
Meteor.users.update({username:name},{$push:{"shadow.duel.new":duelmodel}});
// Shadow.update({username:name},{$set:{"profile.duelflag":true}});
// Shadow.update({username:name},{$push:{"profile.duel.new":duelmodel}});
}
}
},
loggingout: function(user){
var temp = new Date();
var month = temp.getMonth() + 1;
var day = temp.getDate();
var date = month + "-" + day;
date = date.toString();
var esthrs = temp.getUTCHours() - 5;
var estmins = temp.getMinutes();
var time = esthrs + ":" + estmins;
var log = {username: user, logout_time:time}
var temp2 = Log.update({date:date},{$push: {"activity":log}});
console.log(user + " logged out at " + date + " " + time);
},
acts: function(user,event,z){
if (Meteor.isServer){
if (event==="startcheck"){
Meteor.users.update({username:user},{$set: {"shadow.lastlogin":monthday}});
Meteor.users.update({username:user},{$inc:{"shadow.logins":1}});
} else if (event==="level"){
console.log(user + " advanced to level " + z);
var newstuff = Activity.find({date:monthday}).fetch()[0].activity;
if (z===2){
newstuff += "<li> <span id=menu>" + Meteor.user().username + "</span> has advanced to the level of " + levels.apprentice.name + "!";
} else if (z===3){
newstuff += "<li> <span id=menu>" + Meteor.user().username + "</span> has advanced to the level of " + levels.challenger.name + "!";
} else if (z===4){
newstuff += "<li> <span id=menu>" + Meteor.user().username + "</span> has advanced to the level of " + levels.journeyman.name + "!";
}
var temp2 = Activity.update({date:monthday},{$
|
{
if (element.sender.toLowerCase()===input){
res=index;
}
}
|
identifier_body
|
admin.js
|
;
}
}
},
playerUpdate: function(user, x){
if (Meteor.isServer){
var result = Meteor.users.update({username: user}, {$set: {profile: x}});
return result;
}
},
grabRecord: function(user){
if (Meteor.isServer){
var result = Meteor.users.find({username: user}).fetch()[0].profile;
console.log("server grabbed user record: " + user);
// if (Meteor.users.find({username:user}).fetch()[0].shadow.duel.new.length===0){
// flag2="duel"; // what does this do
// }
return result;
}
},
shadowReboot: function(user){
if (Meteor.isServer){
console.log("Monthday test: " + monthday + "(shadowreboot");
var result = Meteor.users.find({username: user}).fetch()[0].shadow;
result.loginsWeb++;
var result2 = Meteor.users.update({username: user}, {$set: {shadow: result}});
console.log("shadowreboot success");
}
},
checkUser: function(user,job,input){
if (Meteor.isServer){
var stub = Meteor.users.find({username: user}).fetch()[0].shadow
if (job==="start"){
var result={duel:false,duelghost:[],drink:false}
if (stub.duelflag===true){
console.log(user + " was in a duel");
result.duel=true;
for (i=0;i<stub.duel.new.length;i++){
// if user has been in more than one duel, this pushes their challengers' names & W/L in array
result.duelghost.push(stub.duel.new[i]);
}
}
if (stub.tavernflag===true){
result.drink=true;
}
logging(user);
return result;
} else if (job==="getdrink"){
var temp="";
for(i=0;i<stub.drinks.recd.length;i++){
temp += stub.drinks.recd[i].type.name + " from " + stub.drinks.recd[i].sender + "<br>";
}
if (stub.drinks.recd.length>1){
var out = "You have " + stub.drinks.recd.length + " drinks awaiting you:<br>" + temp;
} else {
var out = "You have a drink awaiting you:<br>" + "<ul>" + temp + "</ul>";
}
return out;
} else if (job==="grabdrink"){
var temp = {drink:undefined,position:undefined};
var res;
function
|
(element,index,array){
if (element.sender.toLowerCase()===input){
res=index;
}
}
stub.drinks.recd.forEach(findsender);
if (typeof(res)==="number"){
temp.position=res;
temp.drink=stub.drinks.recd[res];
return temp;
}
else return false;
} else if (job==="movedrink"){
var recd = Meteor.users.find({username:user}).fetch()[0].shadow.drinks.recd[input];
Meteor.users.update({username:user},{$push:{"shadow.drinks.sent":recd}});
var mover = Meteor.users.find({username:user}).fetch()[0].shadow.drinks.recd
var spliced = mover.splice(input,1);
Meteor.users.update({username:user},{$set:{"shadow.drinks.recd":mover}});
if (Meteor.users.find({username:user}).fetch()[0].shadow.drinks.recd.length===0){
Meteor.users.update({username:user},{$set:{"shadow.tavernflag":false}});
return false;
} else return true;
} else if (job==="senddrink"){
var recipient = input.to;
var topush = {sender:user, type: input.type, msg: input.msg}
Meteor.users.update({username:recipient},{$push:{"shadow.drinks.recd":topush}});
Meteor.users.update({username:recipient},{$set:{"shadow.tavernflag":true}});
Meteor.users.update({username:user},{$push:{"shadow.drinks.sent":topush}});
console.log(user + " sent a drink to " + recipient);
// Shadow.update({username:recipient},{$push:{"profile.drinks.recd":topush}});
// Shadow.update({username:recipient},{$set:{"profile.tavernflag":true}});
// Shadow.update({username:user},{$push:{"profile.drinks.sent":topush}});
// returns the # of affected docs, so maybe try consoling that out
} else if (job==="checkdupdrink"){
var recipient=input;
var temp;
function checkdups(x,y,z){
if(x.sender===user){
temp=true;
}
}
Meteor.users.find({username:recipient}).fetch()[0].shadow.drinks.recd.forEach(checkdups)
// Shadow.find({username:recipient}).fetch()[0].profile.drinks.recd.forEach(checkdups)
if (temp) return true;
// true means recipient already has a drink from the sender
else return false;
// go for it!
}
}
},
userduel: function(player,target,type){
if (Meteor.isServer) {
if (type==="notify"){
Meteor.users.update({username:player},{$set:{"shadow.duelflag":false}});
// Shadow.update({username:player},{$set:{"profile.duelflag":false}});
var temp = Meteor.users.find({username:player}).fetch()[0].shadow.duel.new;
for (i=0;i<temp.length;i++){
Meteor.users.update({username:player},{$push:{"shadow.duel.old":temp[i]}});
// Shadow.update({username:player},{$push:{"profile.duel.old":temp[i]}});
}
Meteor.users.update({username:player},{$set:{"shadow.duel.new":[]}});
// Shadow.update({username:player},{$set:{"profile.duel.new":[]}});
} else {
var name = target.username;
var pname = player.username;
duelmodel.opp = pname;
if (type==="win"){
// player won, target lost, so push a duelmodel to target's acct with a big fat L
var temp = Math.round(target.gold*0.5);
duelmodel.result = "l";
Meteor.users.update({username: name}, {$set: {"profile.gold": temp}});
console.log("DUEL: " + pname + " challenged " + name + " and won");
} else if (type==="lose"){
var temp = Math.round(player.gold*0.5);
temp += target.gold;
duelmodel.result = "w";
Meteor.users.update({username: name}, {$set: {"profile.gold": temp}});
console.log("DUEL: " + pname + " challenged " + name + " and lost");
}
Meteor.users.update({username:name},{$set:{"shadow.duelflag":true}});
Meteor.users.update({username:name},{$push:{"shadow.duel.new":duelmodel}});
// Shadow.update({username:name},{$set:{"profile.duelflag":true}});
// Shadow.update({username:name},{$push:{"profile.duel.new":duelmodel}});
}
}
},
loggingout: function(user){
var temp = new Date();
var month = temp.getMonth() + 1;
var day = temp.getDate();
var date = month + "-" + day;
date = date.toString();
var esthrs = temp.getUTCHours() - 5;
var estmins = temp.getMinutes();
var time = esthrs + ":" + estmins;
var log = {username: user, logout_time:time}
var temp2 = Log.update({date:date},{$push: {"activity":log}});
console.log(user + " logged out at " + date + " " + time);
},
acts: function(user,event,z){
if (Meteor.isServer){
if (event==="startcheck"){
Meteor.users.update({username:user},{$set: {"shadow.lastlogin":monthday}});
Meteor.users.update({username:user},{$inc:{"shadow.logins":1}});
} else if (event==="level"){
console.log(user + " advanced to level " + z);
var newstuff = Activity.find({date:monthday}).fetch()[0].activity;
if (z===2){
newstuff += "<li> <span id=menu>" + Meteor.user().username + "</span> has advanced to the level of " + levels.apprentice.name + "!";
} else if (z===3){
newstuff += "<li> <span id=menu>" + Meteor.user().username + "</span> has advanced to the level of " + levels.challenger.name + "!";
} else if (z===4){
newstuff += "<li> <span id=menu>" + Meteor.user().username + "</span> has advanced to the level of " + levels.journeyman.name + "!";
}
var temp2 = Activity.update({date:monthday},{$set
|
findsender
|
identifier_name
|
admin.js
|
},
playerSetup: function(user){
if (Meteor.isServer){
console.log("playerSetup checkin");
var temp = shadowprofile;
temp.profileStarted = monthday;
Meteor.users.update({username: user},{$set: {shadow: temp}});
Meteor.users.update({username: user},{$set: {"bank.deposit": 0}});
console.log("NEW USER: " + Meteor.user().username);
logging(user);
}
},
listusers: function(){
// returns all except user
if (Meteor.isServer){
var templist = "";
function listbuild (x,y,z) {
if (x.username!=Meteor.user().username){
templist += "<li>" + x.username + "<br>";
}
}
Meteor.users.find().fetch().forEach(listbuild);
return templist;
}
},
bankUpdate: function(user,action,amt){
if (Meteor.isServer){
if (action==="dep"){
var result = Meteor.users.update({username: user}, {$inc: {"bank.deposit": amt}});
// var result = Bank.update({username: user}, {$inc: {deposit: amt}});
console.log(user + " deposited " + amt);
console.log("deposit update result: " + result);
return result;
} else if (action==="with"){
var result = Meteor.users.update({username: user}, {$inc: {"bank.deposit": -amt}});
// var result = Bank.update({username: user}, {$inc: {deposit: -amt}});
console.log(user + " withdrew " + amt);
return result;
}
}
},
playerUpdate: function(user, x){
if (Meteor.isServer){
var result = Meteor.users.update({username: user}, {$set: {profile: x}});
return result;
}
},
grabRecord: function(user){
if (Meteor.isServer){
var result = Meteor.users.find({username: user}).fetch()[0].profile;
console.log("server grabbed user record: " + user);
// if (Meteor.users.find({username:user}).fetch()[0].shadow.duel.new.length===0){
// flag2="duel"; // what does this do
// }
return result;
}
},
shadowReboot: function(user){
if (Meteor.isServer){
console.log("Monthday test: " + monthday + "(shadowreboot");
var result = Meteor.users.find({username: user}).fetch()[0].shadow;
result.loginsWeb++;
var result2 = Meteor.users.update({username: user}, {$set: {shadow: result}});
console.log("shadowreboot success");
}
},
checkUser: function(user,job,input){
if (Meteor.isServer){
var stub = Meteor.users.find({username: user}).fetch()[0].shadow
if (job==="start"){
var result={duel:false,duelghost:[],drink:false}
if (stub.duelflag===true){
console.log(user + " was in a duel");
result.duel=true;
for (i=0;i<stub.duel.new.length;i++){
// if user has been in more than one duel, this pushes their challengers' names & W/L in array
result.duelghost.push(stub.duel.new[i]);
}
}
if (stub.tavernflag===true){
result.drink=true;
}
logging(user);
return result;
} else if (job==="getdrink"){
var temp="";
for(i=0;i<stub.drinks.recd.length;i++){
temp += stub.drinks.recd[i].type.name + " from " + stub.drinks.recd[i].sender + "<br>";
}
if (stub.drinks.recd.length>1){
var out = "You have " + stub.drinks.recd.length + " drinks awaiting you:<br>" + temp;
} else {
var out = "You have a drink awaiting you:<br>" + "<ul>" + temp + "</ul>";
}
return out;
} else if (job==="grabdrink"){
var temp = {drink:undefined,position:undefined};
var res;
function findsender(element,index,array){
if (element.sender.toLowerCase()===input){
res=index;
}
}
stub.drinks.recd.forEach(findsender);
if (typeof(res)==="number"){
temp.position=res;
temp.drink=stub.drinks.recd[res];
return temp;
}
else return false;
} else if (job==="movedrink"){
var recd = Meteor.users.find({username:user}).fetch()[0].shadow.drinks.recd[input];
Meteor.users.update({username:user},{$push:{"shadow.drinks.sent":recd}});
var mover = Meteor.users.find({username:user}).fetch()[0].shadow.drinks.recd
var spliced = mover.splice(input,1);
Meteor.users.update({username:user},{$set:{"shadow.drinks.recd":mover}});
if (Meteor.users.find({username:user}).fetch()[0].shadow.drinks.recd.length===0){
Meteor.users.update({username:user},{$set:{"shadow.tavernflag":false}});
return false;
} else return true;
} else if (job==="senddrink"){
var recipient = input.to;
var topush = {sender:user, type: input.type, msg: input.msg}
Meteor.users.update({username:recipient},{$push:{"shadow.drinks.recd":topush}});
Meteor.users.update({username:recipient},{$set:{"shadow.tavernflag":true}});
Meteor.users.update({username:user},{$push:{"shadow.drinks.sent":topush}});
console.log(user + " sent a drink to " + recipient);
// Shadow.update({username:recipient},{$push:{"profile.drinks.recd":topush}});
// Shadow.update({username:recipient},{$set:{"profile.tavernflag":true}});
// Shadow.update({username:user},{$push:{"profile.drinks.sent":topush}});
// returns the # of affected docs, so maybe try consoling that out
} else if (job==="checkdupdrink"){
var recipient=input;
var temp;
function checkdups(x,y,z){
if(x.sender===user){
temp=true;
}
}
Meteor.users.find({username:recipient}).fetch()[0].shadow.drinks.recd.forEach(checkdups)
// Shadow.find({username:recipient}).fetch()[0].profile.drinks.recd.forEach(checkdups)
if (temp) return true;
// true means recipient already has a drink from the sender
else return false;
// go for it!
}
}
},
userduel: function(player,target,type){
if (Meteor.isServer) {
if (type==="notify"){
Meteor.users.update({username:player},{$set:{"shadow.duelflag":false}});
// Shadow.update({username:player},{$set:{"profile.duelflag":false}});
var temp = Meteor.users.find({username:player}).fetch()[0].shadow.duel.new;
for (i=0;i<temp.length;i++){
Meteor.users.update({username:player},{$push:{"shadow.duel.old":temp[i]}});
// Shadow.update({username:player},{$push:{"profile.duel.old":temp[i]}});
}
Meteor.users.update({username:player},{$set:{"shadow.duel.new":[]}});
// Shadow.update({username:player},{$set:{"profile.duel.new":[]}});
} else {
var name = target.username;
var pname = player.username;
duelmodel.opp = pname;
if (type==="win"){
// player won, target lost, so push a duelmodel to target's acct with a big fat L
var temp = Math.round(target.gold*0.5);
duelmodel.result = "l";
Meteor.users.update({username: name}, {$set: {"profile.gold": temp}});
console.log("DUEL: " + pname + " challenged " + name + " and won");
} else if (type==="lose"){
var temp = Math.round(player.gold*0.5);
temp += target.gold;
duelmodel.result = "w";
Meteor.users.update({username: name}, {$set: {"profile.gold": temp}});
console.log("DUEL: " + pname + " challenged " + name + " and lost");
}
Meteor.users.update({username:name},{$set:{"shadow.duelflag":true}});
Meteor.users.update({username:name},{$push:{"shadow.duel.new":duelmodel}});
// Shadow.update({username:name},{$set:{"profile.duelflag":true}});
// Shadow.update({username:name},{$push:{"profile.duel.new":duelmodel}});
}
|
{
if (Accounts.findUserByUsername(x)){
console.log("Server found: " + Accounts.findUserByUsername(x).username);
return true;
} else {
return false;
}
}
|
conditional_block
|
|
admin.js
|
}
}
},
playerUpdate: function(user, x){
if (Meteor.isServer){
var result = Meteor.users.update({username: user}, {$set: {profile: x}});
return result;
}
},
grabRecord: function(user){
if (Meteor.isServer){
var result = Meteor.users.find({username: user}).fetch()[0].profile;
console.log("server grabbed user record: " + user);
// if (Meteor.users.find({username:user}).fetch()[0].shadow.duel.new.length===0){
// flag2="duel"; // what does this do
// }
return result;
}
},
shadowReboot: function(user){
if (Meteor.isServer){
console.log("Monthday test: " + monthday + "(shadowreboot");
var result = Meteor.users.find({username: user}).fetch()[0].shadow;
result.loginsWeb++;
var result2 = Meteor.users.update({username: user}, {$set: {shadow: result}});
console.log("shadowreboot success");
}
},
checkUser: function(user,job,input){
if (Meteor.isServer){
var stub = Meteor.users.find({username: user}).fetch()[0].shadow
if (job==="start"){
var result={duel:false,duelghost:[],drink:false}
if (stub.duelflag===true){
console.log(user + " was in a duel");
result.duel=true;
for (i=0;i<stub.duel.new.length;i++){
// if user has been in more than one duel, this pushes their challengers' names & W/L in array
result.duelghost.push(stub.duel.new[i]);
}
}
if (stub.tavernflag===true){
result.drink=true;
}
logging(user);
return result;
} else if (job==="getdrink"){
var temp="";
for(i=0;i<stub.drinks.recd.length;i++){
temp += stub.drinks.recd[i].type.name + " from " + stub.drinks.recd[i].sender + "<br>";
}
if (stub.drinks.recd.length>1){
var out = "You have " + stub.drinks.recd.length + " drinks awaiting you:<br>" + temp;
} else {
var out = "You have a drink awaiting you:<br>" + "<ul>" + temp + "</ul>";
}
return out;
} else if (job==="grabdrink"){
var temp = {drink:undefined,position:undefined};
var res;
function findsender(element,index,array){
if (element.sender.toLowerCase()===input){
res=index;
}
}
stub.drinks.recd.forEach(findsender);
if (typeof(res)==="number"){
temp.position=res;
temp.drink=stub.drinks.recd[res];
return temp;
}
else return false;
} else if (job==="movedrink"){
var recd = Meteor.users.find({username:user}).fetch()[0].shadow.drinks.recd[input];
Meteor.users.update({username:user},{$push:{"shadow.drinks.sent":recd}});
var mover = Meteor.users.find({username:user}).fetch()[0].shadow.drinks.recd
var spliced = mover.splice(input,1);
Meteor.users.update({username:user},{$set:{"shadow.drinks.recd":mover}});
if (Meteor.users.find({username:user}).fetch()[0].shadow.drinks.recd.length===0){
Meteor.users.update({username:user},{$set:{"shadow.tavernflag":false}});
return false;
} else return true;
} else if (job==="senddrink"){
var recipient = input.to;
var topush = {sender:user, type: input.type, msg: input.msg}
Meteor.users.update({username:recipient},{$push:{"shadow.drinks.recd":topush}});
Meteor.users.update({username:recipient},{$set:{"shadow.tavernflag":true}});
Meteor.users.update({username:user},{$push:{"shadow.drinks.sent":topush}});
console.log(user + " sent a drink to " + recipient);
// Shadow.update({username:recipient},{$push:{"profile.drinks.recd":topush}});
// Shadow.update({username:recipient},{$set:{"profile.tavernflag":true}});
// Shadow.update({username:user},{$push:{"profile.drinks.sent":topush}});
// returns the # of affected docs, so maybe try consoling that out
} else if (job==="checkdupdrink"){
var recipient=input;
var temp;
function checkdups(x,y,z){
if(x.sender===user){
temp=true;
}
}
Meteor.users.find({username:recipient}).fetch()[0].shadow.drinks.recd.forEach(checkdups)
// Shadow.find({username:recipient}).fetch()[0].profile.drinks.recd.forEach(checkdups)
if (temp) return true;
// true means recipient already has a drink from the sender
else return false;
// go for it!
}
}
},
userduel: function(player,target,type){
if (Meteor.isServer) {
if (type==="notify"){
Meteor.users.update({username:player},{$set:{"shadow.duelflag":false}});
|
Meteor.users.update({username:player},{$push:{"shadow.duel.old":temp[i]}});
// Shadow.update({username:player},{$push:{"profile.duel.old":temp[i]}});
}
Meteor.users.update({username:player},{$set:{"shadow.duel.new":[]}});
// Shadow.update({username:player},{$set:{"profile.duel.new":[]}});
} else {
var name = target.username;
var pname = player.username;
duelmodel.opp = pname;
if (type==="win"){
// player won, target lost, so push a duelmodel to target's acct with a big fat L
var temp = Math.round(target.gold*0.5);
duelmodel.result = "l";
Meteor.users.update({username: name}, {$set: {"profile.gold": temp}});
console.log("DUEL: " + pname + " challenged " + name + " and won");
} else if (type==="lose"){
var temp = Math.round(player.gold*0.5);
temp += target.gold;
duelmodel.result = "w";
Meteor.users.update({username: name}, {$set: {"profile.gold": temp}});
console.log("DUEL: " + pname + " challenged " + name + " and lost");
}
Meteor.users.update({username:name},{$set:{"shadow.duelflag":true}});
Meteor.users.update({username:name},{$push:{"shadow.duel.new":duelmodel}});
// Shadow.update({username:name},{$set:{"profile.duelflag":true}});
// Shadow.update({username:name},{$push:{"profile.duel.new":duelmodel}});
}
}
},
loggingout: function(user){
var temp = new Date();
var month = temp.getMonth() + 1;
var day = temp.getDate();
var date = month + "-" + day;
date = date.toString();
var esthrs = temp.getUTCHours() - 5;
var estmins = temp.getMinutes();
var time = esthrs + ":" + estmins;
var log = {username: user, logout_time:time}
var temp2 = Log.update({date:date},{$push: {"activity":log}});
console.log(user + " logged out at " + date + " " + time);
},
acts: function(user,event,z){
if (Meteor.isServer){
if (event==="startcheck"){
Meteor.users.update({username:user},{$set: {"shadow.lastlogin":monthday}});
Meteor.users.update({username:user},{$inc:{"shadow.logins":1}});
} else if (event==="level"){
console.log(user + " advanced to level " + z);
var newstuff = Activity.find({date:monthday}).fetch()[0].activity;
if (z===2){
newstuff += "<li> <span id=menu>" + Meteor.user().username + "</span> has advanced to the level of " + levels.apprentice.name + "!";
} else if (z===3){
newstuff += "<li> <span id=menu>" + Meteor.user().username + "</span> has advanced to the level of " + levels.challenger.name + "!";
} else if (z===4){
newstuff += "<li> <span id=menu>" + Meteor.user().username + "</span> has advanced to the level of " + levels.journeyman.name + "!";
}
var temp2 = Activity.update({date:monthday},{$set:{
|
// Shadow.update({username:player},{$set:{"profile.duelflag":false}});
var temp = Meteor.users.find({username:player}).fetch()[0].shadow.duel.new;
for (i=0;i<temp.length;i++){
|
random_line_split
|
universal_cluster.go
|
KumaCp UniversalNetworking `json:"kumaCp"`
}
type UniversalCluster struct {
t testing.TestingT
name string
controlplane *UniversalControlPlane
apps map[string]*UniversalApp
verbose bool
deployments map[string]Deployment
defaultTimeout time.Duration
defaultRetries int
opts kumaDeploymentOptions
envoyTunnels map[string]envoy_admin.Tunnel
networking map[string]UniversalNetworking
}
var _ Cluster = &UniversalCluster{}
func NewUniversalCluster(t *TestingT, name string, verbose bool) *UniversalCluster {
return &UniversalCluster{
t: t,
name: name,
apps: map[string]*UniversalApp{},
verbose: verbose,
deployments: map[string]Deployment{},
defaultRetries: Config.DefaultClusterStartupRetries,
defaultTimeout: Config.DefaultClusterStartupTimeout,
envoyTunnels: map[string]envoy_admin.Tunnel{},
networking: map[string]UniversalNetworking{},
}
}
func (c *UniversalCluster) WithTimeout(timeout time.Duration) Cluster {
c.defaultTimeout = timeout
return c
}
func (c *UniversalCluster) WithRetries(retries int) Cluster {
c.defaultRetries = retries
return c
}
func (c *UniversalCluster) Name() string {
return c.name
}
func (c *UniversalCluster) DismissCluster() error {
var errs error
for _, app := range c.apps {
err := app.Stop()
if err != nil {
errs = multierr.Append(errs, err)
}
}
for name, deployment := range c.deployments {
if err := deployment.Delete(c); err != nil {
errs = multierr.Append(errs, err)
}
delete(c.deployments, name)
}
return errs
}
func (c *UniversalCluster) Verbose() bool {
return c.verbose
}
func (c *UniversalCluster) DeployKuma(mode core.CpMode, opt ...KumaDeploymentOption) error {
if mode == core.Zone {
opt = append([]KumaDeploymentOption{WithEnvs(Config.KumaZoneUniversalEnvVars)}, opt...)
} else {
opt = append([]KumaDeploymentOption{WithEnvs(Config.KumaUniversalEnvVars)}, opt...)
}
c.opts.apply(opt...)
if c.opts.installationMode != KumactlInstallationMode {
return errors.Errorf("universal clusters only support the '%s' installation mode but got '%s'", KumactlInstallationMode, c.opts.installationMode)
}
env := map[string]string{"KUMA_MODE": mode, "KUMA_DNS_SERVER_PORT": "53"}
for k, v := range c.opts.env {
env[k] = v
}
if c.opts.globalAddress != "" {
env["KUMA_MULTIZONE_ZONE_GLOBAL_ADDRESS"] = c.opts.globalAddress
}
if c.opts.hdsDisabled {
env["KUMA_DP_SERVER_HDS_ENABLED"] = "false"
}
if Config.XDSApiVersion != "" {
env["KUMA_BOOTSTRAP_SERVER_API_VERSION"] = Config.XDSApiVersion
}
if Config.CIDR != "" {
env["KUMA_DNS_SERVER_CIDR"] = Config.CIDR
}
var dockerVolumes []string
if c.opts.yamlConfig != "" {
path, err := os.MkdirTemp("", "e2e-cp-cfg-*")
if err != nil {
return err
}
path = filepath.Join(path, "kuma-cp.conf")
if err := os.WriteFile(path, []byte(c.opts.yamlConfig), 0o600); err != nil {
return err
}
dockerVolumes = append(dockerVolumes, path+":/kuma/kuma-cp.conf")
}
cmd := []string{"kuma-cp", "run", "--config-file", "/kuma/kuma-cp.conf"}
if mode == core.Zone {
zoneName := c.opts.zoneName
if zoneName == "" {
zoneName = c.name
}
env["KUMA_MULTIZONE_ZONE_NAME"] = zoneName
env["KUMA_MULTIZONE_ZONE_KDS_TLS_SKIP_VERIFY"] = "true"
}
app, err := NewUniversalApp(c.t, c.name, AppModeCP, "", AppModeCP, c.opts.isipv6, true, []string{}, dockerVolumes, "")
if err != nil {
return err
}
app.CreateMainApp(env, cmd)
if c.opts.runPostgresMigration {
if err := runPostgresMigration(app, env); err != nil {
return err
}
}
if err := app.mainApp.Start(); err != nil {
return err
}
c.apps[AppModeCP] = app
pf := UniversalNetworking{
IP: app.ip,
ApiServerPort: app.ports["5681"],
SshPort: app.ports["22"],
}
c.controlplane, err = NewUniversalControlPlane(c.t, mode, c.name, c.verbose, pf, c.opts.apiHeaders, c.opts.setupKumactl)
if err != nil {
return err
}
for name, updateFuncs := range c.opts.meshUpdateFuncs {
for _, f := range updateFuncs {
Logf("applying update function to mesh %q", name)
err := c.controlplane.kumactl.KumactlUpdateObject("mesh", name,
func(resource core_model.Resource) core_model.Resource {
mesh := resource.(*core_mesh.MeshResource)
mesh.Spec = f(mesh.Spec)
return mesh
})
if err != nil {
return err
}
}
}
if c.opts.verifyKuma {
return c.VerifyKuma()
}
return nil
}
func (c *UniversalCluster) GetKuma() ControlPlane {
return c.controlplane
}
func (c *UniversalCluster) GetKumaCPLogs() (string, error) {
return c.apps[AppModeCP].mainApp.Out(), nil
}
func (c *UniversalCluster) VerifyKuma() error {
return c.controlplane.kumactl.RunKumactl("get", "dataplanes")
}
func (c *UniversalCluster) DeleteKuma() error {
err := c.apps[AppModeCP].Stop()
delete(c.apps, AppModeCP)
c.controlplane = nil
return err
}
func (c *UniversalCluster) GetKumactlOptions() *KumactlOptions {
return c.controlplane.kumactl
}
// K8s
func (c *UniversalCluster) GetKubectlOptions(namespace ...string) *k8s.KubectlOptions {
return nil
}
func (c *UniversalCluster) CreateNamespace(namespace string) error {
return nil
}
func (c *UniversalCluster) DeleteNamespace(namespace string) error {
return nil
}
func (c *UniversalCluster) CreateDP(app *UniversalApp, name, mesh, ip, dpyaml, token string, builtindns bool, concurrency int) error {
cpIp := c.controlplane.Networking().IP
cpAddress := "https://" + net.JoinHostPort(cpIp, "5678")
app.CreateDP(token, cpAddress, name, mesh, ip, dpyaml, builtindns, "", concurrency, app.dpEnv)
return app.dpApp.Start()
}
func (c *UniversalCluster) CreateZoneIngress(app *UniversalApp, name, ip, dpyaml, token string, builtindns bool) error {
app.CreateDP(token, c.controlplane.Networking().BootstrapAddress(), name, "", ip, dpyaml, builtindns, "ingress", 0, app.dpEnv)
if err := c.addIngressEnvoyTunnel(); err != nil {
return err
}
return app.dpApp.Start()
}
func (c *UniversalCluster) CreateZoneEgress(
app *UniversalApp,
name, ip, dpYAML, token string,
builtinDNS bool,
) error {
app.CreateDP(token, c.controlplane.Networking().BootstrapAddress(), name, "", ip, dpYAML, builtinDNS, "egress", 0, app.dpEnv)
if err := c.addEgressEnvoyTunnel(); err != nil {
return err
}
return app.dpApp.Start()
}
func (c *UniversalCluster) DeployApp(opt ...AppDeploymentOption) error {
var opts appDeploymentOptions
opts.apply(opt...)
appname := opts.appname
token := opts.token
transparent := opts.transparent != nil && *opts.transparent // default false
args := opts.appArgs
if opts.verbose == nil {
opts.verbose = &c.verbose
}
caps := []string{}
if transparent {
caps = append(caps, "NET_ADMIN", "NET_RAW")
}
Logf("IPV6 is %v", opts.isipv6)
app, err := NewUniversalApp(c.t, c.name, opts.name, opts.mesh, AppMode(appname), opts.isipv6, *opts.verbose, caps, opts.dockerVolumes, opts.dockerContainerName)
if err != nil {
return err
}
// We need to record the app before running any other options,
// since those options might fail. If they do, we have a running
|
type UniversalNetworkingState struct {
ZoneEgress UniversalNetworking `json:"zoneEgress"`
ZoneIngress UniversalNetworking `json:"zoneIngress"`
|
random_line_split
|
|
universal_cluster.go
|
_ZONE_GLOBAL_ADDRESS"] = c.opts.globalAddress
}
if c.opts.hdsDisabled {
env["KUMA_DP_SERVER_HDS_ENABLED"] = "false"
}
if Config.XDSApiVersion != "" {
env["KUMA_BOOTSTRAP_SERVER_API_VERSION"] = Config.XDSApiVersion
}
if Config.CIDR != "" {
env["KUMA_DNS_SERVER_CIDR"] = Config.CIDR
}
var dockerVolumes []string
if c.opts.yamlConfig != "" {
path, err := os.MkdirTemp("", "e2e-cp-cfg-*")
if err != nil {
return err
}
path = filepath.Join(path, "kuma-cp.conf")
if err := os.WriteFile(path, []byte(c.opts.yamlConfig), 0o600); err != nil {
return err
}
dockerVolumes = append(dockerVolumes, path+":/kuma/kuma-cp.conf")
}
cmd := []string{"kuma-cp", "run", "--config-file", "/kuma/kuma-cp.conf"}
if mode == core.Zone {
zoneName := c.opts.zoneName
if zoneName == "" {
zoneName = c.name
}
env["KUMA_MULTIZONE_ZONE_NAME"] = zoneName
env["KUMA_MULTIZONE_ZONE_KDS_TLS_SKIP_VERIFY"] = "true"
}
app, err := NewUniversalApp(c.t, c.name, AppModeCP, "", AppModeCP, c.opts.isipv6, true, []string{}, dockerVolumes, "")
if err != nil {
return err
}
app.CreateMainApp(env, cmd)
if c.opts.runPostgresMigration {
if err := runPostgresMigration(app, env); err != nil {
return err
}
}
if err := app.mainApp.Start(); err != nil {
return err
}
c.apps[AppModeCP] = app
pf := UniversalNetworking{
IP: app.ip,
ApiServerPort: app.ports["5681"],
SshPort: app.ports["22"],
}
c.controlplane, err = NewUniversalControlPlane(c.t, mode, c.name, c.verbose, pf, c.opts.apiHeaders, c.opts.setupKumactl)
if err != nil {
return err
}
for name, updateFuncs := range c.opts.meshUpdateFuncs {
for _, f := range updateFuncs {
Logf("applying update function to mesh %q", name)
err := c.controlplane.kumactl.KumactlUpdateObject("mesh", name,
func(resource core_model.Resource) core_model.Resource {
mesh := resource.(*core_mesh.MeshResource)
mesh.Spec = f(mesh.Spec)
return mesh
})
if err != nil {
return err
}
}
}
if c.opts.verifyKuma {
return c.VerifyKuma()
}
return nil
}
func (c *UniversalCluster) GetKuma() ControlPlane {
return c.controlplane
}
func (c *UniversalCluster) GetKumaCPLogs() (string, error) {
return c.apps[AppModeCP].mainApp.Out(), nil
}
func (c *UniversalCluster) VerifyKuma() error {
return c.controlplane.kumactl.RunKumactl("get", "dataplanes")
}
func (c *UniversalCluster) DeleteKuma() error {
err := c.apps[AppModeCP].Stop()
delete(c.apps, AppModeCP)
c.controlplane = nil
return err
}
func (c *UniversalCluster) GetKumactlOptions() *KumactlOptions {
return c.controlplane.kumactl
}
// K8s
func (c *UniversalCluster) GetKubectlOptions(namespace ...string) *k8s.KubectlOptions {
return nil
}
func (c *UniversalCluster) CreateNamespace(namespace string) error {
return nil
}
func (c *UniversalCluster) DeleteNamespace(namespace string) error {
return nil
}
func (c *UniversalCluster) CreateDP(app *UniversalApp, name, mesh, ip, dpyaml, token string, builtindns bool, concurrency int) error {
cpIp := c.controlplane.Networking().IP
cpAddress := "https://" + net.JoinHostPort(cpIp, "5678")
app.CreateDP(token, cpAddress, name, mesh, ip, dpyaml, builtindns, "", concurrency, app.dpEnv)
return app.dpApp.Start()
}
func (c *UniversalCluster) CreateZoneIngress(app *UniversalApp, name, ip, dpyaml, token string, builtindns bool) error {
app.CreateDP(token, c.controlplane.Networking().BootstrapAddress(), name, "", ip, dpyaml, builtindns, "ingress", 0, app.dpEnv)
if err := c.addIngressEnvoyTunnel(); err != nil {
return err
}
return app.dpApp.Start()
}
func (c *UniversalCluster) CreateZoneEgress(
app *UniversalApp,
name, ip, dpYAML, token string,
builtinDNS bool,
) error {
app.CreateDP(token, c.controlplane.Networking().BootstrapAddress(), name, "", ip, dpYAML, builtinDNS, "egress", 0, app.dpEnv)
if err := c.addEgressEnvoyTunnel(); err != nil {
return err
}
return app.dpApp.Start()
}
func (c *UniversalCluster) DeployApp(opt ...AppDeploymentOption) error {
var opts appDeploymentOptions
opts.apply(opt...)
appname := opts.appname
token := opts.token
transparent := opts.transparent != nil && *opts.transparent // default false
args := opts.appArgs
if opts.verbose == nil {
opts.verbose = &c.verbose
}
caps := []string{}
if transparent {
caps = append(caps, "NET_ADMIN", "NET_RAW")
}
Logf("IPV6 is %v", opts.isipv6)
app, err := NewUniversalApp(c.t, c.name, opts.name, opts.mesh, AppMode(appname), opts.isipv6, *opts.verbose, caps, opts.dockerVolumes, opts.dockerContainerName)
if err != nil {
return err
}
// We need to record the app before running any other options,
// since those options might fail. If they do, we have a running
// container that isn't fully configured, and we need it to be
// recorded so that DismissCluster can clean it up.
Logf("Started universal app %q in container %q", opts.name, app.container)
if _, ok := c.apps[opts.name]; ok {
return errors.Errorf("app %q already exists", opts.name)
}
c.apps[opts.name] = app
if !opts.omitDataplane {
if opts.kumactlFlow {
dataplaneResource := template.Render(opts.appYaml, map[string]string{
"name": opts.name,
"address": app.ip,
})
err := c.GetKumactlOptions().KumactlApplyFromString(string(dataplaneResource))
if err != nil {
return err
}
}
if opts.dpVersion != "" {
// override needs to be before setting up transparent proxy.
// Otherwise, we won't be able to fetch specific Kuma DP version.
if err := app.OverrideDpVersion(opts.dpVersion); err != nil {
return err
}
}
builtindns := opts.builtindns == nil || *opts.builtindns
if transparent {
app.setupTransparent(c.controlplane.Networking().IP, builtindns, opts.transparentProxyV1)
}
ip := app.ip
var dataplaneResource string
if opts.kumactlFlow {
dataplaneResource = ""
} else {
dataplaneResource = opts.appYaml
}
if opts.mesh == "" {
opts.mesh = "default"
}
app.dpEnv = opts.dpEnvs
if err := c.CreateDP(app, opts.name, opts.mesh, ip, dataplaneResource, token, builtindns, opts.concurrency); err != nil {
return err
}
}
if opts.boundToContainerIp {
args = append(args, "--ip", app.ip)
}
if !opts.proxyOnly {
app.CreateMainApp(nil, args)
err = app.mainApp.Start()
if err != nil {
return err
}
}
return nil
}
func runPostgresMigration(kumaCP *UniversalApp, envVars map[string]string) error {
args := []string{
"/usr/bin/kuma-cp", "migrate", "up",
}
sshPort := kumaCP.GetPublicPort("22")
if sshPort == "" {
return errors.New("missing public port: 22")
}
app := ssh.NewApp(kumaCP.containerName, "", kumaCP.verbose, sshPort, envVars, args)
if err := app.Run(); err != nil {
return errors.Errorf("db migration err: %s\nstderr :%s\nstdout %s", err.Error(), app.Err(), app.Out())
}
return nil
}
func (c *UniversalCluster) GetApp(appName string) *UniversalApp
|
{
return c.apps[appName]
}
|
identifier_body
|
|
universal_cluster.go
|
.opts.apiHeaders, c.opts.setupKumactl)
if err != nil {
return err
}
for name, updateFuncs := range c.opts.meshUpdateFuncs {
for _, f := range updateFuncs {
Logf("applying update function to mesh %q", name)
err := c.controlplane.kumactl.KumactlUpdateObject("mesh", name,
func(resource core_model.Resource) core_model.Resource {
mesh := resource.(*core_mesh.MeshResource)
mesh.Spec = f(mesh.Spec)
return mesh
})
if err != nil {
return err
}
}
}
if c.opts.verifyKuma {
return c.VerifyKuma()
}
return nil
}
func (c *UniversalCluster) GetKuma() ControlPlane {
return c.controlplane
}
func (c *UniversalCluster) GetKumaCPLogs() (string, error) {
return c.apps[AppModeCP].mainApp.Out(), nil
}
func (c *UniversalCluster) VerifyKuma() error {
return c.controlplane.kumactl.RunKumactl("get", "dataplanes")
}
func (c *UniversalCluster) DeleteKuma() error {
err := c.apps[AppModeCP].Stop()
delete(c.apps, AppModeCP)
c.controlplane = nil
return err
}
func (c *UniversalCluster) GetKumactlOptions() *KumactlOptions {
return c.controlplane.kumactl
}
// K8s
func (c *UniversalCluster) GetKubectlOptions(namespace ...string) *k8s.KubectlOptions {
return nil
}
func (c *UniversalCluster) CreateNamespace(namespace string) error {
return nil
}
func (c *UniversalCluster) DeleteNamespace(namespace string) error {
return nil
}
func (c *UniversalCluster) CreateDP(app *UniversalApp, name, mesh, ip, dpyaml, token string, builtindns bool, concurrency int) error {
cpIp := c.controlplane.Networking().IP
cpAddress := "https://" + net.JoinHostPort(cpIp, "5678")
app.CreateDP(token, cpAddress, name, mesh, ip, dpyaml, builtindns, "", concurrency, app.dpEnv)
return app.dpApp.Start()
}
func (c *UniversalCluster) CreateZoneIngress(app *UniversalApp, name, ip, dpyaml, token string, builtindns bool) error {
app.CreateDP(token, c.controlplane.Networking().BootstrapAddress(), name, "", ip, dpyaml, builtindns, "ingress", 0, app.dpEnv)
if err := c.addIngressEnvoyTunnel(); err != nil {
return err
}
return app.dpApp.Start()
}
func (c *UniversalCluster) CreateZoneEgress(
app *UniversalApp,
name, ip, dpYAML, token string,
builtinDNS bool,
) error {
app.CreateDP(token, c.controlplane.Networking().BootstrapAddress(), name, "", ip, dpYAML, builtinDNS, "egress", 0, app.dpEnv)
if err := c.addEgressEnvoyTunnel(); err != nil {
return err
}
return app.dpApp.Start()
}
func (c *UniversalCluster) DeployApp(opt ...AppDeploymentOption) error {
var opts appDeploymentOptions
opts.apply(opt...)
appname := opts.appname
token := opts.token
transparent := opts.transparent != nil && *opts.transparent // default false
args := opts.appArgs
if opts.verbose == nil {
opts.verbose = &c.verbose
}
caps := []string{}
if transparent {
caps = append(caps, "NET_ADMIN", "NET_RAW")
}
Logf("IPV6 is %v", opts.isipv6)
app, err := NewUniversalApp(c.t, c.name, opts.name, opts.mesh, AppMode(appname), opts.isipv6, *opts.verbose, caps, opts.dockerVolumes, opts.dockerContainerName)
if err != nil {
return err
}
// We need to record the app before running any other options,
// since those options might fail. If they do, we have a running
// container that isn't fully configured, and we need it to be
// recorded so that DismissCluster can clean it up.
Logf("Started universal app %q in container %q", opts.name, app.container)
if _, ok := c.apps[opts.name]; ok {
return errors.Errorf("app %q already exists", opts.name)
}
c.apps[opts.name] = app
if !opts.omitDataplane {
if opts.kumactlFlow {
dataplaneResource := template.Render(opts.appYaml, map[string]string{
"name": opts.name,
"address": app.ip,
})
err := c.GetKumactlOptions().KumactlApplyFromString(string(dataplaneResource))
if err != nil {
return err
}
}
if opts.dpVersion != "" {
// override needs to be before setting up transparent proxy.
// Otherwise, we won't be able to fetch specific Kuma DP version.
if err := app.OverrideDpVersion(opts.dpVersion); err != nil {
return err
}
}
builtindns := opts.builtindns == nil || *opts.builtindns
if transparent {
app.setupTransparent(c.controlplane.Networking().IP, builtindns, opts.transparentProxyV1)
}
ip := app.ip
var dataplaneResource string
if opts.kumactlFlow {
dataplaneResource = ""
} else {
dataplaneResource = opts.appYaml
}
if opts.mesh == "" {
opts.mesh = "default"
}
app.dpEnv = opts.dpEnvs
if err := c.CreateDP(app, opts.name, opts.mesh, ip, dataplaneResource, token, builtindns, opts.concurrency); err != nil {
return err
}
}
if opts.boundToContainerIp {
args = append(args, "--ip", app.ip)
}
if !opts.proxyOnly {
app.CreateMainApp(nil, args)
err = app.mainApp.Start()
if err != nil {
return err
}
}
return nil
}
func runPostgresMigration(kumaCP *UniversalApp, envVars map[string]string) error {
args := []string{
"/usr/bin/kuma-cp", "migrate", "up",
}
sshPort := kumaCP.GetPublicPort("22")
if sshPort == "" {
return errors.New("missing public port: 22")
}
app := ssh.NewApp(kumaCP.containerName, "", kumaCP.verbose, sshPort, envVars, args)
if err := app.Run(); err != nil {
return errors.Errorf("db migration err: %s\nstderr :%s\nstdout %s", err.Error(), app.Err(), app.Out())
}
return nil
}
func (c *UniversalCluster) GetApp(appName string) *UniversalApp {
return c.apps[appName]
}
func (c *UniversalCluster) DeleteApp(appname string) error {
app, ok := c.apps[appname]
if !ok {
return errors.Errorf("App %s not found for deletion", appname)
}
if err := app.Stop(); err != nil {
return err
}
delete(c.apps, appname)
return nil
}
func (c *UniversalCluster) DeleteMesh(mesh string) error {
now := time.Now()
_, err := retry.DoWithRetryE(c.t, "remove mesh", DefaultRetries, 1*time.Second,
func() (string, error) {
return "", c.GetKumactlOptions().KumactlDelete("mesh", mesh, "")
})
Logf("mesh: " + mesh + " deleted in: " + time.Since(now).String())
return err
}
func (c *UniversalCluster) DeleteMeshApps(mesh string) error {
for name := range c.apps {
if c.GetApp(name).mesh == mesh {
if err := c.DeleteApp(name); err != nil {
return err
}
}
}
return nil
}
func (c *UniversalCluster) Exec(namespace, podName, appname string, cmd ...string) (string, string, error) {
app, ok := c.apps[appname]
if !ok {
return "", "", errors.Errorf("App %s not found", appname)
}
sshApp := ssh.NewApp(app.containerName, "", c.verbose, app.ports[sshPort], nil, cmd)
err := sshApp.Run()
return sshApp.Out(), sshApp.Err(), err
}
func (c *UniversalCluster) GetTesting() testing.TestingT {
return c.t
}
func (c *UniversalCluster) Deployment(name string) Deployment {
return c.deployments[name]
}
func (c *UniversalCluster) Deploy(deployment Deployment) error {
c.deployments[deployment.Name()] = deployment
return deployment.Deploy(c)
}
func (c *UniversalCluster) DeleteDeployment(name string) error {
deployment, ok := c.deployments[name]
if !ok {
return errors.Errorf("deployment %s not found", name)
}
if err := deployment.Delete(c); err != nil {
return err
}
delete(c.deployments, name)
return nil
}
func (c *UniversalCluster)
|
GetZoneIngressNetworking
|
identifier_name
|
|
universal_cluster.go
|
!= nil {
errs = multierr.Append(errs, err)
}
}
for name, deployment := range c.deployments {
if err := deployment.Delete(c); err != nil {
errs = multierr.Append(errs, err)
}
delete(c.deployments, name)
}
return errs
}
func (c *UniversalCluster) Verbose() bool {
return c.verbose
}
func (c *UniversalCluster) DeployKuma(mode core.CpMode, opt ...KumaDeploymentOption) error {
if mode == core.Zone {
opt = append([]KumaDeploymentOption{WithEnvs(Config.KumaZoneUniversalEnvVars)}, opt...)
} else {
opt = append([]KumaDeploymentOption{WithEnvs(Config.KumaUniversalEnvVars)}, opt...)
}
c.opts.apply(opt...)
if c.opts.installationMode != KumactlInstallationMode {
return errors.Errorf("universal clusters only support the '%s' installation mode but got '%s'", KumactlInstallationMode, c.opts.installationMode)
}
env := map[string]string{"KUMA_MODE": mode, "KUMA_DNS_SERVER_PORT": "53"}
for k, v := range c.opts.env {
env[k] = v
}
if c.opts.globalAddress != "" {
env["KUMA_MULTIZONE_ZONE_GLOBAL_ADDRESS"] = c.opts.globalAddress
}
if c.opts.hdsDisabled {
env["KUMA_DP_SERVER_HDS_ENABLED"] = "false"
}
if Config.XDSApiVersion != "" {
env["KUMA_BOOTSTRAP_SERVER_API_VERSION"] = Config.XDSApiVersion
}
if Config.CIDR != "" {
env["KUMA_DNS_SERVER_CIDR"] = Config.CIDR
}
var dockerVolumes []string
if c.opts.yamlConfig != "" {
path, err := os.MkdirTemp("", "e2e-cp-cfg-*")
if err != nil
|
path = filepath.Join(path, "kuma-cp.conf")
if err := os.WriteFile(path, []byte(c.opts.yamlConfig), 0o600); err != nil {
return err
}
dockerVolumes = append(dockerVolumes, path+":/kuma/kuma-cp.conf")
}
cmd := []string{"kuma-cp", "run", "--config-file", "/kuma/kuma-cp.conf"}
if mode == core.Zone {
zoneName := c.opts.zoneName
if zoneName == "" {
zoneName = c.name
}
env["KUMA_MULTIZONE_ZONE_NAME"] = zoneName
env["KUMA_MULTIZONE_ZONE_KDS_TLS_SKIP_VERIFY"] = "true"
}
app, err := NewUniversalApp(c.t, c.name, AppModeCP, "", AppModeCP, c.opts.isipv6, true, []string{}, dockerVolumes, "")
if err != nil {
return err
}
app.CreateMainApp(env, cmd)
if c.opts.runPostgresMigration {
if err := runPostgresMigration(app, env); err != nil {
return err
}
}
if err := app.mainApp.Start(); err != nil {
return err
}
c.apps[AppModeCP] = app
pf := UniversalNetworking{
IP: app.ip,
ApiServerPort: app.ports["5681"],
SshPort: app.ports["22"],
}
c.controlplane, err = NewUniversalControlPlane(c.t, mode, c.name, c.verbose, pf, c.opts.apiHeaders, c.opts.setupKumactl)
if err != nil {
return err
}
for name, updateFuncs := range c.opts.meshUpdateFuncs {
for _, f := range updateFuncs {
Logf("applying update function to mesh %q", name)
err := c.controlplane.kumactl.KumactlUpdateObject("mesh", name,
func(resource core_model.Resource) core_model.Resource {
mesh := resource.(*core_mesh.MeshResource)
mesh.Spec = f(mesh.Spec)
return mesh
})
if err != nil {
return err
}
}
}
if c.opts.verifyKuma {
return c.VerifyKuma()
}
return nil
}
func (c *UniversalCluster) GetKuma() ControlPlane {
return c.controlplane
}
func (c *UniversalCluster) GetKumaCPLogs() (string, error) {
return c.apps[AppModeCP].mainApp.Out(), nil
}
func (c *UniversalCluster) VerifyKuma() error {
return c.controlplane.kumactl.RunKumactl("get", "dataplanes")
}
func (c *UniversalCluster) DeleteKuma() error {
err := c.apps[AppModeCP].Stop()
delete(c.apps, AppModeCP)
c.controlplane = nil
return err
}
func (c *UniversalCluster) GetKumactlOptions() *KumactlOptions {
return c.controlplane.kumactl
}
// K8s
func (c *UniversalCluster) GetKubectlOptions(namespace ...string) *k8s.KubectlOptions {
return nil
}
func (c *UniversalCluster) CreateNamespace(namespace string) error {
return nil
}
func (c *UniversalCluster) DeleteNamespace(namespace string) error {
return nil
}
func (c *UniversalCluster) CreateDP(app *UniversalApp, name, mesh, ip, dpyaml, token string, builtindns bool, concurrency int) error {
cpIp := c.controlplane.Networking().IP
cpAddress := "https://" + net.JoinHostPort(cpIp, "5678")
app.CreateDP(token, cpAddress, name, mesh, ip, dpyaml, builtindns, "", concurrency, app.dpEnv)
return app.dpApp.Start()
}
func (c *UniversalCluster) CreateZoneIngress(app *UniversalApp, name, ip, dpyaml, token string, builtindns bool) error {
app.CreateDP(token, c.controlplane.Networking().BootstrapAddress(), name, "", ip, dpyaml, builtindns, "ingress", 0, app.dpEnv)
if err := c.addIngressEnvoyTunnel(); err != nil {
return err
}
return app.dpApp.Start()
}
func (c *UniversalCluster) CreateZoneEgress(
app *UniversalApp,
name, ip, dpYAML, token string,
builtinDNS bool,
) error {
app.CreateDP(token, c.controlplane.Networking().BootstrapAddress(), name, "", ip, dpYAML, builtinDNS, "egress", 0, app.dpEnv)
if err := c.addEgressEnvoyTunnel(); err != nil {
return err
}
return app.dpApp.Start()
}
func (c *UniversalCluster) DeployApp(opt ...AppDeploymentOption) error {
var opts appDeploymentOptions
opts.apply(opt...)
appname := opts.appname
token := opts.token
transparent := opts.transparent != nil && *opts.transparent // default false
args := opts.appArgs
if opts.verbose == nil {
opts.verbose = &c.verbose
}
caps := []string{}
if transparent {
caps = append(caps, "NET_ADMIN", "NET_RAW")
}
Logf("IPV6 is %v", opts.isipv6)
app, err := NewUniversalApp(c.t, c.name, opts.name, opts.mesh, AppMode(appname), opts.isipv6, *opts.verbose, caps, opts.dockerVolumes, opts.dockerContainerName)
if err != nil {
return err
}
// We need to record the app before running any other options,
// since those options might fail. If they do, we have a running
// container that isn't fully configured, and we need it to be
// recorded so that DismissCluster can clean it up.
Logf("Started universal app %q in container %q", opts.name, app.container)
if _, ok := c.apps[opts.name]; ok {
return errors.Errorf("app %q already exists", opts.name)
}
c.apps[opts.name] = app
if !opts.omitDataplane {
if opts.kumactlFlow {
dataplaneResource := template.Render(opts.appYaml, map[string]string{
"name": opts.name,
"address": app.ip,
})
err := c.GetKumactlOptions().KumactlApplyFromString(string(dataplaneResource))
if err != nil {
return err
}
}
if opts.dpVersion != "" {
// override needs to be before setting up transparent proxy.
// Otherwise, we won't be able to fetch specific Kuma DP version.
if err := app.OverrideDpVersion(opts.dpVersion); err != nil {
return err
}
}
builtindns := opts.builtindns == nil || *opts.builtindns
if transparent {
app.setupTransparent(c.controlplane.Networking().IP, builtindns, opts.transparentProxyV1)
}
ip := app.ip
var dataplaneResource string
if opts.kumactlFlow {
dataplaneResource = ""
} else {
dataplaneResource = opts.appYaml
}
if opts.mesh == "" {
opts.mesh = "default"
}
app.dpEnv = opts
|
{
return err
}
|
conditional_block
|
request.go
|
Kind string `json:"kind"`
APIVersion string `json:"apiVersion"`
Status string `json:"status"`
Message string `json:"message"`
Reason string `json:"reason"`
Code int `json:"code"`
}
// Do runs the given HTTP request.
func Do(method, url, body, certificateAuthorityData, clientCertificateData, clientKeyData, token, username, password string, insecureSkipTLSVerify bool, timeout int64) (string, error) {
var tlsConfig *tls.Config
var err error
tlsConfig, err = httpClientForRootCAs(certificateAuthorityData, clientCertificateData, clientKeyData, insecureSkipTLSVerify)
if err != nil {
return "", err
}
client := &http.Client{
Timeout: time.Duration(timeout) * time.Second,
Transport: &http.Transport{
TLSClientConfig: tlsConfig,
Proxy: http.ProxyFromEnvironment,
},
}
req, err := http.NewRequest(method, url, bytes.NewBuffer([]byte(body)))
if err != nil {
return "", err
}
req.Header.Set("Accept", "application/json")
if method == "PATCH" {
req.Header.Set("Content-Type", "application/json-patch+json")
} else {
req.Header.Set("Content-Type", "application/json")
}
if token != "" {
req.Header.Set("Authorization", "Bearer "+token)
}
if username != "" && password != "" {
req.SetBasicAuth(username, password)
}
resp, err := client.Do(req)
if err != nil {
return "", err
}
defer resp.Body.Close()
if !(resp.StatusCode >= 200 && resp.StatusCode < 300) {
var apiError APIError
err := json.NewDecoder(resp.Body).Decode(&apiError)
if err != nil {
return "", fmt.Errorf(resp.Status)
}
return "", fmt.Errorf(apiError.Message)
}
respBody, err := ioutil.ReadAll(resp.Body)
if err != nil {
return "", err
}
return string(respBody), nil
}
// httpClientForRootCAs return an HTTP client which trusts the provided root CAs.
func httpClientForRootCAs(certificateAuthorityData, clientCertificateData, clientKeyData string, insecureSkipTLSVerify bool) (*tls.Config, error) {
tlsConfig := tls.Config{}
if certificateAuthorityData != "" {
tlsConfig = tls.Config{RootCAs: x509.NewCertPool()}
rootCA := []byte(certificateAuthorityData)
if !tlsConfig.RootCAs.AppendCertsFromPEM(rootCA) {
return nil, fmt.Errorf("no certs found in root CA file")
}
}
if clientCertificateData != "" && clientKeyData != "" {
cert, err := tls.X509KeyPair([]byte(clientCertificateData), []byte(clientKeyData))
if err != nil {
return nil, err
}
tlsConfig.Certificates = []tls.Certificate{cert}
}
tlsConfig.InsecureSkipVerify = insecureSkipTLSVerify
return &tlsConfig, nil
}
// AWSGetClusters returns all EKS clusters from AWS.
func AWSGetClusters(accessKeyId, secretAccessKey, region string) (string, error) {
var clusters []*eks.Cluster
var names []*string
var nextToken *string
cred := credentials.NewStaticCredentials(accessKeyId, secretAccessKey, "")
sess, err := session.NewSession(&aws.Config{Region: aws.String(region), Credentials: cred})
if err != nil {
return "", err
}
eksClient := eks.New(sess)
for {
c, err := eksClient.ListClusters(&eks.ListClustersInput{NextToken: nextToken})
if err != nil {
return "", err
}
names = append(names, c.Clusters...)
if c.NextToken == nil {
break
}
nextToken = c.NextToken
}
for _, name := range names {
cluster, err := eksClient.DescribeCluster(&eks.DescribeClusterInput{Name: name})
if err != nil {
return "", err
}
if *cluster.Cluster.Status == eks.ClusterStatusActive {
clusters = append(clusters, cluster.Cluster)
}
}
if clusters != nil {
b, err := json.Marshal(clusters)
if err != nil {
return "", err
}
return string(b), nil
}
return "", nil
}
// AWSGetToken returns a bearer token for Kubernetes API requests.
// See: https://github.com/kubernetes-sigs/aws-iam-authenticator/blob/7547c74e660f8d34d9980f2c69aa008eed1f48d0/pkg/token/token.go#L310
func AWSGetToken(accessKeyId, secretAccessKey, region, clusterID string) (string, error) {
cred := credentials.NewStaticCredentials(accessKeyId, secretAccessKey, "")
sess, err := session.NewSession(&aws.Config{Region: aws.String(region), Credentials: cred})
if err != nil
|
stsClient := sts.New(sess)
request, _ := stsClient.GetCallerIdentityRequest(&sts.GetCallerIdentityInput{})
request.HTTPRequest.Header.Add("x-k8s-aws-id", clusterID)
presignedURLString, err := request.Presign(60)
if err != nil {
return "", err
}
return fmt.Sprintf(`{"token": "k8s-aws-v1.%s"}`, base64.RawURLEncoding.EncodeToString([]byte(presignedURLString))), nil
}
// AzureGetClusters return all Kubeconfigs for all AKS clusters for the provided subscription and resource group.
func AzureGetClusters(subscriptionID, clientID, clientSecret, tenantID, resourceGroupName string, admin bool) (string, error) {
ctx := context.Background()
client := containerservice.NewManagedClustersClient(subscriptionID)
authorizer, err := getAzureAuthorizer(clientID, clientSecret, tenantID)
if err != nil {
return "", err
}
client.Authorizer = authorizer
var clusters []string
for list, err := client.ListComplete(ctx); list.NotDone(); err = list.Next() {
if err != nil {
return "", err
}
var res containerservice.CredentialResults
name := *list.Value().Name
if admin {
res, err = client.ListClusterAdminCredentials(ctx, resourceGroupName, name)
if err != nil {
return "", err
}
} else {
res, err = client.ListClusterUserCredentials(ctx, resourceGroupName, name)
if err != nil {
return "", err
}
}
for _, kubeconfig := range *res.Kubeconfigs {
var kubeconfigJSON interface{}
err := yaml.Unmarshal(*kubeconfig.Value, &kubeconfigJSON)
if err != nil {
return "", err
}
kubeconfigJSON = convert(kubeconfigJSON)
kubeconfigJSONString, err := json.Marshal(kubeconfigJSON)
if err != nil {
return "", err
}
clusters = append(clusters, fmt.Sprintf("{\"name\": \"%s_%s_%s\", \"kubeconfig\": %s}", *kubeconfig.Name, resourceGroupName, name, kubeconfigJSONString))
}
}
return fmt.Sprintf("[%s]", strings.Join(clusters, ",")), nil
}
func getAzureAuthorizer(clientID, clientSecret, tenantID string) (autorest.Authorizer, error) {
oauthConfig, err := adal.NewOAuthConfig("https://login.microsoftonline.com/", tenantID)
if err != nil {
return nil, err
}
token, err := adal.NewServicePrincipalToken(*oauthConfig, clientID, clientSecret, "https://management.azure.com/")
if err != nil {
return nil, err
}
return autorest.NewBearerAuthorizer(token), nil
}
// convert the map[interface{}]interface{} returned from yaml.Unmarshal to a map[string]interface{} for the usage in json.Marshal.
// See: https://stackoverflow.com/a/40737676
func convert(i interface{}) interface{} {
switch x := i.(type) {
case map[interface{}]interface{}:
m2 := map[string]interface{}{}
for k, v := range x {
m2[k.(string)] = convert(v)
}
return m2
case []interface{}:
for i, v := range x {
x[i] = convert(v)
}
}
return i
}
// OIDCGetLink returns the link for the configured OIDC provider. The Link can then be used by the user to login.
func OIDCGetLink(discoveryURL, clientID, clientSecret, redirectURL string) (string, error) {
ctx := context.Background()
provider, err := oidc.NewProvider(ctx, discoveryURL)
if err != nil {
return "", err
}
oauth2Config := oauth2.Config{
ClientID: clientID,
ClientSecret: clientSecret,
RedirectURL: redirectURL,
Endpoint: provider.Endpoint(),
Scopes: []string{oidc.ScopeOpenID},
}
return fmt.Sprintf("{\"url\": \"%s\"}", oauth2Config.AuthCodeURL("", oauth2.AccessTypeOffline, oauth2.ApprovalForce)), nil
}
func OIDCGetRefreshToken(discoveryURL
|
{
return "", err
}
|
conditional_block
|
request.go
|
{
Kind string `json:"kind"`
APIVersion string `json:"apiVersion"`
Status string `json:"status"`
Message string `json:"message"`
Reason string `json:"reason"`
Code int `json:"code"`
}
// Do runs the given HTTP request.
func Do(method, url, body, certificateAuthorityData, clientCertificateData, clientKeyData, token, username, password string, insecureSkipTLSVerify bool, timeout int64) (string, error) {
var tlsConfig *tls.Config
var err error
tlsConfig, err = httpClientForRootCAs(certificateAuthorityData, clientCertificateData, clientKeyData, insecureSkipTLSVerify)
if err != nil {
return "", err
}
client := &http.Client{
Timeout: time.Duration(timeout) * time.Second,
Transport: &http.Transport{
TLSClientConfig: tlsConfig,
Proxy: http.ProxyFromEnvironment,
},
}
req, err := http.NewRequest(method, url, bytes.NewBuffer([]byte(body)))
if err != nil {
return "", err
}
req.Header.Set("Accept", "application/json")
if method == "PATCH" {
req.Header.Set("Content-Type", "application/json-patch+json")
} else {
req.Header.Set("Content-Type", "application/json")
}
if token != "" {
req.Header.Set("Authorization", "Bearer "+token)
}
if username != "" && password != "" {
req.SetBasicAuth(username, password)
}
resp, err := client.Do(req)
if err != nil {
return "", err
}
defer resp.Body.Close()
if !(resp.StatusCode >= 200 && resp.StatusCode < 300) {
var apiError APIError
err := json.NewDecoder(resp.Body).Decode(&apiError)
if err != nil {
return "", fmt.Errorf(resp.Status)
}
return "", fmt.Errorf(apiError.Message)
}
respBody, err := ioutil.ReadAll(resp.Body)
if err != nil {
return "", err
}
return string(respBody), nil
}
// httpClientForRootCAs return an HTTP client which trusts the provided root CAs.
func httpClientForRootCAs(certificateAuthorityData, clientCertificateData, clientKeyData string, insecureSkipTLSVerify bool) (*tls.Config, error) {
tlsConfig := tls.Config{}
if certificateAuthorityData != "" {
tlsConfig = tls.Config{RootCAs: x509.NewCertPool()}
rootCA := []byte(certificateAuthorityData)
if !tlsConfig.RootCAs.AppendCertsFromPEM(rootCA) {
return nil, fmt.Errorf("no certs found in root CA file")
}
}
if clientCertificateData != "" && clientKeyData != "" {
cert, err := tls.X509KeyPair([]byte(clientCertificateData), []byte(clientKeyData))
if err != nil {
return nil, err
}
tlsConfig.Certificates = []tls.Certificate{cert}
}
tlsConfig.InsecureSkipVerify = insecureSkipTLSVerify
return &tlsConfig, nil
}
// AWSGetClusters returns all EKS clusters from AWS.
func AWSGetClusters(accessKeyId, secretAccessKey, region string) (string, error) {
var clusters []*eks.Cluster
var names []*string
var nextToken *string
cred := credentials.NewStaticCredentials(accessKeyId, secretAccessKey, "")
sess, err := session.NewSession(&aws.Config{Region: aws.String(region), Credentials: cred})
if err != nil {
return "", err
}
eksClient := eks.New(sess)
for {
c, err := eksClient.ListClusters(&eks.ListClustersInput{NextToken: nextToken})
if err != nil {
return "", err
}
names = append(names, c.Clusters...)
if c.NextToken == nil {
break
}
nextToken = c.NextToken
}
for _, name := range names {
cluster, err := eksClient.DescribeCluster(&eks.DescribeClusterInput{Name: name})
if err != nil {
return "", err
}
if *cluster.Cluster.Status == eks.ClusterStatusActive {
clusters = append(clusters, cluster.Cluster)
}
}
if clusters != nil {
b, err := json.Marshal(clusters)
if err != nil {
return "", err
}
return string(b), nil
}
return "", nil
}
// AWSGetToken returns a bearer token for Kubernetes API requests.
// See: https://github.com/kubernetes-sigs/aws-iam-authenticator/blob/7547c74e660f8d34d9980f2c69aa008eed1f48d0/pkg/token/token.go#L310
func AWSGetToken(accessKeyId, secretAccessKey, region, clusterID string) (string, error) {
cred := credentials.NewStaticCredentials(accessKeyId, secretAccessKey, "")
sess, err := session.NewSession(&aws.Config{Region: aws.String(region), Credentials: cred})
if err != nil {
return "", err
}
stsClient := sts.New(sess)
request, _ := stsClient.GetCallerIdentityRequest(&sts.GetCallerIdentityInput{})
request.HTTPRequest.Header.Add("x-k8s-aws-id", clusterID)
presignedURLString, err := request.Presign(60)
if err != nil {
return "", err
}
return fmt.Sprintf(`{"token": "k8s-aws-v1.%s"}`, base64.RawURLEncoding.EncodeToString([]byte(presignedURLString))), nil
}
// AzureGetClusters return all Kubeconfigs for all AKS clusters for the provided subscription and resource group.
func AzureGetClusters(subscriptionID, clientID, clientSecret, tenantID, resourceGroupName string, admin bool) (string, error) {
ctx := context.Background()
client := containerservice.NewManagedClustersClient(subscriptionID)
authorizer, err := getAzureAuthorizer(clientID, clientSecret, tenantID)
if err != nil {
return "", err
}
client.Authorizer = authorizer
var clusters []string
for list, err := client.ListComplete(ctx); list.NotDone(); err = list.Next() {
if err != nil {
return "", err
}
var res containerservice.CredentialResults
name := *list.Value().Name
if admin {
res, err = client.ListClusterAdminCredentials(ctx, resourceGroupName, name)
if err != nil {
return "", err
}
} else {
res, err = client.ListClusterUserCredentials(ctx, resourceGroupName, name)
if err != nil {
return "", err
}
}
for _, kubeconfig := range *res.Kubeconfigs {
var kubeconfigJSON interface{}
err := yaml.Unmarshal(*kubeconfig.Value, &kubeconfigJSON)
if err != nil {
return "", err
}
kubeconfigJSON = convert(kubeconfigJSON)
kubeconfigJSONString, err := json.Marshal(kubeconfigJSON)
if err != nil {
return "", err
}
clusters = append(clusters, fmt.Sprintf("{\"name\": \"%s_%s_%s\", \"kubeconfig\": %s}", *kubeconfig.Name, resourceGroupName, name, kubeconfigJSONString))
}
}
return fmt.Sprintf("[%s]", strings.Join(clusters, ",")), nil
}
func getAzureAuthorizer(clientID, clientSecret, tenantID string) (autorest.Authorizer, error) {
oauthConfig, err := adal.NewOAuthConfig("https://login.microsoftonline.com/", tenantID)
if err != nil {
return nil, err
}
token, err := adal.NewServicePrincipalToken(*oauthConfig, clientID, clientSecret, "https://management.azure.com/")
if err != nil {
return nil, err
}
return autorest.NewBearerAuthorizer(token), nil
}
// convert the map[interface{}]interface{} returned from yaml.Unmarshal to a map[string]interface{} for the usage in json.Marshal.
// See: https://stackoverflow.com/a/40737676
func convert(i interface{}) interface{} {
switch x := i.(type) {
case map[interface{}]interface{}:
m2 := map[string]interface{}{}
for k, v := range x {
m2[k.(string)] = convert(v)
}
return m2
case []interface{}:
for i, v := range x {
x[i] = convert(v)
}
}
return i
}
// OIDCGetLink returns the link for the configured OIDC provider. The Link can then be used by the user to login.
func OIDCGetLink(discoveryURL, clientID, clientSecret, redirectURL string) (string, error) {
ctx := context.Background()
provider, err := oidc.NewProvider(ctx, discoveryURL)
if err != nil {
return "", err
}
oauth2Config := oauth2.Config{
ClientID: clientID,
ClientSecret: clientSecret,
RedirectURL: redirectURL,
Endpoint: provider.Endpoint(),
Scopes: []string{oidc.ScopeOpenID},
}
return fmt.Sprintf("{\"url\": \"%s\"}", oauth2Config.AuthCodeURL("", oauth2.AccessTypeOffline, oauth2.ApprovalForce)), nil
}
func
|
(discoveryURL
|
OIDCGetRefreshToken
|
identifier_name
|
request.go
|
Kind string `json:"kind"`
APIVersion string `json:"apiVersion"`
Status string `json:"status"`
Message string `json:"message"`
Reason string `json:"reason"`
Code int `json:"code"`
}
// Do runs the given HTTP request.
func Do(method, url, body, certificateAuthorityData, clientCertificateData, clientKeyData, token, username, password string, insecureSkipTLSVerify bool, timeout int64) (string, error) {
var tlsConfig *tls.Config
var err error
tlsConfig, err = httpClientForRootCAs(certificateAuthorityData, clientCertificateData, clientKeyData, insecureSkipTLSVerify)
if err != nil {
return "", err
}
client := &http.Client{
Timeout: time.Duration(timeout) * time.Second,
Transport: &http.Transport{
TLSClientConfig: tlsConfig,
Proxy: http.ProxyFromEnvironment,
},
}
req, err := http.NewRequest(method, url, bytes.NewBuffer([]byte(body)))
if err != nil {
return "", err
}
req.Header.Set("Accept", "application/json")
if method == "PATCH" {
req.Header.Set("Content-Type", "application/json-patch+json")
} else {
req.Header.Set("Content-Type", "application/json")
}
if token != "" {
req.Header.Set("Authorization", "Bearer "+token)
}
if username != "" && password != "" {
req.SetBasicAuth(username, password)
}
resp, err := client.Do(req)
if err != nil {
return "", err
}
defer resp.Body.Close()
if !(resp.StatusCode >= 200 && resp.StatusCode < 300) {
var apiError APIError
err := json.NewDecoder(resp.Body).Decode(&apiError)
if err != nil {
return "", fmt.Errorf(resp.Status)
}
return "", fmt.Errorf(apiError.Message)
}
respBody, err := ioutil.ReadAll(resp.Body)
if err != nil {
return "", err
}
return string(respBody), nil
}
// httpClientForRootCAs return an HTTP client which trusts the provided root CAs.
func httpClientForRootCAs(certificateAuthorityData, clientCertificateData, clientKeyData string, insecureSkipTLSVerify bool) (*tls.Config, error) {
tlsConfig := tls.Config{}
if certificateAuthorityData != "" {
tlsConfig = tls.Config{RootCAs: x509.NewCertPool()}
rootCA := []byte(certificateAuthorityData)
if !tlsConfig.RootCAs.AppendCertsFromPEM(rootCA) {
return nil, fmt.Errorf("no certs found in root CA file")
}
}
if clientCertificateData != "" && clientKeyData != "" {
cert, err := tls.X509KeyPair([]byte(clientCertificateData), []byte(clientKeyData))
if err != nil {
return nil, err
}
tlsConfig.Certificates = []tls.Certificate{cert}
}
tlsConfig.InsecureSkipVerify = insecureSkipTLSVerify
return &tlsConfig, nil
}
// AWSGetClusters returns all EKS clusters from AWS.
func AWSGetClusters(accessKeyId, secretAccessKey, region string) (string, error) {
var clusters []*eks.Cluster
var names []*string
var nextToken *string
cred := credentials.NewStaticCredentials(accessKeyId, secretAccessKey, "")
sess, err := session.NewSession(&aws.Config{Region: aws.String(region), Credentials: cred})
if err != nil {
return "", err
}
eksClient := eks.New(sess)
for {
c, err := eksClient.ListClusters(&eks.ListClustersInput{NextToken: nextToken})
if err != nil {
return "", err
}
names = append(names, c.Clusters...)
if c.NextToken == nil {
break
}
nextToken = c.NextToken
}
for _, name := range names {
cluster, err := eksClient.DescribeCluster(&eks.DescribeClusterInput{Name: name})
if err != nil {
return "", err
}
if *cluster.Cluster.Status == eks.ClusterStatusActive {
clusters = append(clusters, cluster.Cluster)
}
}
if clusters != nil {
b, err := json.Marshal(clusters)
if err != nil {
return "", err
}
return string(b), nil
}
return "", nil
}
// AWSGetToken returns a bearer token for Kubernetes API requests.
// See: https://github.com/kubernetes-sigs/aws-iam-authenticator/blob/7547c74e660f8d34d9980f2c69aa008eed1f48d0/pkg/token/token.go#L310
func AWSGetToken(accessKeyId, secretAccessKey, region, clusterID string) (string, error)
|
// AzureGetClusters return all Kubeconfigs for all AKS clusters for the provided subscription and resource group.
func AzureGetClusters(subscriptionID, clientID, clientSecret, tenantID, resourceGroupName string, admin bool) (string, error) {
ctx := context.Background()
client := containerservice.NewManagedClustersClient(subscriptionID)
authorizer, err := getAzureAuthorizer(clientID, clientSecret, tenantID)
if err != nil {
return "", err
}
client.Authorizer = authorizer
var clusters []string
for list, err := client.ListComplete(ctx); list.NotDone(); err = list.Next() {
if err != nil {
return "", err
}
var res containerservice.CredentialResults
name := *list.Value().Name
if admin {
res, err = client.ListClusterAdminCredentials(ctx, resourceGroupName, name)
if err != nil {
return "", err
}
} else {
res, err = client.ListClusterUserCredentials(ctx, resourceGroupName, name)
if err != nil {
return "", err
}
}
for _, kubeconfig := range *res.Kubeconfigs {
var kubeconfigJSON interface{}
err := yaml.Unmarshal(*kubeconfig.Value, &kubeconfigJSON)
if err != nil {
return "", err
}
kubeconfigJSON = convert(kubeconfigJSON)
kubeconfigJSONString, err := json.Marshal(kubeconfigJSON)
if err != nil {
return "", err
}
clusters = append(clusters, fmt.Sprintf("{\"name\": \"%s_%s_%s\", \"kubeconfig\": %s}", *kubeconfig.Name, resourceGroupName, name, kubeconfigJSONString))
}
}
return fmt.Sprintf("[%s]", strings.Join(clusters, ",")), nil
}
func getAzureAuthorizer(clientID, clientSecret, tenantID string) (autorest.Authorizer, error) {
oauthConfig, err := adal.NewOAuthConfig("https://login.microsoftonline.com/", tenantID)
if err != nil {
return nil, err
}
token, err := adal.NewServicePrincipalToken(*oauthConfig, clientID, clientSecret, "https://management.azure.com/")
if err != nil {
return nil, err
}
return autorest.NewBearerAuthorizer(token), nil
}
// convert the map[interface{}]interface{} returned from yaml.Unmarshal to a map[string]interface{} for the usage in json.Marshal.
// See: https://stackoverflow.com/a/40737676
func convert(i interface{}) interface{} {
switch x := i.(type) {
case map[interface{}]interface{}:
m2 := map[string]interface{}{}
for k, v := range x {
m2[k.(string)] = convert(v)
}
return m2
case []interface{}:
for i, v := range x {
x[i] = convert(v)
}
}
return i
}
// OIDCGetLink returns the link for the configured OIDC provider. The Link can then be used by the user to login.
func OIDCGetLink(discoveryURL, clientID, clientSecret, redirectURL string) (string, error) {
ctx := context.Background()
provider, err := oidc.NewProvider(ctx, discoveryURL)
if err != nil {
return "", err
}
oauth2Config := oauth2.Config{
ClientID: clientID,
ClientSecret: clientSecret,
RedirectURL: redirectURL,
Endpoint: provider.Endpoint(),
Scopes: []string{oidc.ScopeOpenID},
}
return fmt.Sprintf("{\"url\": \"%s\"}", oauth2Config.AuthCodeURL("", oauth2.AccessTypeOffline, oauth2.ApprovalForce)), nil
}
func OIDCGetRefreshToken(discoveryURL
|
{
cred := credentials.NewStaticCredentials(accessKeyId, secretAccessKey, "")
sess, err := session.NewSession(&aws.Config{Region: aws.String(region), Credentials: cred})
if err != nil {
return "", err
}
stsClient := sts.New(sess)
request, _ := stsClient.GetCallerIdentityRequest(&sts.GetCallerIdentityInput{})
request.HTTPRequest.Header.Add("x-k8s-aws-id", clusterID)
presignedURLString, err := request.Presign(60)
if err != nil {
return "", err
}
return fmt.Sprintf(`{"token": "k8s-aws-v1.%s"}`, base64.RawURLEncoding.EncodeToString([]byte(presignedURLString))), nil
}
|
identifier_body
|
request.go
|
{
Kind string `json:"kind"`
APIVersion string `json:"apiVersion"`
Status string `json:"status"`
Message string `json:"message"`
Reason string `json:"reason"`
Code int `json:"code"`
}
// Do runs the given HTTP request.
func Do(method, url, body, certificateAuthorityData, clientCertificateData, clientKeyData, token, username, password string, insecureSkipTLSVerify bool, timeout int64) (string, error) {
var tlsConfig *tls.Config
var err error
tlsConfig, err = httpClientForRootCAs(certificateAuthorityData, clientCertificateData, clientKeyData, insecureSkipTLSVerify)
if err != nil {
return "", err
}
client := &http.Client{
Timeout: time.Duration(timeout) * time.Second,
Transport: &http.Transport{
TLSClientConfig: tlsConfig,
Proxy: http.ProxyFromEnvironment,
},
}
req, err := http.NewRequest(method, url, bytes.NewBuffer([]byte(body)))
if err != nil {
return "", err
}
req.Header.Set("Accept", "application/json")
if method == "PATCH" {
req.Header.Set("Content-Type", "application/json-patch+json")
} else {
req.Header.Set("Content-Type", "application/json")
}
if token != "" {
req.Header.Set("Authorization", "Bearer "+token)
}
if username != "" && password != "" {
req.SetBasicAuth(username, password)
}
resp, err := client.Do(req)
if err != nil {
return "", err
}
defer resp.Body.Close()
if !(resp.StatusCode >= 200 && resp.StatusCode < 300) {
var apiError APIError
err := json.NewDecoder(resp.Body).Decode(&apiError)
if err != nil {
return "", fmt.Errorf(resp.Status)
}
return "", fmt.Errorf(apiError.Message)
}
respBody, err := ioutil.ReadAll(resp.Body)
if err != nil {
return "", err
}
return string(respBody), nil
}
// httpClientForRootCAs return an HTTP client which trusts the provided root CAs.
func httpClientForRootCAs(certificateAuthorityData, clientCertificateData, clientKeyData string, insecureSkipTLSVerify bool) (*tls.Config, error) {
tlsConfig := tls.Config{}
if certificateAuthorityData != "" {
tlsConfig = tls.Config{RootCAs: x509.NewCertPool()}
rootCA := []byte(certificateAuthorityData)
if !tlsConfig.RootCAs.AppendCertsFromPEM(rootCA) {
return nil, fmt.Errorf("no certs found in root CA file")
}
}
if clientCertificateData != "" && clientKeyData != "" {
|
cert, err := tls.X509KeyPair([]byte(clientCertificateData), []byte(clientKeyData))
if err != nil {
return nil, err
}
tlsConfig.Certificates = []tls.Certificate{cert}
}
tlsConfig.InsecureSkipVerify = insecureSkipTLSVerify
return &tlsConfig, nil
}
// AWSGetClusters returns all EKS clusters from AWS.
func AWSGetClusters(accessKeyId, secretAccessKey, region string) (string, error) {
var clusters []*eks.Cluster
var names []*string
var nextToken *string
cred := credentials.NewStaticCredentials(accessKeyId, secretAccessKey, "")
sess, err := session.NewSession(&aws.Config{Region: aws.String(region), Credentials: cred})
if err != nil {
return "", err
}
eksClient := eks.New(sess)
for {
c, err := eksClient.ListClusters(&eks.ListClustersInput{NextToken: nextToken})
if err != nil {
return "", err
}
names = append(names, c.Clusters...)
if c.NextToken == nil {
break
}
nextToken = c.NextToken
}
for _, name := range names {
cluster, err := eksClient.DescribeCluster(&eks.DescribeClusterInput{Name: name})
if err != nil {
return "", err
}
if *cluster.Cluster.Status == eks.ClusterStatusActive {
clusters = append(clusters, cluster.Cluster)
}
}
if clusters != nil {
b, err := json.Marshal(clusters)
if err != nil {
return "", err
}
return string(b), nil
}
return "", nil
}
// AWSGetToken returns a bearer token for Kubernetes API requests.
// See: https://github.com/kubernetes-sigs/aws-iam-authenticator/blob/7547c74e660f8d34d9980f2c69aa008eed1f48d0/pkg/token/token.go#L310
func AWSGetToken(accessKeyId, secretAccessKey, region, clusterID string) (string, error) {
cred := credentials.NewStaticCredentials(accessKeyId, secretAccessKey, "")
sess, err := session.NewSession(&aws.Config{Region: aws.String(region), Credentials: cred})
if err != nil {
return "", err
}
stsClient := sts.New(sess)
request, _ := stsClient.GetCallerIdentityRequest(&sts.GetCallerIdentityInput{})
request.HTTPRequest.Header.Add("x-k8s-aws-id", clusterID)
presignedURLString, err := request.Presign(60)
if err != nil {
return "", err
}
return fmt.Sprintf(`{"token": "k8s-aws-v1.%s"}`, base64.RawURLEncoding.EncodeToString([]byte(presignedURLString))), nil
}
// AzureGetClusters return all Kubeconfigs for all AKS clusters for the provided subscription and resource group.
func AzureGetClusters(subscriptionID, clientID, clientSecret, tenantID, resourceGroupName string, admin bool) (string, error) {
ctx := context.Background()
client := containerservice.NewManagedClustersClient(subscriptionID)
authorizer, err := getAzureAuthorizer(clientID, clientSecret, tenantID)
if err != nil {
return "", err
}
client.Authorizer = authorizer
var clusters []string
for list, err := client.ListComplete(ctx); list.NotDone(); err = list.Next() {
if err != nil {
return "", err
}
var res containerservice.CredentialResults
name := *list.Value().Name
if admin {
res, err = client.ListClusterAdminCredentials(ctx, resourceGroupName, name)
if err != nil {
return "", err
}
} else {
res, err = client.ListClusterUserCredentials(ctx, resourceGroupName, name)
if err != nil {
return "", err
}
}
for _, kubeconfig := range *res.Kubeconfigs {
var kubeconfigJSON interface{}
err := yaml.Unmarshal(*kubeconfig.Value, &kubeconfigJSON)
if err != nil {
return "", err
}
kubeconfigJSON = convert(kubeconfigJSON)
kubeconfigJSONString, err := json.Marshal(kubeconfigJSON)
if err != nil {
return "", err
}
clusters = append(clusters, fmt.Sprintf("{\"name\": \"%s_%s_%s\", \"kubeconfig\": %s}", *kubeconfig.Name, resourceGroupName, name, kubeconfigJSONString))
}
}
return fmt.Sprintf("[%s]", strings.Join(clusters, ",")), nil
}
func getAzureAuthorizer(clientID, clientSecret, tenantID string) (autorest.Authorizer, error) {
oauthConfig, err := adal.NewOAuthConfig("https://login.microsoftonline.com/", tenantID)
if err != nil {
return nil, err
}
token, err := adal.NewServicePrincipalToken(*oauthConfig, clientID, clientSecret, "https://management.azure.com/")
if err != nil {
return nil, err
}
return autorest.NewBearerAuthorizer(token), nil
}
// convert the map[interface{}]interface{} returned from yaml.Unmarshal to a map[string]interface{} for the usage in json.Marshal.
// See: https://stackoverflow.com/a/40737676
func convert(i interface{}) interface{} {
switch x := i.(type) {
case map[interface{}]interface{}:
m2 := map[string]interface{}{}
for k, v := range x {
m2[k.(string)] = convert(v)
}
return m2
case []interface{}:
for i, v := range x {
x[i] = convert(v)
}
}
return i
}
// OIDCGetLink returns the link for the configured OIDC provider. The Link can then be used by the user to login.
func OIDCGetLink(discoveryURL, clientID, clientSecret, redirectURL string) (string, error) {
ctx := context.Background()
provider, err := oidc.NewProvider(ctx, discoveryURL)
if err != nil {
return "", err
}
oauth2Config := oauth2.Config{
ClientID: clientID,
ClientSecret: clientSecret,
RedirectURL: redirectURL,
Endpoint: provider.Endpoint(),
Scopes: []string{oidc.ScopeOpenID},
}
return fmt.Sprintf("{\"url\": \"%s\"}", oauth2Config.AuthCodeURL("", oauth2.AccessTypeOffline, oauth2.ApprovalForce)), nil
}
func OIDCGetRefreshToken(discoveryURL,
|
random_line_split
|
|
quantize_model.py
|
to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
from typing import Any, Dict, Optional, Tuple
import torch
from nncf.common.quantization.structs import QuantizationPreset
from nncf.config import NNCFConfig
from nncf.config.structures import BNAdaptationInitArgs
from nncf.config.structures import QuantizationRangeInitArgs
from nncf.data import Dataset
from nncf.parameters import ModelType
from nncf.parameters import TargetDevice
from nncf.quantization.advanced_parameters import AdvancedQuantizationParameters
from nncf.quantization.advanced_parameters import apply_advanced_parameters_to_config
from nncf.scopes import IgnoredScope
from nncf.scopes import convert_ignored_scope_to_list
from nncf.torch.dynamic_graph.context import no_nncf_trace
from nncf.torch.dynamic_graph.io_handling import replicate_same_tensors
from nncf.torch.dynamic_graph.io_handling import wrap_nncf_model_inputs_with_objwalk
from nncf.torch.dynamic_graph.io_handling import wrap_nncf_model_outputs_with_objwalk
from nncf.torch.initialization import PTInitializingDataLoader
from nncf.torch.model_creation import create_compressed_model
from nncf.torch.nested_objects_traversal import objwalk
from nncf.torch.nncf_module_replacement import replace_modules_by_nncf_modules
from nncf.torch.quantization.weights_compression import insert_pre_compression_operations
from nncf.torch.utils import get_model_device
from nncf.torch.utils import is_tensor
DEFAULT_RANGE_TYPE = "mean_min_max"
# TODO(alexsu52): It is a workaround and should be removed.
class CalibrationDataLoader(PTInitializingDataLoader):
"""
This class wraps the nncf.Dataset.
This is required for proper initialization of certain compression algorithms.
"""
def __init__(self, data_loader: Dataset):
super().__init__(data_loader)
self._length = None
@property
def batch_size(self):
data_source = getattr(self._data_loader, "_data_source")
return getattr(data_source, "batch_size", 1)
def __iter__(self):
return iter(self._data_loader.get_inference_data())
def __len__(self):
if self._length is None:
data = self._data_loader.get_inference_data()
self._length = CalibrationDataLoader._get_length(data)
return self._length
def get_inputs(self, dataloader_output: Any) -> Tuple[Tuple, Dict]:
if not isinstance(dataloader_output, tuple):
dataloader_output = (dataloader_output,)
return dataloader_output, {}
@staticmethod
def _get_length(iterable) -> int:
length = 0
for _ in iterable:
length = length + 1
return length
def _get_transformer_quantization_config(subset_size: int) -> Dict[str, Any]:
"""
Returns the quantization config for transformer-based models.
:param subset_size: Size of a subset to calculate activations
statistics used for quantization.
:return: The quantization config for transformer-based models.
"""
return {
"algorithm": "quantization",
"preset": "mixed",
"initializer": {
"range": {"num_init_samples": subset_size, "type": DEFAULT_RANGE_TYPE},
"batchnorm_adaptation": {"num_bn_adaptation_samples": 0},
},
"scope_overrides": {"activations": {"{re}.*matmul_0": {"mode": "symmetric"}}},
"ignored_scopes": [
"{re}.*Embeddings.*",
"{re}.*__add___[0-1]",
"{re}.*layer_norm_0",
"{re}.*matmul_1",
"{re}.*__truediv__*",
],
"overflow_fix": "first_layer_only",
}
def _get_default_quantization_config(preset: QuantizationPreset, subset_size: int) -> Dict[str, Any]:
"""
Returns the default quantization config
:param preset: A preset that controls the quantization mode
(symmetric and asymmetric). It can take the following values:
- `performance`: Symmetric quantization of weights and activations.
- `mixed`: Symmetric quantization of weights and asymmetric
quantization of activations.
:param subset_size: Size of a subset to calculate activations
statistics used for quantization.
:return: The default quantization config.
"""
return {
"algorithm": "quantization",
"preset": preset.value,
"initializer": {
"range": {"num_init_samples": subset_size, "type": DEFAULT_RANGE_TYPE},
"batchnorm_adaptation": {"num_bn_adaptation_samples": subset_size},
},
"overflow_fix": "first_layer_only",
}
def _create_nncf_config(
preset: QuantizationPreset,
target_device: TargetDevice,
subset_size: int,
model_type: Optional[ModelType],
ignored_scope: Optional[IgnoredScope],
advanced_parameters: Optional[AdvancedQuantizationParameters],
) -> NNCFConfig:
"""
Creates the NNCFConfig for the quantization algorithm.
:param preset: A preset that controls the quantization mode
(symmetric and asymmetric). It can take the following values:
- `performance`: Symmetric quantization of weights and activations.
- `mixed`: Symmetric quantization of weights and asymmetric
quantization of activations.
:param target_device: A target device the specificity of which will be taken
into account while compressing in order to obtain the best performance
for this type of device.
:param subset_size: Size of a subset to calculate activations
statistics used for quantization.
:param model_type: Model type is needed to specify additional patterns
in the model.
:param ignored_scope: An ignored scope that defined the list of model control
flow graph nodes to be ignored during quantization.
:param advanced_parameters: Advanced quantization parameters for
fine-tuning the quantization algorithm.
:return: NNCFConfig for the quantization algorithm.
"""
if model_type is None:
compression_config = _get_default_quantization_config(preset, subset_size)
elif model_type == ModelType.TRANSFORMER:
compression_config = _get_transformer_quantization_config(subset_size)
if ignored_scope is not None:
_ignored_scope = convert_ignored_scope_to_list(ignored_scope)
if "ignored_scopes" in compression_config:
compression_config["ignored_scopes"].extend(_ignored_scope)
else:
compression_config["ignored_scopes"] = _ignored_scope
compression_config["validate_scopes"] = ignored_scope.validate
if advanced_parameters is not None:
compression_config = apply_advanced_parameters_to_config(compression_config, advanced_parameters)
if model_type == ModelType.TRANSFORMER:
compression_config["validate_scopes"] = False
return NNCFConfig({"target_device": target_device.value, "compression": compression_config})
def quantize_impl(
model: torch.nn.Module,
calibration_dataset: Dataset,
preset: QuantizationPreset,
target_device: TargetDevice,
subset_size: int,
fast_bias_correction: bool,
model_type: Optional[ModelType] = None,
ignored_scope: Optional[IgnoredScope] = None,
advanced_parameters: Optional[AdvancedQuantizationParameters] = None,
) -> torch.nn.Module:
"""
Implementation of the `quantize()` method for the PyTorch backend.
"""
if fast_bias_correction is False:
raise ValueError(f"fast_bias_correction={fast_bias_correction} is not " "supported")
if ignored_scope is not None and ignored_scope.types:
raise RuntimeError(
"Quantization algorithm from the PyTorch backend "
"does not support operation types in the ignored "
"scopes yet"
)
if target_device == TargetDevice.CPU_SPR:
raise RuntimeError("target_device == CPU_SPR is not supported")
nncf_config = _create_nncf_config(
preset, target_device, subset_size, model_type, ignored_scope, advanced_parameters
)
calibration_data_loader = CalibrationDataLoader(calibration_dataset)
nncf_config.register_extra_structs(
[
QuantizationRangeInitArgs(data_loader=calibration_data_loader),
BNAdaptationInitArgs(data_loader=calibration_data_loader),
]
)
def wrap_inputs(args, kwargs):
|
def wrap_outputs(retval):
return wrap_nncf_model_outputs_with_objwalk(retval)
def create_dummy_forward_fn(data_loader, device):
def dummy_forward(model):
with no_nncf_trace():
data_item = next(iter(data_loader))
args, kwargs = data_loader.get_inputs(data_item)
def send_to_device(tensor):
return tensor.to(device)
args = objwalk(args, is_tensor, send_to_device)
kwargs = objwalk(kwargs, is_tensor, send_to_device)
args, kwargs = wrap_inputs(args, kwargs)
retval = model(*args, **kwargs)
retval = replicate_same_tensors(retval)
return wrap_outputs(retval)
return
|
return wrap_nncf_model_inputs_with_objwalk(args, kwargs)
|
identifier_body
|
quantize_model.py
|
or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
from typing import Any, Dict, Optional, Tuple
import torch
from nncf.common.quantization.structs import QuantizationPreset
from nncf.config import NNCFConfig
from nncf.config.structures import BNAdaptationInitArgs
from nncf.config.structures import QuantizationRangeInitArgs
from nncf.data import Dataset
from nncf.parameters import ModelType
from nncf.parameters import TargetDevice
from nncf.quantization.advanced_parameters import AdvancedQuantizationParameters
from nncf.quantization.advanced_parameters import apply_advanced_parameters_to_config
from nncf.scopes import IgnoredScope
from nncf.scopes import convert_ignored_scope_to_list
from nncf.torch.dynamic_graph.context import no_nncf_trace
from nncf.torch.dynamic_graph.io_handling import replicate_same_tensors
from nncf.torch.dynamic_graph.io_handling import wrap_nncf_model_inputs_with_objwalk
from nncf.torch.dynamic_graph.io_handling import wrap_nncf_model_outputs_with_objwalk
from nncf.torch.initialization import PTInitializingDataLoader
from nncf.torch.model_creation import create_compressed_model
from nncf.torch.nested_objects_traversal import objwalk
from nncf.torch.nncf_module_replacement import replace_modules_by_nncf_modules
from nncf.torch.quantization.weights_compression import insert_pre_compression_operations
from nncf.torch.utils import get_model_device
from nncf.torch.utils import is_tensor
DEFAULT_RANGE_TYPE = "mean_min_max"
# TODO(alexsu52): It is a workaround and should be removed.
class CalibrationDataLoader(PTInitializingDataLoader):
"""
This class wraps the nncf.Dataset.
This is required for proper initialization of certain compression algorithms.
"""
def __init__(self, data_loader: Dataset):
super().__init__(data_loader)
self._length = None
@property
def batch_size(self):
data_source = getattr(self._data_loader, "_data_source")
return getattr(data_source, "batch_size", 1)
def __iter__(self):
return iter(self._data_loader.get_inference_data())
def __len__(self):
if self._length is None:
data = self._data_loader.get_inference_data()
self._length = CalibrationDataLoader._get_length(data)
return self._length
def get_inputs(self, dataloader_output: Any) -> Tuple[Tuple, Dict]:
if not isinstance(dataloader_output, tuple):
dataloader_output = (dataloader_output,)
return dataloader_output, {}
@staticmethod
def _get_length(iterable) -> int:
length = 0
for _ in iterable:
length = length + 1
return length
def _get_transformer_quantization_config(subset_size: int) -> Dict[str, Any]:
"""
Returns the quantization config for transformer-based models.
:param subset_size: Size of a subset to calculate activations
statistics used for quantization.
:return: The quantization config for transformer-based models.
"""
return {
"algorithm": "quantization",
"preset": "mixed",
"initializer": {
"range": {"num_init_samples": subset_size, "type": DEFAULT_RANGE_TYPE},
"batchnorm_adaptation": {"num_bn_adaptation_samples": 0},
},
"scope_overrides": {"activations": {"{re}.*matmul_0": {"mode": "symmetric"}}},
"ignored_scopes": [
"{re}.*Embeddings.*",
"{re}.*__add___[0-1]",
"{re}.*layer_norm_0",
"{re}.*matmul_1",
"{re}.*__truediv__*",
],
"overflow_fix": "first_layer_only",
}
def _get_default_quantization_config(preset: QuantizationPreset, subset_size: int) -> Dict[str, Any]:
"""
Returns the default quantization config
:param preset: A preset that controls the quantization mode
(symmetric and asymmetric). It can take the following values:
- `performance`: Symmetric quantization of weights and activations.
- `mixed`: Symmetric quantization of weights and asymmetric
quantization of activations.
:param subset_size: Size of a subset to calculate activations
statistics used for quantization.
:return: The default quantization config.
"""
return {
"algorithm": "quantization",
"preset": preset.value,
"initializer": {
"range": {"num_init_samples": subset_size, "type": DEFAULT_RANGE_TYPE},
"batchnorm_adaptation": {"num_bn_adaptation_samples": subset_size},
},
"overflow_fix": "first_layer_only",
}
def _create_nncf_config(
preset: QuantizationPreset,
target_device: TargetDevice,
subset_size: int,
model_type: Optional[ModelType],
ignored_scope: Optional[IgnoredScope],
advanced_parameters: Optional[AdvancedQuantizationParameters],
) -> NNCFConfig:
"""
Creates the NNCFConfig for the quantization algorithm.
:param preset: A preset that controls the quantization mode
(symmetric and asymmetric). It can take the following values:
- `performance`: Symmetric quantization of weights and activations.
- `mixed`: Symmetric quantization of weights and asymmetric
quantization of activations.
:param target_device: A target device the specificity of which will be taken
into account while compressing in order to obtain the best performance
for this type of device.
:param subset_size: Size of a subset to calculate activations
|
flow graph nodes to be ignored during quantization.
:param advanced_parameters: Advanced quantization parameters for
fine-tuning the quantization algorithm.
:return: NNCFConfig for the quantization algorithm.
"""
if model_type is None:
compression_config = _get_default_quantization_config(preset, subset_size)
elif model_type == ModelType.TRANSFORMER:
compression_config = _get_transformer_quantization_config(subset_size)
if ignored_scope is not None:
_ignored_scope = convert_ignored_scope_to_list(ignored_scope)
if "ignored_scopes" in compression_config:
compression_config["ignored_scopes"].extend(_ignored_scope)
else:
compression_config["ignored_scopes"] = _ignored_scope
compression_config["validate_scopes"] = ignored_scope.validate
if advanced_parameters is not None:
compression_config = apply_advanced_parameters_to_config(compression_config, advanced_parameters)
if model_type == ModelType.TRANSFORMER:
compression_config["validate_scopes"] = False
return NNCFConfig({"target_device": target_device.value, "compression": compression_config})
def quantize_impl(
model: torch.nn.Module,
calibration_dataset: Dataset,
preset: QuantizationPreset,
target_device: TargetDevice,
subset_size: int,
fast_bias_correction: bool,
model_type: Optional[ModelType] = None,
ignored_scope: Optional[IgnoredScope] = None,
advanced_parameters: Optional[AdvancedQuantizationParameters] = None,
) -> torch.nn.Module:
"""
Implementation of the `quantize()` method for the PyTorch backend.
"""
if fast_bias_correction is False:
raise ValueError(f"fast_bias_correction={fast_bias_correction} is not " "supported")
if ignored_scope is not None and ignored_scope.types:
raise RuntimeError(
"Quantization algorithm from the PyTorch backend "
"does not support operation types in the ignored "
"scopes yet"
)
if target_device == TargetDevice.CPU_SPR:
raise RuntimeError("target_device == CPU_SPR is not supported")
nncf_config = _create_nncf_config(
preset, target_device, subset_size, model_type, ignored_scope, advanced_parameters
)
calibration_data_loader = CalibrationDataLoader(calibration_dataset)
nncf_config.register_extra_structs(
[
QuantizationRangeInitArgs(data_loader=calibration_data_loader),
BNAdaptationInitArgs(data_loader=calibration_data_loader),
]
)
def wrap_inputs(args, kwargs):
return wrap_nncf_model_inputs_with_objwalk(args, kwargs)
def wrap_outputs(retval):
return wrap_nncf_model_outputs_with_objwalk(retval)
def create_dummy_forward_fn(data_loader, device):
def dummy_forward(model):
with no_nncf_trace():
data_item = next(iter(data_loader))
args, kwargs = data_loader.get_inputs(data_item)
def send_to_device(tensor):
return tensor.to(device)
args = objwalk(args, is_tensor, send_to_device)
kwargs = objwalk(kwargs, is_tensor, send_to_device)
args, kwargs = wrap_inputs(args, kwargs)
retval = model(*args, **kwargs)
retval = replicate_same_tensors(retval)
return wrap_outputs(retval)
return dummy
|
statistics used for quantization.
:param model_type: Model type is needed to specify additional patterns
in the model.
:param ignored_scope: An ignored scope that defined the list of model control
|
random_line_split
|
quantize_model.py
|
agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
from typing import Any, Dict, Optional, Tuple
import torch
from nncf.common.quantization.structs import QuantizationPreset
from nncf.config import NNCFConfig
from nncf.config.structures import BNAdaptationInitArgs
from nncf.config.structures import QuantizationRangeInitArgs
from nncf.data import Dataset
from nncf.parameters import ModelType
from nncf.parameters import TargetDevice
from nncf.quantization.advanced_parameters import AdvancedQuantizationParameters
from nncf.quantization.advanced_parameters import apply_advanced_parameters_to_config
from nncf.scopes import IgnoredScope
from nncf.scopes import convert_ignored_scope_to_list
from nncf.torch.dynamic_graph.context import no_nncf_trace
from nncf.torch.dynamic_graph.io_handling import replicate_same_tensors
from nncf.torch.dynamic_graph.io_handling import wrap_nncf_model_inputs_with_objwalk
from nncf.torch.dynamic_graph.io_handling import wrap_nncf_model_outputs_with_objwalk
from nncf.torch.initialization import PTInitializingDataLoader
from nncf.torch.model_creation import create_compressed_model
from nncf.torch.nested_objects_traversal import objwalk
from nncf.torch.nncf_module_replacement import replace_modules_by_nncf_modules
from nncf.torch.quantization.weights_compression import insert_pre_compression_operations
from nncf.torch.utils import get_model_device
from nncf.torch.utils import is_tensor
DEFAULT_RANGE_TYPE = "mean_min_max"
# TODO(alexsu52): It is a workaround and should be removed.
class CalibrationDataLoader(PTInitializingDataLoader):
"""
This class wraps the nncf.Dataset.
This is required for proper initialization of certain compression algorithms.
"""
def __init__(self, data_loader: Dataset):
super().__init__(data_loader)
self._length = None
@property
def batch_size(self):
data_source = getattr(self._data_loader, "_data_source")
return getattr(data_source, "batch_size", 1)
def __iter__(self):
return iter(self._data_loader.get_inference_data())
def __len__(self):
if self._length is None:
data = self._data_loader.get_inference_data()
self._length = CalibrationDataLoader._get_length(data)
return self._length
def get_inputs(self, dataloader_output: Any) -> Tuple[Tuple, Dict]:
if not isinstance(dataloader_output, tuple):
dataloader_output = (dataloader_output,)
return dataloader_output, {}
@staticmethod
def
|
(iterable) -> int:
length = 0
for _ in iterable:
length = length + 1
return length
def _get_transformer_quantization_config(subset_size: int) -> Dict[str, Any]:
"""
Returns the quantization config for transformer-based models.
:param subset_size: Size of a subset to calculate activations
statistics used for quantization.
:return: The quantization config for transformer-based models.
"""
return {
"algorithm": "quantization",
"preset": "mixed",
"initializer": {
"range": {"num_init_samples": subset_size, "type": DEFAULT_RANGE_TYPE},
"batchnorm_adaptation": {"num_bn_adaptation_samples": 0},
},
"scope_overrides": {"activations": {"{re}.*matmul_0": {"mode": "symmetric"}}},
"ignored_scopes": [
"{re}.*Embeddings.*",
"{re}.*__add___[0-1]",
"{re}.*layer_norm_0",
"{re}.*matmul_1",
"{re}.*__truediv__*",
],
"overflow_fix": "first_layer_only",
}
def _get_default_quantization_config(preset: QuantizationPreset, subset_size: int) -> Dict[str, Any]:
"""
Returns the default quantization config
:param preset: A preset that controls the quantization mode
(symmetric and asymmetric). It can take the following values:
- `performance`: Symmetric quantization of weights and activations.
- `mixed`: Symmetric quantization of weights and asymmetric
quantization of activations.
:param subset_size: Size of a subset to calculate activations
statistics used for quantization.
:return: The default quantization config.
"""
return {
"algorithm": "quantization",
"preset": preset.value,
"initializer": {
"range": {"num_init_samples": subset_size, "type": DEFAULT_RANGE_TYPE},
"batchnorm_adaptation": {"num_bn_adaptation_samples": subset_size},
},
"overflow_fix": "first_layer_only",
}
def _create_nncf_config(
preset: QuantizationPreset,
target_device: TargetDevice,
subset_size: int,
model_type: Optional[ModelType],
ignored_scope: Optional[IgnoredScope],
advanced_parameters: Optional[AdvancedQuantizationParameters],
) -> NNCFConfig:
"""
Creates the NNCFConfig for the quantization algorithm.
:param preset: A preset that controls the quantization mode
(symmetric and asymmetric). It can take the following values:
- `performance`: Symmetric quantization of weights and activations.
- `mixed`: Symmetric quantization of weights and asymmetric
quantization of activations.
:param target_device: A target device the specificity of which will be taken
into account while compressing in order to obtain the best performance
for this type of device.
:param subset_size: Size of a subset to calculate activations
statistics used for quantization.
:param model_type: Model type is needed to specify additional patterns
in the model.
:param ignored_scope: An ignored scope that defined the list of model control
flow graph nodes to be ignored during quantization.
:param advanced_parameters: Advanced quantization parameters for
fine-tuning the quantization algorithm.
:return: NNCFConfig for the quantization algorithm.
"""
if model_type is None:
compression_config = _get_default_quantization_config(preset, subset_size)
elif model_type == ModelType.TRANSFORMER:
compression_config = _get_transformer_quantization_config(subset_size)
if ignored_scope is not None:
_ignored_scope = convert_ignored_scope_to_list(ignored_scope)
if "ignored_scopes" in compression_config:
compression_config["ignored_scopes"].extend(_ignored_scope)
else:
compression_config["ignored_scopes"] = _ignored_scope
compression_config["validate_scopes"] = ignored_scope.validate
if advanced_parameters is not None:
compression_config = apply_advanced_parameters_to_config(compression_config, advanced_parameters)
if model_type == ModelType.TRANSFORMER:
compression_config["validate_scopes"] = False
return NNCFConfig({"target_device": target_device.value, "compression": compression_config})
def quantize_impl(
model: torch.nn.Module,
calibration_dataset: Dataset,
preset: QuantizationPreset,
target_device: TargetDevice,
subset_size: int,
fast_bias_correction: bool,
model_type: Optional[ModelType] = None,
ignored_scope: Optional[IgnoredScope] = None,
advanced_parameters: Optional[AdvancedQuantizationParameters] = None,
) -> torch.nn.Module:
"""
Implementation of the `quantize()` method for the PyTorch backend.
"""
if fast_bias_correction is False:
raise ValueError(f"fast_bias_correction={fast_bias_correction} is not " "supported")
if ignored_scope is not None and ignored_scope.types:
raise RuntimeError(
"Quantization algorithm from the PyTorch backend "
"does not support operation types in the ignored "
"scopes yet"
)
if target_device == TargetDevice.CPU_SPR:
raise RuntimeError("target_device == CPU_SPR is not supported")
nncf_config = _create_nncf_config(
preset, target_device, subset_size, model_type, ignored_scope, advanced_parameters
)
calibration_data_loader = CalibrationDataLoader(calibration_dataset)
nncf_config.register_extra_structs(
[
QuantizationRangeInitArgs(data_loader=calibration_data_loader),
BNAdaptationInitArgs(data_loader=calibration_data_loader),
]
)
def wrap_inputs(args, kwargs):
return wrap_nncf_model_inputs_with_objwalk(args, kwargs)
def wrap_outputs(retval):
return wrap_nncf_model_outputs_with_objwalk(retval)
def create_dummy_forward_fn(data_loader, device):
def dummy_forward(model):
with no_nncf_trace():
data_item = next(iter(data_loader))
args, kwargs = data_loader.get_inputs(data_item)
def send_to_device(tensor):
return tensor.to(device)
args = objwalk(args, is_tensor, send_to_device)
kwargs = objwalk(kwargs, is_tensor, send_to_device)
args, kwargs = wrap_inputs(args, kwargs)
retval = model(*args, **kwargs)
retval = replicate_same_tensors(retval)
return wrap_outputs(retval)
return
|
_get_length
|
identifier_name
|
quantize_model.py
|
to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
from typing import Any, Dict, Optional, Tuple
import torch
from nncf.common.quantization.structs import QuantizationPreset
from nncf.config import NNCFConfig
from nncf.config.structures import BNAdaptationInitArgs
from nncf.config.structures import QuantizationRangeInitArgs
from nncf.data import Dataset
from nncf.parameters import ModelType
from nncf.parameters import TargetDevice
from nncf.quantization.advanced_parameters import AdvancedQuantizationParameters
from nncf.quantization.advanced_parameters import apply_advanced_parameters_to_config
from nncf.scopes import IgnoredScope
from nncf.scopes import convert_ignored_scope_to_list
from nncf.torch.dynamic_graph.context import no_nncf_trace
from nncf.torch.dynamic_graph.io_handling import replicate_same_tensors
from nncf.torch.dynamic_graph.io_handling import wrap_nncf_model_inputs_with_objwalk
from nncf.torch.dynamic_graph.io_handling import wrap_nncf_model_outputs_with_objwalk
from nncf.torch.initialization import PTInitializingDataLoader
from nncf.torch.model_creation import create_compressed_model
from nncf.torch.nested_objects_traversal import objwalk
from nncf.torch.nncf_module_replacement import replace_modules_by_nncf_modules
from nncf.torch.quantization.weights_compression import insert_pre_compression_operations
from nncf.torch.utils import get_model_device
from nncf.torch.utils import is_tensor
DEFAULT_RANGE_TYPE = "mean_min_max"
# TODO(alexsu52): It is a workaround and should be removed.
class CalibrationDataLoader(PTInitializingDataLoader):
"""
This class wraps the nncf.Dataset.
This is required for proper initialization of certain compression algorithms.
"""
def __init__(self, data_loader: Dataset):
super().__init__(data_loader)
self._length = None
@property
def batch_size(self):
data_source = getattr(self._data_loader, "_data_source")
return getattr(data_source, "batch_size", 1)
def __iter__(self):
return iter(self._data_loader.get_inference_data())
def __len__(self):
if self._length is None:
data = self._data_loader.get_inference_data()
self._length = CalibrationDataLoader._get_length(data)
return self._length
def get_inputs(self, dataloader_output: Any) -> Tuple[Tuple, Dict]:
if not isinstance(dataloader_output, tuple):
dataloader_output = (dataloader_output,)
return dataloader_output, {}
@staticmethod
def _get_length(iterable) -> int:
length = 0
for _ in iterable:
length = length + 1
return length
def _get_transformer_quantization_config(subset_size: int) -> Dict[str, Any]:
"""
Returns the quantization config for transformer-based models.
:param subset_size: Size of a subset to calculate activations
statistics used for quantization.
:return: The quantization config for transformer-based models.
"""
return {
"algorithm": "quantization",
"preset": "mixed",
"initializer": {
"range": {"num_init_samples": subset_size, "type": DEFAULT_RANGE_TYPE},
"batchnorm_adaptation": {"num_bn_adaptation_samples": 0},
},
"scope_overrides": {"activations": {"{re}.*matmul_0": {"mode": "symmetric"}}},
"ignored_scopes": [
"{re}.*Embeddings.*",
"{re}.*__add___[0-1]",
"{re}.*layer_norm_0",
"{re}.*matmul_1",
"{re}.*__truediv__*",
],
"overflow_fix": "first_layer_only",
}
def _get_default_quantization_config(preset: QuantizationPreset, subset_size: int) -> Dict[str, Any]:
"""
Returns the default quantization config
:param preset: A preset that controls the quantization mode
(symmetric and asymmetric). It can take the following values:
- `performance`: Symmetric quantization of weights and activations.
- `mixed`: Symmetric quantization of weights and asymmetric
quantization of activations.
:param subset_size: Size of a subset to calculate activations
statistics used for quantization.
:return: The default quantization config.
"""
return {
"algorithm": "quantization",
"preset": preset.value,
"initializer": {
"range": {"num_init_samples": subset_size, "type": DEFAULT_RANGE_TYPE},
"batchnorm_adaptation": {"num_bn_adaptation_samples": subset_size},
},
"overflow_fix": "first_layer_only",
}
def _create_nncf_config(
preset: QuantizationPreset,
target_device: TargetDevice,
subset_size: int,
model_type: Optional[ModelType],
ignored_scope: Optional[IgnoredScope],
advanced_parameters: Optional[AdvancedQuantizationParameters],
) -> NNCFConfig:
"""
Creates the NNCFConfig for the quantization algorithm.
:param preset: A preset that controls the quantization mode
(symmetric and asymmetric). It can take the following values:
- `performance`: Symmetric quantization of weights and activations.
- `mixed`: Symmetric quantization of weights and asymmetric
quantization of activations.
:param target_device: A target device the specificity of which will be taken
into account while compressing in order to obtain the best performance
for this type of device.
:param subset_size: Size of a subset to calculate activations
statistics used for quantization.
:param model_type: Model type is needed to specify additional patterns
in the model.
:param ignored_scope: An ignored scope that defined the list of model control
flow graph nodes to be ignored during quantization.
:param advanced_parameters: Advanced quantization parameters for
fine-tuning the quantization algorithm.
:return: NNCFConfig for the quantization algorithm.
"""
if model_type is None:
compression_config = _get_default_quantization_config(preset, subset_size)
elif model_type == ModelType.TRANSFORMER:
compression_config = _get_transformer_quantization_config(subset_size)
if ignored_scope is not None:
_ignored_scope = convert_ignored_scope_to_list(ignored_scope)
if "ignored_scopes" in compression_config:
compression_config["ignored_scopes"].extend(_ignored_scope)
else:
compression_config["ignored_scopes"] = _ignored_scope
compression_config["validate_scopes"] = ignored_scope.validate
if advanced_parameters is not None:
compression_config = apply_advanced_parameters_to_config(compression_config, advanced_parameters)
if model_type == ModelType.TRANSFORMER:
compression_config["validate_scopes"] = False
return NNCFConfig({"target_device": target_device.value, "compression": compression_config})
def quantize_impl(
model: torch.nn.Module,
calibration_dataset: Dataset,
preset: QuantizationPreset,
target_device: TargetDevice,
subset_size: int,
fast_bias_correction: bool,
model_type: Optional[ModelType] = None,
ignored_scope: Optional[IgnoredScope] = None,
advanced_parameters: Optional[AdvancedQuantizationParameters] = None,
) -> torch.nn.Module:
"""
Implementation of the `quantize()` method for the PyTorch backend.
"""
if fast_bias_correction is False:
raise ValueError(f"fast_bias_correction={fast_bias_correction} is not " "supported")
if ignored_scope is not None and ignored_scope.types:
raise RuntimeError(
"Quantization algorithm from the PyTorch backend "
"does not support operation types in the ignored "
"scopes yet"
)
if target_device == TargetDevice.CPU_SPR:
|
nncf_config = _create_nncf_config(
preset, target_device, subset_size, model_type, ignored_scope, advanced_parameters
)
calibration_data_loader = CalibrationDataLoader(calibration_dataset)
nncf_config.register_extra_structs(
[
QuantizationRangeInitArgs(data_loader=calibration_data_loader),
BNAdaptationInitArgs(data_loader=calibration_data_loader),
]
)
def wrap_inputs(args, kwargs):
return wrap_nncf_model_inputs_with_objwalk(args, kwargs)
def wrap_outputs(retval):
return wrap_nncf_model_outputs_with_objwalk(retval)
def create_dummy_forward_fn(data_loader, device):
def dummy_forward(model):
with no_nncf_trace():
data_item = next(iter(data_loader))
args, kwargs = data_loader.get_inputs(data_item)
def send_to_device(tensor):
return tensor.to(device)
args = objwalk(args, is_tensor, send_to_device)
kwargs = objwalk(kwargs, is_tensor, send_to_device)
args, kwargs = wrap_inputs(args, kwargs)
retval = model(*args, **kwargs)
retval = replicate_same_tensors(retval)
return wrap_outputs(retval)
return
|
raise RuntimeError("target_device == CPU_SPR is not supported")
|
conditional_block
|
be_sr.js
|
GM": {message: "Гамбија"},
"LV": {message: "Летонија"},
"RU": {message: "Русија"},
"FI": {message: "Финска"},
"Get Hola Plus for un-interrupted, ad-free service.": {message: "Набавите Хола плус за УН-прекида, без реклама сервиса."},
"LU": {message: "Луксембург"},
"VE": {message: "Венецуела"},
"TV": {message: "Тувалу"},
"VI": {message: "С.А.Д. Девичанска Острва"},
"SN": {message: "Сенегал"},
"MX": {message: "Мексико"},
"IL": {message: "Израел"},
"GG": {message: "Гурнси"},
"Author site:": {message: "Аутор сајта:"},
"HU": {message: "Мађарска"},
"DO": {message: "Доминиканска Република"},
"OFF": {message: "ОФФ"},
"KH": {message: "Камбоџа"},
"TG": {message: "Того"},
"Hola cannot work properly because another extension is controlling your proxy settings. Please disable other extensions that you think might control your proxy settings in <a>extensions</a> (such as ad-blockers, other VPN services, etc.).": {message: "Хола не може правилно да ради јер на другом локалу контролише поставке проки. Молимо онемогућите друге додатке које мислите да би могли контролисати поставке проки у <a> наставцима </a> (као што је ад-блокаторе, других ВПН услуге, итд)."},
"BB": {message: "Барбадос"},
"JE": {message: "Џерси"},
"DK": {message: "Данска"},
"PA": {message: "Панама"},
"CV": {message: "Капе Верде"},
"QA": {message: "Катар"},
"Reload": {message: "Поново напунити"},
"GD": {message: "Гренада"},
"Number of users that use this option": {message: "Број корисника који користе ову опцију"},
"MO": {message: "Макао С. А. Р. Кина"},
"MF": {message: "Сент Мартин"},
"HR": {message: "Хрватска"},
"CZ": {message: "Чешка"},
"BL": {message: "Свети Бартоломеј"},
"ST": {message: "Сао Томе и Принципе"},
"AU": {message: "Аустралија"},
"IR": {message: "Иран"},
"CG": {message: "Конго"},
"BI": {message: "Бурунди"},
"GW": {message: "Гвинеја-Бисао"},
"MK": {message: "Македонија"},
"GR": {message: "Грчка"},
"AG": {message: "Антигве и Барбуда"},
"AI": {message: "Ангвила"},
"AN": {message: "Холандски Антили"},
"UA": {message: "Украјина"},
"EH": {message: "Западна Сахара"},
"KN": {message: "Сент Китс и Невис"},
"SC": {message: "Сејшели"},
"NL": {message: "Холандија"},
"MS": {message: "Монсерат"},
"HK": {message: "Хонг Конг С. А. Р. Кина"},
"EC": {message: "Еквадор"},
"MY": {message: "Малезија"},
"CR": {message: "Костарика"},
"VA": {message: "Ватикан"},
"IO": {message: "Британска територија у Индијском океану"},
"SD": {message: "Судан"},
"RS": {message: "Србија"},
"CN": {message: "Кина"},
"UY": {message: "Уругвај"},
"PY": {message: "Парагвај"},
"MU": {message: "Маурицијус"},
"CH": {message: "Швајцарска"},
"LI": {message: "Лихтенштајн"},
"GH": {message: "Гана"},
"KG": {message: "Киргизстан"},
"NU": {message: "Ниуе"},
"US": {message: "Сједињене Америчке Државе"},
"PE": {message: "Перу"},
"SL": {message: "Сијера Леоне"},
"FJ": {message: "Фиџи"},
"ER": {message: "Еритреја"},
"IQ": {message: "Ирак"},
"AS": {message: "Америчка Самоа"},
"TZ": {message: "Танзанија"},
"LY": {message: "Либија"},
"GT": {message: "Гватемала"},
"BM": {message: "Бермуда"},
"BV": {message: "Буве Острва"},
"LT": {message: "Литванија"},
"SG": {message: "Сингапур"},
"PM": {message: "Сен Пјер и Микелон"},
"Initializing...": {message: "Покретање ..."},
"TT": {message: "Тринидад и Тобаго"},
"Hola does not work well in Windows 8 mode. Please switch to desktop mode. Click <a>here</a> for instructions": {message: "Хола не ради добро у Виндовс 8 режиму. Молимо вас пребацили на десктоп режим. Кликните <a> овде </a> за упутства"},
"SK": {message: "Словачка"},
"SY": {message: "Сирија"},
"GL": {message: "Гренланд"},
"PG": {message: "Папуа Нова Гвинеја"},
"KI": {message: "Кирибати"},
"CD": {message: "Демократска република Конго"},
"AO": {message: "Ангола"},
"BW": {message: "Боцвана"},
"ZW": {message: "Зимбабве"},
"VC": {message: "Сент Винсент и Гренадини"},
"JP": {message: "Јапан"},
"NA": {message: "Намибија"},
"TJ": {message: "Таџикистан"},
"LC": {message: "Сент Луција"},
"VU": {message: "Вануату"},
"MN": {message: "Монголија"},
"Hola site list": {message: "Унблоцкер сајт листа"},
"IT": {message: "Италија"},
"RE": {message: "Реинион"},
"WS": {message: "Самоа"},
"Enable": {message: "Омогућити"},
"Loading": {message: "Утовар"},
"EG": {message: "Египат"},
"FR": {message: "Француска"},
"start": {message: "старт"},
"RW": {message: "Руанда"},
"BE": {message: "Белгија"},
"UM": {message: "Мања удаљена острва САД"},
"Accelerator": {message: "Акцелератор"},
"LS": {message: "Лесото"},
"SA": {message: "Саудијска Арабија"},
"ZA": {message: "Јужноафричка Република"},
|
"PT": {message: "Португал"},
|
random_line_split
|
|
api_op_GetCredentials.go
|
"
)
// Returns a database user name and temporary password with temporary
// authorization to log in to Amazon Redshift Serverless. By default, the temporary
// credentials expire in 900 seconds. You can optionally specify a duration between
// 900 seconds (15 minutes) and 3600 seconds (60 minutes). The Identity and Access
// Management (IAM) user or role that runs GetCredentials must have an IAM policy
// attached that allows access to all necessary actions and resources. If the
// DbName parameter is specified, the IAM policy must allow access to the resource
// dbname for the specified database name.
func (c *Client) GetCredentials(ctx context.Context, params *GetCredentialsInput, optFns ...func(*Options)) (*GetCredentialsOutput, error) {
if params == nil {
params = &GetCredentialsInput{}
}
result, metadata, err := c.invokeOperation(ctx, "GetCredentials", params, optFns, c.addOperationGetCredentialsMiddlewares)
if err != nil {
return nil, err
}
out := result.(*GetCredentialsOutput)
out.ResultMetadata = metadata
return out, nil
}
type GetCredentialsInput struct {
// The name of the workgroup associated with the database.
//
// This member is required.
WorkgroupName *string
// The name of the database to get temporary authorization to log on to.
// Constraints:
// - Must be 1 to 64 alphanumeric characters or hyphens.
// - Must contain only uppercase or lowercase letters, numbers, underscore, plus
// sign, period (dot), at symbol (@), or hyphen.
// - The first character must be a letter.
// - Must not contain a colon ( : ) or slash ( / ).
// - Cannot be a reserved word. A list of reserved words can be found in
// Reserved Words (https://docs.aws.amazon.com/redshift/latest/dg/r_pg_keywords.html)
// in the Amazon Redshift Database Developer Guide
DbName *string
// The number of seconds until the returned temporary password expires. The
// minimum is 900 seconds, and the maximum is 3600 seconds.
DurationSeconds *int32
noSmithyDocumentSerde
}
type GetCredentialsOutput struct {
// A temporary password that authorizes the user name returned by DbUser to log on
// to the database DbName .
DbPassword *string
// A database user name that is authorized to log on to the database DbName using
// the password DbPassword . If the specified DbUser exists in the database, the
// new user name has the same database privileges as the the user named in DbUser .
// By default, the user is added to PUBLIC.
DbUser *string
// The date and time the password in DbPassword expires.
Expiration *time.Time
// The date and time of when the DbUser and DbPassword authorization refreshes.
NextRefreshTime *time.Time
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationGetCredentialsMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsjson11_serializeOpGetCredentials{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGetCredentials{}, middleware.After)
if err != nil {
return err
}
if err = addlegacyEndpointContextSetter(stack, options); err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addGetCredentialsResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = addOpGetCredentialsValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetCredentials(options.Region), middleware.Before); err != nil {
return err
}
if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opGetCredentials(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "redshift-serverless",
OperationName: "GetCredentials",
}
}
type opGetCredentialsResolveEndpointMiddleware struct {
EndpointResolver EndpointResolverV2
BuiltInResolver builtInParameterResolver
}
func (*opGetCredentialsResolveEndpointMiddleware) ID() string {
return "ResolveEndpointV2"
}
func (m *opGetCredentialsResolveEndpointMiddleware)
|
(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
if awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
return next.HandleSerialize(ctx, in)
}
req, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
}
if m.EndpointResolver == nil {
return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
}
params := EndpointParameters{}
m.BuiltInResolver.ResolveBuiltIns(¶ms)
var resolvedEndpoint smithyendpoints.Endpoint
resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params)
if err != nil {
return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
}
req.URL = &resolvedEndpoint.URI
for k := range resolvedEndpoint.Headers {
req.Header.Set(
k,
resolvedEndpoint.Headers.Get(k),
)
}
authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties)
if err != nil {
var nfe *internalauth.NoAuthenticationSchemesFoundError
if errors.As(err, &nfe) {
// if no auth scheme is found, default to sigv4
signingName := "redshift-serverless"
signingRegion := m.BuiltInResolver.(*builtInResolver).Region
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
}
var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError
if errors.As(err, &ue) {
return out, metadata, fmt.Errorf(
"This operation requests signer version(s) %v but the client only supports %v",
ue.UnsupportedSchemes,
internalauth.SupportedSchemes,
)
}
}
for _, authScheme := range authSchemes {
switch authScheme.(type) {
case *internalauth.AuthenticationSchemeV4:
v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4)
var signingName, signingRegion string
if v4Scheme.SigningName == nil {
signingName = "redshift-serverless"
} else {
signingName = *v4Scheme.SigningName
}
if v4Scheme.SigningRegion == nil {
signingRegion = m.BuiltInResolver.(*builtInResolver).Region
} else {
signingRegion = *v4Scheme.SigningRegion
}
if v4Scheme.DisableDoubleEncoding != nil {
// The signer sets an equivalent value at client initialization time.
// Setting this context value will cause the signer to extract it
// and override the value set at client initialization time.
ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding)
}
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
break
case *internalauth.AuthenticationSchemeV4A:
|
HandleSerialize
|
identifier_name
|
api_op_GetCredentials.go
|
"
)
// Returns a database user name and temporary password with temporary
// authorization to log in to Amazon Redshift Serverless. By default, the temporary
// credentials expire in 900 seconds. You can optionally specify a duration between
// 900 seconds (15 minutes) and 3600 seconds (60 minutes). The Identity and Access
// Management (IAM) user or role that runs GetCredentials must have an IAM policy
// attached that allows access to all necessary actions and resources. If the
// DbName parameter is specified, the IAM policy must allow access to the resource
// dbname for the specified database name.
func (c *Client) GetCredentials(ctx context.Context, params *GetCredentialsInput, optFns ...func(*Options)) (*GetCredentialsOutput, error) {
if params == nil {
params = &GetCredentialsInput{}
}
result, metadata, err := c.invokeOperation(ctx, "GetCredentials", params, optFns, c.addOperationGetCredentialsMiddlewares)
if err != nil {
return nil, err
}
out := result.(*GetCredentialsOutput)
out.ResultMetadata = metadata
return out, nil
}
type GetCredentialsInput struct {
// The name of the workgroup associated with the database.
//
// This member is required.
WorkgroupName *string
// The name of the database to get temporary authorization to log on to.
// Constraints:
// - Must be 1 to 64 alphanumeric characters or hyphens.
// - Must contain only uppercase or lowercase letters, numbers, underscore, plus
// sign, period (dot), at symbol (@), or hyphen.
// - The first character must be a letter.
// - Must not contain a colon ( : ) or slash ( / ).
// - Cannot be a reserved word. A list of reserved words can be found in
// Reserved Words (https://docs.aws.amazon.com/redshift/latest/dg/r_pg_keywords.html)
// in the Amazon Redshift Database Developer Guide
DbName *string
// The number of seconds until the returned temporary password expires. The
// minimum is 900 seconds, and the maximum is 3600 seconds.
DurationSeconds *int32
noSmithyDocumentSerde
}
type GetCredentialsOutput struct {
// A temporary password that authorizes the user name returned by DbUser to log on
// to the database DbName .
DbPassword *string
// A database user name that is authorized to log on to the database DbName using
// the password DbPassword . If the specified DbUser exists in the database, the
// new user name has the same database privileges as the the user named in DbUser .
// By default, the user is added to PUBLIC.
DbUser *string
// The date and time the password in DbPassword expires.
Expiration *time.Time
// The date and time of when the DbUser and DbPassword authorization refreshes.
NextRefreshTime *time.Time
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationGetCredentialsMiddlewares(stack *middleware.Stack, options Options) (err error)
|
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addGetCredentialsResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = addOpGetCredentialsValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetCredentials(options.Region), middleware.Before); err != nil {
return err
}
if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opGetCredentials(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "redshift-serverless",
OperationName: "GetCredentials",
}
}
type opGetCredentialsResolveEndpointMiddleware struct {
EndpointResolver EndpointResolverV2
BuiltInResolver builtInParameterResolver
}
func (*opGetCredentialsResolveEndpointMiddleware) ID() string {
return "ResolveEndpointV2"
}
func (m *opGetCredentialsResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
if awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
return next.HandleSerialize(ctx, in)
}
req, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
}
if m.EndpointResolver == nil {
return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
}
params := EndpointParameters{}
m.BuiltInResolver.ResolveBuiltIns(¶ms)
var resolvedEndpoint smithyendpoints.Endpoint
resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params)
if err != nil {
return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
}
req.URL = &resolvedEndpoint.URI
for k := range resolvedEndpoint.Headers {
req.Header.Set(
k,
resolvedEndpoint.Headers.Get(k),
)
}
authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties)
if err != nil {
var nfe *internalauth.NoAuthenticationSchemesFoundError
if errors.As(err, &nfe) {
// if no auth scheme is found, default to sigv4
signingName := "redshift-serverless"
signingRegion := m.BuiltInResolver.(*builtInResolver).Region
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
}
var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError
if errors.As(err, &ue) {
return out, metadata, fmt.Errorf(
"This operation requests signer version(s) %v but the client only supports %v",
ue.UnsupportedSchemes,
internalauth.SupportedSchemes,
)
}
}
for _, authScheme := range authSchemes {
switch authScheme.(type) {
case *internalauth.AuthenticationSchemeV4:
v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4)
var signingName, signingRegion string
if v4Scheme.SigningName == nil {
signingName = "redshift-serverless"
} else {
signingName = *v4Scheme.SigningName
}
if v4Scheme.SigningRegion == nil {
signingRegion = m.BuiltInResolver.(*builtInResolver).Region
} else {
signingRegion = *v4Scheme.SigningRegion
}
if v4Scheme.DisableDoubleEncoding != nil {
// The signer sets an equivalent value at client initialization time.
// Setting this context value will cause the signer to extract it
// and override the value set at client initialization time.
ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding)
}
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
break
case *internalauth.AuthenticationSchemeV4A:
|
{
err = stack.Serialize.Add(&awsAwsjson11_serializeOpGetCredentials{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGetCredentials{}, middleware.After)
if err != nil {
return err
}
if err = addlegacyEndpointContextSetter(stack, options); err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
|
identifier_body
|
api_op_GetCredentials.go
|
"
)
// Returns a database user name and temporary password with temporary
// authorization to log in to Amazon Redshift Serverless. By default, the temporary
// credentials expire in 900 seconds. You can optionally specify a duration between
// 900 seconds (15 minutes) and 3600 seconds (60 minutes). The Identity and Access
// Management (IAM) user or role that runs GetCredentials must have an IAM policy
// attached that allows access to all necessary actions and resources. If the
// DbName parameter is specified, the IAM policy must allow access to the resource
// dbname for the specified database name.
func (c *Client) GetCredentials(ctx context.Context, params *GetCredentialsInput, optFns ...func(*Options)) (*GetCredentialsOutput, error) {
if params == nil {
params = &GetCredentialsInput{}
}
result, metadata, err := c.invokeOperation(ctx, "GetCredentials", params, optFns, c.addOperationGetCredentialsMiddlewares)
if err != nil {
return nil, err
}
out := result.(*GetCredentialsOutput)
out.ResultMetadata = metadata
return out, nil
}
type GetCredentialsInput struct {
// The name of the workgroup associated with the database.
//
// This member is required.
WorkgroupName *string
// The name of the database to get temporary authorization to log on to.
// Constraints:
// - Must be 1 to 64 alphanumeric characters or hyphens.
// - Must contain only uppercase or lowercase letters, numbers, underscore, plus
// sign, period (dot), at symbol (@), or hyphen.
// - The first character must be a letter.
// - Must not contain a colon ( : ) or slash ( / ).
// - Cannot be a reserved word. A list of reserved words can be found in
// Reserved Words (https://docs.aws.amazon.com/redshift/latest/dg/r_pg_keywords.html)
// in the Amazon Redshift Database Developer Guide
DbName *string
// The number of seconds until the returned temporary password expires. The
// minimum is 900 seconds, and the maximum is 3600 seconds.
DurationSeconds *int32
noSmithyDocumentSerde
}
type GetCredentialsOutput struct {
// A temporary password that authorizes the user name returned by DbUser to log on
// to the database DbName .
DbPassword *string
// A database user name that is authorized to log on to the database DbName using
// the password DbPassword . If the specified DbUser exists in the database, the
// new user name has the same database privileges as the the user named in DbUser .
// By default, the user is added to PUBLIC.
DbUser *string
// The date and time the password in DbPassword expires.
Expiration *time.Time
// The date and time of when the DbUser and DbPassword authorization refreshes.
NextRefreshTime *time.Time
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationGetCredentialsMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsjson11_serializeOpGetCredentials{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGetCredentials{}, middleware.After)
if err != nil {
return err
}
if err = addlegacyEndpointContextSetter(stack, options); err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addGetCredentialsResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = addOpGetCredentialsValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetCredentials(options.Region), middleware.Before); err != nil {
return err
}
if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opGetCredentials(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "redshift-serverless",
OperationName: "GetCredentials",
}
}
type opGetCredentialsResolveEndpointMiddleware struct {
EndpointResolver EndpointResolverV2
BuiltInResolver builtInParameterResolver
}
func (*opGetCredentialsResolveEndpointMiddleware) ID() string {
return "ResolveEndpointV2"
}
func (m *opGetCredentialsResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
if awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
return next.HandleSerialize(ctx, in)
}
req, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
}
if m.EndpointResolver == nil {
return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
}
params := EndpointParameters{}
m.BuiltInResolver.ResolveBuiltIns(¶ms)
var resolvedEndpoint smithyendpoints.Endpoint
resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params)
if err != nil {
return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
}
req.URL = &resolvedEndpoint.URI
for k := range resolvedEndpoint.Headers {
req.Header.Set(
k,
resolvedEndpoint.Headers.Get(k),
)
}
authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties)
if err != nil {
var nfe *internalauth.NoAuthenticationSchemesFoundError
if errors.As(err, &nfe) {
// if no auth scheme is found, default to sigv4
signingName := "redshift-serverless"
signingRegion := m.BuiltInResolver.(*builtInResolver).Region
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
}
var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError
if errors.As(err, &ue) {
return out, metadata, fmt.Errorf(
"This operation requests signer version(s) %v but the client only supports %v",
ue.UnsupportedSchemes,
internalauth.SupportedSchemes,
)
}
}
for _, authScheme := range authSchemes {
switch authScheme.(type) {
case *internalauth.AuthenticationSchemeV4:
v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4)
var signingName, signingRegion string
if v4Scheme.SigningName == nil
|
else {
signingName = *v4Scheme.SigningName
}
if v4Scheme.SigningRegion == nil {
signingRegion = m.BuiltInResolver.(*builtInResolver).Region
} else {
signingRegion = *v4Scheme.SigningRegion
}
if v4Scheme.DisableDoubleEncoding != nil {
// The signer sets an equivalent value at client initialization time.
// Setting this context value will cause the signer to extract it
// and override the value set at client initialization time.
ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding)
}
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
break
case *internalauth.AuthenticationSchemeV4A:
|
{
signingName = "redshift-serverless"
}
|
conditional_block
|
api_op_GetCredentials.go
|
// Returns a database user name and temporary password with temporary
// authorization to log in to Amazon Redshift Serverless. By default, the temporary
// credentials expire in 900 seconds. You can optionally specify a duration between
// 900 seconds (15 minutes) and 3600 seconds (60 minutes). The Identity and Access
// Management (IAM) user or role that runs GetCredentials must have an IAM policy
// attached that allows access to all necessary actions and resources. If the
// DbName parameter is specified, the IAM policy must allow access to the resource
// dbname for the specified database name.
func (c *Client) GetCredentials(ctx context.Context, params *GetCredentialsInput, optFns ...func(*Options)) (*GetCredentialsOutput, error) {
if params == nil {
params = &GetCredentialsInput{}
}
result, metadata, err := c.invokeOperation(ctx, "GetCredentials", params, optFns, c.addOperationGetCredentialsMiddlewares)
if err != nil {
return nil, err
}
out := result.(*GetCredentialsOutput)
out.ResultMetadata = metadata
return out, nil
}
type GetCredentialsInput struct {
// The name of the workgroup associated with the database.
//
// This member is required.
WorkgroupName *string
// The name of the database to get temporary authorization to log on to.
// Constraints:
// - Must be 1 to 64 alphanumeric characters or hyphens.
// - Must contain only uppercase or lowercase letters, numbers, underscore, plus
// sign, period (dot), at symbol (@), or hyphen.
// - The first character must be a letter.
// - Must not contain a colon ( : ) or slash ( / ).
// - Cannot be a reserved word. A list of reserved words can be found in
// Reserved Words (https://docs.aws.amazon.com/redshift/latest/dg/r_pg_keywords.html)
// in the Amazon Redshift Database Developer Guide
DbName *string
// The number of seconds until the returned temporary password expires. The
// minimum is 900 seconds, and the maximum is 3600 seconds.
DurationSeconds *int32
noSmithyDocumentSerde
}
type GetCredentialsOutput struct {
// A temporary password that authorizes the user name returned by DbUser to log on
// to the database DbName .
DbPassword *string
// A database user name that is authorized to log on to the database DbName using
// the password DbPassword . If the specified DbUser exists in the database, the
// new user name has the same database privileges as the the user named in DbUser .
// By default, the user is added to PUBLIC.
DbUser *string
// The date and time the password in DbPassword expires.
Expiration *time.Time
// The date and time of when the DbUser and DbPassword authorization refreshes.
NextRefreshTime *time.Time
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationGetCredentialsMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsjson11_serializeOpGetCredentials{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGetCredentials{}, middleware.After)
if err != nil {
return err
}
if err = addlegacyEndpointContextSetter(stack, options); err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addGetCredentialsResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = addOpGetCredentialsValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetCredentials(options.Region), middleware.Before); err != nil {
return err
}
if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opGetCredentials(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "redshift-serverless",
OperationName: "GetCredentials",
}
}
type opGetCredentialsResolveEndpointMiddleware struct {
EndpointResolver EndpointResolverV2
BuiltInResolver builtInParameterResolver
}
func (*opGetCredentialsResolveEndpointMiddleware) ID() string {
return "ResolveEndpointV2"
}
func (m *opGetCredentialsResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
if awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
return next.HandleSerialize(ctx, in)
}
req, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
}
if m.EndpointResolver == nil {
return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
}
params := EndpointParameters{}
m.BuiltInResolver.ResolveBuiltIns(¶ms)
var resolvedEndpoint smithyendpoints.Endpoint
resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params)
if err != nil {
return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
}
req.URL = &resolvedEndpoint.URI
for k := range resolvedEndpoint.Headers {
req.Header.Set(
k,
resolvedEndpoint.Headers.Get(k),
)
}
authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties)
if err != nil {
var nfe *internalauth.NoAuthenticationSchemesFoundError
if errors.As(err, &nfe) {
// if no auth scheme is found, default to sigv4
signingName := "redshift-serverless"
signingRegion := m.BuiltInResolver.(*builtInResolver).Region
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
}
var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError
if errors.As(err, &ue) {
return out, metadata, fmt.Errorf(
"This operation requests signer version(s) %v but the client only supports %v",
ue.UnsupportedSchemes,
internalauth.SupportedSchemes,
)
}
}
for _, authScheme := range authSchemes {
switch authScheme.(type) {
case *internalauth.AuthenticationSchemeV4:
v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4)
var signingName, signingRegion string
if v4Scheme.SigningName == nil {
signingName = "redshift-serverless"
} else {
signingName = *v4Scheme.SigningName
}
if v4Scheme.SigningRegion == nil {
signingRegion = m.BuiltInResolver.(*builtInResolver).Region
} else {
signingRegion = *v4Scheme.SigningRegion
}
if v4Scheme.DisableDoubleEncoding != nil {
// The signer sets an equivalent value at client initialization time.
// Setting this context value will cause the signer to extract it
// and override the value set at client initialization time.
ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding)
}
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
break
case *internalauth.AuthenticationSchemeV4
|
"time"
)
|
random_line_split
|
|
api.ts
|
return request({ url: path + 'tjapi/user/publicAccountCodeLogin', data: {channel, subChannel, identity, ...param}, method: 'POST' })
}
/*** 退出 */
export const doLoginOut = function(param) {
return request({ url: path + 'tjapi/user/logout', data: param, method: 'POST' })
}
/*** 账户 */
export const mine = function(param) {
// return request({ url: path + 'tjapi/user/mine', data: param, method: 'POST' })
return appLaunchQueue.delayWhenAppLaunchLoaded().then(function() {
return request({ url: path + 'tjapi/user/mine', data: param, method: 'POST' })
})
}
/*** 订单创建 */
export const orderCreate = async function(param, flag) {
const carConfigDetailModelList = storage.get('carConfigDetailModelList')
let extraParam = {}
if (carConfigDetailModelList) {
const version_price = parseFloat(carConfigDetailModelList[0]['price'].replace(/¥|,/ig,'')) * 100
const orderProductAttrs = [// type 值固定,格式如下;
{'type': 'VERSION', 'value': '尊享版', 'price': version_price}, // 版本
{'type': 'COLOUR', 'value': carConfigDetailModelList[1]['title'], 'price': 0} // 颜色
]
if (carConfigDetailModelList[2]) {
orderProductAttrs.push({'type': 'WHEEL', 'value': carConfigDetailModelList[2]['title'], 'price': 0})
}
if (carConfigDetailModelList[3]) {
orderProductAttrs.push({'type': 'INTERIOR', 'value': carConfigDetailModelList[3]['title'], 'price': 0})
}
if (carConfigDetailModelList[4]) {
const optional_price = parseFloat(carConfigDetailModelList[4]['price'].replace(/¥|,/ig,'')) * 100
orderProductAttrs.push({'type': 'OPTIONAL', 'value': carConfigDetailModelList[4]['title'], 'price': optional_price})
}
extraParam = {
'pickCarDate': '2020年1月份', // 新增提车日期
'version': '1.0', // 新增,新接口固定该值
'orderProductAttrs': orderProductAttrs
}
}
// 加上推荐码
const url = flag==='update' ? 'tjapi/order/updateNew' : 'tjapi/order/create'
const recommend = localGet('recommend') || ''
const channel = localGet('channel') || ''
const subChannel = localGet('subChannel') || ''
const result = await request({ url: path + url, data: { ...param, ...extraParam, openId: recommend, channel, subChannel }, method: 'POST' })
return result
}
/***
* 订单修改
*/
export const orderUpdate = async function(param) {
return await orderCreate(param, 'update')
}
/*** 订单列表 */
export const orderList = async function(param) {
const result = await request({ url: path + 'tjapi/order/list', data: param, method: 'POST' })
return result
}
/*** 订单支付 */
export const orderPay = async function(param) {
const result = await request({ url: path + 'tjapi/order/pay', data: param, method: 'POST' })
return result
}
/*** 订单详情 */
export const orderDetail = async function(param) {
const result = await request({ url: path + 'tjapi/order/detail', data: param, method: 'POST' })
return result
}
/*** 支付结果查询 */
export const orderPayQuery = async function(param) {
const result = await request({ url: path + 'tjapi/order/pay/query', data: param, method: 'POST' })
return result
}
/*** 取消订单 */
export const orderCancel = async function(param) {
const result = await request({ url: path + 'tjapi/order/cancel', data: param, method: 'POST' })
return result
}
/*** 申请退订 */
export const orderRefundApply = async function(param) {
const result = await request({ url: path + 'tjapi/order/refund/apply', data: param, method: 'POST' })
return result
}
/*** 撤销申请退款 */
export const orderRefundApplyCancel = async function(param) {
const result = await request({ url: path + 'tjapi/order/refund/apply/cancel', data: param, method: 'POST' })
return result
}
/** 上传头像 */
export const uploadAva = function(param) {
const result = request({ url: path + 'tjapi/user/upload', data: param, method: 'POST' })
return result
}
/*****编辑用户信息 */
export const saveUserInfo = function(param) {
const result = request({ url: path + 'tjapi/user/save', data: param, method: 'POST' })
return result
}
// 签到
export const signIn = function(param) {
const result = request({ url: path + 'tjapi/signin/signin', data: param, method: 'POST' })
return result
}
export const tracklog = function(param) {
const result = request({ url: path + 'tjapi/log/push', data: param, method: 'POST' })
return result
}
/***积分明细 */
export const scoreList = function(param) {
const result = request({ url: path + 'tjapi/userscore/detail/query', data: param, method: 'POST' })
return result
}
/*****查询未读消息数据 */
export const messageReadCount = function(param) {
const result = request({ url: path + 'tjapi/message/readCount', data: param, method: 'POST' })
return result
}
/*****查询Udesk消息数据 */
export const gainUdeskMessage = function(param) {
const result = request({ url: 'https://dearcc.udesk.cn/' + 'open_api_v1/customers/get_customer', data: param, method: 'GET' })
return result
}
export const authWX = function(param) {
const result = request({ url: 'https://open.weixin.qq.com/connect/qrconnect', data: param, method: 'GET' })
return result
}
/*****查询消息详细 */
export const gainMessageDetail = function(param) {
const result = request({ url: path + 'tjapi/message/temp/msgSendDetailList', data: param, method: 'POST' })
return result
}
/*****解析手机号码 */
export const decryptPhoneNumber = function(param) {
const result = request({ url: path + 'tjapi/wechat/decrypt', data: param, method: 'POST' })
return result
}
/*****获取意向金 */
export const orderGetAmount = function(param) {
const result = request({ url: path + 'tjapi/order/getAmount', data: param, method: 'POST' })
return result
}
/**获取字符渠道*/
export const orderGetChannel = function(param) {
const result = request({ url: path + 'tjapi/config/queryPayConfig', data: param, method: 'POST' })
return result
}
/**微信formID提交*/
export const submitFormId = function(param) {
const result = request({ url: path + 'tjapi/message/temp/saveForm', data: param, method: 'POST' })
return result
}
export const shareOrder = function(param) {
const result = request({ url: path + 'tjapi/wechat/shareOrder', data: param, method: 'POST' })
return result
}
export const getShareConfig = function(param) {
const result = request({ url: path + 'tjapi/wechat/getShareConfig', data: param, method: 'POST' })
return result
}
export const getWxacode = function(param) {
// const result = request({ url: path + 'tjapi/wechat/shareOrder', data: param, method: 'POST' })
// const result = request({ url: path + 'tjapi/wechatImg/getPageCode', data: param, method: 'GET' })
let url = path + 'tjapi/wechatImg/getPageCode?' + UrlParse.getSearchQuery(param)
return url
}
/*** 新增留资信息 */
export const createLeaveInfo = function(param) {
return request({ url: path + 'tjapi/source/add', data: param, method: 'POST' })
}
//****微信端是否已经绑定 */
export const isWXBinded = function(param) {
return request({ url: path + 'tjapi/user/isBinded', data: param, method: 'POST' })
}
function loopLaunchApp() {
let timeID
timeID = setInterval(async function() {
try {
await doInitWxApp()
clearInterval(timeID)
appLaunchQueue.appLoad()
} catch (e) {
MP.Tip(e.errMsg)
}
}, 1000)
}
export async function initApp() {
loopLaunchApp()
}
|
identifier_name
|
||
api.ts
|
}
export const appLaunchQueue = launchQueue()
// 获取用户类型
export interface UserInfo
Interface {
openid: string,
avatarUrl: string,
city: string,
country: string,
gender: number,
language: string,
nickName: string,
province: string
}
export const useropenid = () => {
}
export const weixinSign = function(param) {
return request({ url: path + 'common/weixinSign', data: param, method: 'POST' })
}
export const getUserInfo: (payload: object) => Promise<UserInfoInterface> = async function(payload) {
const result: Promise<UserInfoInterface> = await request({ url: path + 'common/getuserInfo', method: 'POST', data: payload })
return result
}
export const bindingWeChat = function(param) {
return request({ url: path + 'tjapi/user/bindingWeChat', data: param, method: 'POST' })
}
export const bindingWeChatNoCheck = function(param) {
return request({ url: path + 'tjapi/user/bindingWeChatNoCheck', data: param, method: 'POST' })
}
// 微信小程序登录的接口
interface Code2sessionResult {
thirdSession: string,
loginCode: string
}
export const code2session = async function(appid, code) {
const result: Code2sessionResult = await request({ url: path + 'tjapi/user/wxCodeLogin', data: { appid, code }, method: 'POST' })
return result
}
// 微信小程序 微信授权登录 保存用户信息
export const saveWxUserInfoServer = async function(param) {
const result = await request({ url: path + 'common/saveWxUserInfoServer', data: param, method: 'POST' })
return result
}
export const getCity = async function() {
const result = await request({ url: path + 'tjapi/city/getProvinceList', method: 'POST' })
return result
}
// 获取列表
export const getList = async function() {
const result = await request({ url: path + 'common/getList', method: 'POST' })
return result
}
export const getGeolocation = async function(param) {
const result = await request({ url: path + 'tjapi/city/coords/transfer', data: param, method: 'POST' })
return result
}
/*发送手机验证码*/
export const sendRegisterMobileCode = async function(param) {
return request({ url: path + 'tjapi/user/sendCode', data: param, method: 'POST' })
}
/*** 登录 */
export const doLogin = function(param) {
return request({ url: path + 'tjapi/user/codeLogin', data: param, method: 'POST' })
}
/***公众号H5绑定 */
export const doWXH5Login = function(param) {
const channel = Cookie.get('channel')
const subChannel = Cookie.get('subChannel')
const identity = Cookie.get('identity') || getOpenId()
return request({ url: path + 'tjapi/user/publicAccountCodeLogin', data: {channel, subChannel, identity, ...param}, method: 'POST' })
}
/*** 退出 */
export const doLoginOut = function(param) {
return request({ url: path + 'tjapi/user/logout', data: param, method: 'POST' })
}
/*** 账户 */
export const mine = function(param) {
// return request({ url: path + 'tjapi/user/mine', data: param, method: 'POST' })
return appLaunchQueue.delayWhenAppLaunchLoaded().then(function() {
return request({ url: path + 'tjapi/user/mine', data: param, method: 'POST' })
})
}
/*** 订单创建 */
export const orderCreate = async function(param, flag) {
const carConfigDetailModelList = storage.get('carConfigDetailModelList')
let extraParam = {}
if (carConfigDetailModelList) {
const version_price = parseFloat(carConfigDetailModelList[0]['price'].replace(/¥|,/ig,'')) * 100
const orderProductAttrs = [// type 值固定,格式如下;
{'type': 'VERSION', 'value': '尊享版', 'price': version_price}, // 版本
{'type': 'COLOUR', 'value': carConfigDetailModelList[1]['title'], 'price': 0} // 颜色
]
if (carConfigDetailModelList[2]) {
orderProductAttrs.push({'type': 'WHEEL', 'value': carConfigDetailModelList[2]['title'], 'price': 0})
}
if (carConfigDetailModelList[3]) {
orderProductAttrs.push({'type': 'INTERIOR', 'value': carConfigDetailModelList[3]['title'], 'price': 0})
}
if (carConfigDetailModelList[4]) {
const optional_price = parseFloat(carConfigDetailModelList[4]['price'].replace(/¥|,/ig,'')) * 100
orderProductAttrs.push({'type': 'OPTIONAL', 'value': carConfigDetailModelList[4]['title'], 'price': optional_price})
}
extraParam = {
'pickCarDate': '2020年1月份', // 新增提车日期
'version': '1.0', // 新增,新接口固定该值
'orderProductAttrs': orderProductAttrs
}
}
// 加上推荐码
const url = flag==='update' ? 'tjapi/order/updateNew' : 'tjapi/order/create'
const recommend = localGet('recommend') || ''
const channel = localGet('channel') || ''
const subChannel = localGet('subChannel') || ''
const result = await request({ url: path + url, data: { ...param, ...extraParam, openId: recommend, channel, subChannel }, method: 'POST' })
return result
}
/***
* 订单修改
*/
export const orderUpdate = async function(param) {
return await orderCreate(param, 'update')
}
/*** 订单列表 */
export const orderList = async function(param) {
const result = await request({ url: path + 'tjapi/order/list', data: param, method: 'POST' })
return result
}
/*** 订单支付 */
export const orderPay = async function(param) {
const result = await request({ url: path + 'tjapi/order/pay', data: param, method: 'POST' })
return result
}
/*** 订单详情 */
export const orderDetail = async function(param) {
const result = await request({ url: path + 'tjapi/order/detail', data: param, method: 'POST' })
return result
}
/*** 支付结果查询 */
export const orderPayQuery = async function(param) {
const result = await request({ url: path + 'tjapi/order/pay/query', data: param, method: 'POST' })
return result
}
/*** 取消订单 */
export const orderCancel = async function(param) {
const result = await request({ url: path + 'tjapi/order/cancel', data: param, method: 'POST' })
return result
}
/*** 申请退订 */
export const orderRefundApply = async function(param) {
const result = await request({ url: path + 'tjapi/order/refund/apply', data: param, method: 'POST' })
return result
}
/*** 撤销申请退款 */
export const orderRefundApplyCancel = async function(param) {
const result = await request({ url: path + 'tjapi/order/refund/apply/cancel', data: param, method: 'POST' })
return result
}
/** 上传头像 */
export const uploadAva = function(param) {
const result = request({ url: path + 'tjapi/user/upload', data: param, method: 'POST' })
return result
}
/*****编辑用户信息 */
export const saveUserInfo = function(param) {
const result = request({ url: path + 'tjapi/user/save', data: param, method: 'POST' })
return result
}
// 签到
export const signIn = function(param) {
const result = request({ url: path + 'tjapi/signin/signin', data: param, method: 'POST' })
return result
}
export const tracklog = function(param) {
const result = request({ url: path + 'tjapi/log/push', data: param, method: 'POST' })
return result
}
/***积分明细 */
export const scoreList = function(param) {
const result = request({ url: path + 'tjapi/userscore/detail/query', data: param, method: 'POST' })
return result
}
/*****查询未读消息数据 */
export const
|
delayWhenAppLaunchLoaded: function() {
let self = this
return new Promise(function(resolve) {
if (self.loaded) {
resolve()
}
if (mpvuePlatform === 'h5') {
resolve()
} else {
let appLaunchId = setInterval(function() {
if (self.loaded) {
resolve()
clearInterval(appLaunchId)
}
}, 100)
}
})
}
}
|
identifier_body
|
|
api.ts
|
city: string,
country: string,
gender: number,
language: string,
nickName: string,
province: string
}
export const useropenid = () => {
}
export const weixinSign = function(param) {
return request({ url: path + 'common/weixinSign', data: param, method: 'POST' })
}
export const getUserInfo: (payload: object) => Promise<UserInfoInterface> = async function(payload) {
const result: Promise<UserInfoInterface> = await request({ url: path + 'common/getuserInfo', method: 'POST', data: payload })
return result
}
export const bindingWeChat = function(param) {
return request({ url: path + 'tjapi/user/bindingWeChat', data: param, method: 'POST' })
}
export const bindingWeChatNoCheck = function(param) {
return request({ url: path + 'tjapi/user/bindingWeChatNoCheck', data: param, method: 'POST' })
}
// 微信小程序登录的接口
interface Code2sessionResult {
thirdSession: string,
loginCode: string
}
|
return result
}
// 微信小程序 微信授权登录 保存用户信息
export const saveWxUserInfoServer = async function(param) {
const result = await request({ url: path + 'common/saveWxUserInfoServer', data: param, method: 'POST' })
return result
}
export const getCity = async function() {
const result = await request({ url: path + 'tjapi/city/getProvinceList', method: 'POST' })
return result
}
// 获取列表
export const getList = async function() {
const result = await request({ url: path + 'common/getList', method: 'POST' })
return result
}
export const getGeolocation = async function(param) {
const result = await request({ url: path + 'tjapi/city/coords/transfer', data: param, method: 'POST' })
return result
}
/*发送手机验证码*/
export const sendRegisterMobileCode = async function(param) {
return request({ url: path + 'tjapi/user/sendCode', data: param, method: 'POST' })
}
/*** 登录 */
export const doLogin = function(param) {
return request({ url: path + 'tjapi/user/codeLogin', data: param, method: 'POST' })
}
/***公众号H5绑定 */
export const doWXH5Login = function(param) {
const channel = Cookie.get('channel')
const subChannel = Cookie.get('subChannel')
const identity = Cookie.get('identity') || getOpenId()
return request({ url: path + 'tjapi/user/publicAccountCodeLogin', data: {channel, subChannel, identity, ...param}, method: 'POST' })
}
/*** 退出 */
export const doLoginOut = function(param) {
return request({ url: path + 'tjapi/user/logout', data: param, method: 'POST' })
}
/*** 账户 */
export const mine = function(param) {
// return request({ url: path + 'tjapi/user/mine', data: param, method: 'POST' })
return appLaunchQueue.delayWhenAppLaunchLoaded().then(function() {
return request({ url: path + 'tjapi/user/mine', data: param, method: 'POST' })
})
}
/*** 订单创建 */
export const orderCreate = async function(param, flag) {
const carConfigDetailModelList = storage.get('carConfigDetailModelList')
let extraParam = {}
if (carConfigDetailModelList) {
const version_price = parseFloat(carConfigDetailModelList[0]['price'].replace(/¥|,/ig,'')) * 100
const orderProductAttrs = [// type 值固定,格式如下;
{'type': 'VERSION', 'value': '尊享版', 'price': version_price}, // 版本
{'type': 'COLOUR', 'value': carConfigDetailModelList[1]['title'], 'price': 0} // 颜色
]
if (carConfigDetailModelList[2]) {
orderProductAttrs.push({'type': 'WHEEL', 'value': carConfigDetailModelList[2]['title'], 'price': 0})
}
if (carConfigDetailModelList[3]) {
orderProductAttrs.push({'type': 'INTERIOR', 'value': carConfigDetailModelList[3]['title'], 'price': 0})
}
if (carConfigDetailModelList[4]) {
const optional_price = parseFloat(carConfigDetailModelList[4]['price'].replace(/¥|,/ig,'')) * 100
orderProductAttrs.push({'type': 'OPTIONAL', 'value': carConfigDetailModelList[4]['title'], 'price': optional_price})
}
extraParam = {
'pickCarDate': '2020年1月份', // 新增提车日期
'version': '1.0', // 新增,新接口固定该值
'orderProductAttrs': orderProductAttrs
}
}
// 加上推荐码
const url = flag==='update' ? 'tjapi/order/updateNew' : 'tjapi/order/create'
const recommend = localGet('recommend') || ''
const channel = localGet('channel') || ''
const subChannel = localGet('subChannel') || ''
const result = await request({ url: path + url, data: { ...param, ...extraParam, openId: recommend, channel, subChannel }, method: 'POST' })
return result
}
/***
* 订单修改
*/
export const orderUpdate = async function(param) {
return await orderCreate(param, 'update')
}
/*** 订单列表 */
export const orderList = async function(param) {
const result = await request({ url: path + 'tjapi/order/list', data: param, method: 'POST' })
return result
}
/*** 订单支付 */
export const orderPay = async function(param) {
const result = await request({ url: path + 'tjapi/order/pay', data: param, method: 'POST' })
return result
}
/*** 订单详情 */
export const orderDetail = async function(param) {
const result = await request({ url: path + 'tjapi/order/detail', data: param, method: 'POST' })
return result
}
/*** 支付结果查询 */
export const orderPayQuery = async function(param) {
const result = await request({ url: path + 'tjapi/order/pay/query', data: param, method: 'POST' })
return result
}
/*** 取消订单 */
export const orderCancel = async function(param) {
const result = await request({ url: path + 'tjapi/order/cancel', data: param, method: 'POST' })
return result
}
/*** 申请退订 */
export const orderRefundApply = async function(param) {
const result = await request({ url: path + 'tjapi/order/refund/apply', data: param, method: 'POST' })
return result
}
/*** 撤销申请退款 */
export const orderRefundApplyCancel = async function(param) {
const result = await request({ url: path + 'tjapi/order/refund/apply/cancel', data: param, method: 'POST' })
return result
}
/** 上传头像 */
export const uploadAva = function(param) {
const result = request({ url: path + 'tjapi/user/upload', data: param, method: 'POST' })
return result
}
/*****编辑用户信息 */
export const saveUserInfo = function(param) {
const result = request({ url: path + 'tjapi/user/save', data: param, method: 'POST' })
return result
}
// 签到
export const signIn = function(param) {
const result = request({ url: path + 'tjapi/signin/signin', data: param, method: 'POST' })
return result
}
export const tracklog = function(param) {
const result = request({ url: path + 'tjapi/log/push', data: param, method: 'POST' })
return result
}
/***积分明细 */
export const scoreList = function(param) {
const result = request({ url: path + 'tjapi/userscore/detail/query', data: param, method: 'POST' })
return result
}
/*****查询未读消息数据 */
export const messageReadCount = function(param) {
const result = request({ url: path + 'tjapi/message/readCount', data: param, method: 'POST' })
return result
}
/*****查询Udesk消息数据 */
export const gainUdeskMessage = function(param) {
const result = request({ url: 'https://dearcc.udesk.cn/' + 'open_api_v1/customers/get_customer', data: param, method: 'GET' })
return result
}
export const authWX = function(param) {
const result = request({ url: 'https://open.weixin.qq.com
|
export const code2session = async function(appid, code) {
const result: Code2sessionResult = await request({ url: path + 'tjapi/user/wxCodeLogin', data: { appid, code }, method: 'POST' })
|
random_line_split
|
api.ts
|
launchQueue()
// 获取用户类型
export interface UserInfoInterface {
openid: string,
avatarUrl: string,
city: string,
country: string,
gender: number,
language: string,
nickName: string,
province: string
}
export const useropenid = () => {
}
export const weixinSign = function(param) {
return request({ url: path + 'common/weixinSign', data: param, method: 'POST' })
}
export const getUserInfo: (payload: object) => Promise<UserInfoInterface> = async function(payload) {
const result: Promise<UserInfoInterface> = await request({ url: path + 'common/getuserInfo', method: 'POST', data: payload })
return result
}
export const bindingWeChat = function(param) {
return request({ url: path + 'tjapi/user/bindingWeChat', data: param, method: 'POST' })
}
export const bindingWeChatNoCheck = function(param) {
return request({ url: path + 'tjapi/user/bindingWeChatNoCheck', data: param, method: 'POST' })
}
// 微信小程序登录的接口
interface Code2sessionResult {
thirdSession: string,
loginCode: string
}
export const code2session = async function(appid, code) {
const result: Code2sessionResult = await request({ url: path + 'tjapi/user/wxCodeLogin', data: { appid, code }, method: 'POST' })
return result
}
// 微信小程序 微信授权登录 保存用户信息
export const saveWxUserInfoServer = async function(param) {
const result = await request({ url: path + 'common/saveWxUserInfoServer', data: param, method: 'POST' })
return result
}
export const getCity = async function() {
const result = await request({ url: path + 'tjapi/city/getProvinceList', method: 'POST' })
return result
}
// 获取列表
export const getList = async function() {
const result = await request({ url: path + 'common/getList', method: 'POST' })
return result
}
export const getGeolocation = async function(param) {
const result = await request({ url: path + 'tjapi/city/coords/transfer', data: param, method: 'POST' })
return result
}
/*发送手机验证码*/
export const sendRegisterMobileCode = async function(param) {
return request({ url: path + 'tjapi/user/sendCode', data: param, method: 'POST' })
}
/*** 登录 */
export const doLogin = function(param) {
return request({ url: path + 'tjapi/user/codeLogin', data: param, method: 'POST' })
}
/***公众号H5绑定 */
export const doWXH5Login = function(param) {
const channel = Cookie.get('channel')
const subChannel = Cookie.get('subChannel')
const identity = Cookie.get('identity') || getOpenId()
return request({ url: path + 'tjapi/user/publicAccountCodeLogin', data: {channel, subChannel, identity, ...param}, method: 'POST' })
}
/*** 退出 */
export const doLoginOut = function(param) {
return request({ url: path + 'tjapi/user/logout', data: param, method: 'POST' })
}
/*** 账户 */
export const mine = function(param) {
// return request({ url: path + 'tjapi/user/mine', data: param, method: 'POST' })
return appLaunchQueue.delayWhenAppLaunchLoaded().then(function() {
return request({ url: path + 'tjapi/user/mine', data: param, method: 'POST' })
})
}
/*** 订单创建 */
export const orderCreate = async function(param, flag) {
const carConfigDetailModelList = storage.get('carConfigDetailModelList')
let extraParam = {}
if (carConfigDetailModelList) {
const version_price = parseFloat(carConfigDetailModelList[0]['price'].replace(/¥|,/ig,'')) * 100
const orderProductAttrs = [// type 值固定,格式如下;
{'type': 'VERSION', 'value': '尊享版', 'price': version_price}, // 版本
{'type': 'COLOUR', 'value': carConfigDetailModelList[1]['title'], 'price': 0} // 颜色
]
if (carConfigDetailModelList[2]) {
orderProductAttrs.push({'type': 'WHEEL', 'value': carConfigDetailModelList[2]['title'], 'price': 0})
}
if (carConfigDetailModelList[3]) {
orderProductAttrs.push({'type': 'INTERIOR', 'value': carConfigDetailModelList[3]['title'], 'price': 0})
}
if (carConfigDetailModelList[4]) {
const optional_price = parseFloat(carConfigDetailModelList[4]['price'].replace(/¥|,/ig,'')) * 100
orderProductAttrs.push({'type': 'OPTIONAL', 'value': carConfigDetailModelList[4]['title'], 'price': optional_price})
}
extraParam = {
'pickCarDate': '2020年1月份', // 新增提车日期
'version': '1.0', // 新增,新接口固定该值
'orderProductAttrs': orderProductAttrs
}
}
// 加上推荐码
const url = flag==='update' ? 'tjapi/order/updateNew' : 'tjapi/order/create'
const recommend = localGet('recommend') || ''
const channel = localGet('channel') || ''
const subChannel = localGet('subChannel') || ''
const result = await request({ url: path + url, data: { ...param, ...extraParam, openId: recommend, channel, subChannel }, method: 'POST' })
return result
}
/***
* 订单修改
*/
export const orderUpdate = async function(param) {
return await orderCreate(param, 'update')
}
/*** 订单列表 */
export const orderList = async function(param) {
const result = await request({ url: path + 'tjapi/order/list', data: param, method: 'POST' })
return result
}
/*** 订单支付 */
export const orderPay = async function(param) {
const result = await request({ url: path + 'tjapi/order/pay', data: param, method: 'POST' })
return result
}
/*** 订单详情 */
export const orderDetail = async function(param) {
const result = await request({ url: path + 'tjapi/order/detail', data: param, method: 'POST' })
return result
}
/*** 支付结果查询 */
export const orderPayQuery = async function(param) {
const result = await request({ url: path + 'tjapi/order/pay/query', data: param, method: 'POST' })
return result
}
/*** 取消订单 */
export const orderCancel = async function(param) {
const result = await request({ url: path + 'tjapi/order/cancel', data: param, method: 'POST' })
return result
}
/*** 申请退订 */
export const orderRefundApply = async function(param) {
const result = await request({ url: path + 'tjapi/order/refund/apply', data: param, method: 'POST' })
return result
}
/*** 撤销申请退款 */
export const orderRefundApplyCancel = async function(param) {
const result = await request({ url: path + 'tjapi/order/refund/apply/cancel', data: param, method: 'POST' })
return result
}
/** 上传头像 */
export const uploadAva = function(param) {
const result = request({ url: path + 'tjapi/user/upload', data: param, method: 'POST' })
return result
}
/*****编辑用户信息 */
export const saveUserInfo = function(param) {
const result = request({ url: path + 'tjapi/user/save', data: param, method: 'POST' })
return result
}
// 签到
export const signIn = function(param) {
const result = request({ url: path + 'tjapi/signin/signin', data: param, method: 'POST' })
return result
}
export const tracklog = function(param) {
const result = request({ url: path + 'tjapi/log/push', data: param, method: 'POST' })
return result
}
/***积分明细 */
export const scoreList = function(param) {
const result = request({ url: path + 'tjapi/userscore/detail/query', data: param, method: 'POST' })
return result
}
/*****查询未读消息数据 */
export const messageReadCount = function(param) {
const result = request({ url: path + 'tjapi/message/readCount', data: param, method: 'POST' })
return result
}
/*****查询Udesk消息数据 */
export const gainUdeskMessage = function(param) {
const result = request({ url: 'https://dearcc.udesk.cn/' + 'open_api
|
}, 100)
}
})
}
}
}
export const appLaunchQueue =
|
conditional_block
|
|
german_traffic_pytorch.py
|
import DataLoader
import torchvision.transforms as T
from torch.utils.data import random_split
import torch.utils.data as data
from torchvision.utils import make_grid
import torch.optim as optim
import matplotlib
import matplotlib.pyplot as plt
# %matplotlib inline
matplotlib.rcParams['figure.facecolor'] = '#ffffff'
dataset_url="https://www.kaggle.com/meowmeowmeowmeowmeow/gtsrb-german-traffic-sign"
od.download(dataset_url)
input_data="/content/gtsrb-german-traffic-sign/Train"
c1=os.listdir(input_data)
print(c1)
train_dataset="/content/gtsrb-german-traffic-sign/Train"
classes=os.listdir(train_dataset)
print(classes)
data_transforms =torchvision.transforms.Compose([
T.Resize([32,32]),
#T.CenterCrop(32),
#T.ColorJitter(brightness=0.5, contrast=0.1, saturation=0.1, hue=0.1),
T.transforms.ToTensor()
])
train_data_path ="/content/gtsrb-german-traffic-sign/train"
train_dataset = torchvision.datasets.ImageFolder(root = train_data_path, transform = data_transforms)
"""# Data Split for Training
"""
val_len=5000
train_len=len(train_dataset)-val_len
train_data,val_data=data.random_split(train_dataset,[train_len,val_len])
len(train_data)
BATCH_SIZE = 32
learning_rate = 0.001
EPOCHS = 15
numClasses = 43
train_loader = data.DataLoader(train_data, shuffle=True, batch_size = BATCH_SIZE,num_workers=3,pin_memory=True)
val_loader = data.DataLoader(val_data,batch_size = BATCH_SIZE*2,num_workers=3,pin_memory=True)
"""# Helper Functions
##For Visualisation
"""
def show_batch(dl):
for images, labels in dl:
fig, ax = plt.subplots(figsize=(12,12))
ax.set_xticks([]); ax.set_yticks([])
ax.imshow(make_grid(images[:64], nrow=8).permute(1, 2, 0).clamp(0,1))
break
show_batch(train_loader)
import jovian
project_name="01-German_traffic"
jovian.commit(project=project_name)
"""## For GPU"""
def get_default_device():
"""Pick GPU if available, else CPU"""
if torch.cuda.is_available():
return torch.device('cuda')
else:
return torch.device('cpu')
def to_device(data, device):
"""Move tensor(s) to chosen device"""
if isinstance(data, (list,tuple)):
return [to_device(x, device) for x in data]
return data.to(device, non_blocking=True)
class DeviceDataLoader():
"""Wrap a dataloader to move data to a device"""
def __init__(self, dl, device):
self.dl = dl
self.device = device
def __iter__(self):
"""Yield a batch of data after moving it to device"""
for b in self.dl:
yield to_device(b, self.device)
def __len__(self):
"""Number of batches"""
return len(self.dl)
device = get_default_device()
device
train_dl = DeviceDataLoader(train_loader, device)
valid_dl = DeviceDataLoader(val_loader, device)
"""## For Accuracy"""
def accuracy(outputs, labels):
_, preds = torch.max(outputs, dim=1)
return torch.tensor(torch.sum(preds == labels).item() / len(preds))
"""#Model Class
Loss and Propagation
"""
# Define optimizer and criterion functions
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
criterion = nn.CrossEntropyLoss()
class ImageBase(nn.Module):
def training_step(self, batch):
images, labels = batch
out = self(images) # Generate predictions
loss = F.cross_entropy(out, labels) # Calculate loss
return loss
def validation_step(self, batch):
images, labels = batch
out = self(images) # Generate predictions
loss = F.cross_entropy(out, labels) # Calculate loss
acc = accuracy(out, labels) # Calculate accuracy
return {'val_loss': loss.detach(), 'val_acc': acc}
def validation_epoch_end(self, outputs):
batch_losses = [x['val_loss'] for x in outputs]
epoch_loss = torch.stack(batch_losses).mean() # Combine losses
batch_accs = [x['val_acc'] for x in outputs]
epoch_acc = torch.stack(batch_accs).mean() # Combine accuracies
return {'val_loss': epoch_loss.item(), 'val_acc': epoch_acc.item()}
def epoch_end(self, epoch, result):
print("Epoch [{}], last_lr: {:.5f}, train_loss: {:.4f}, val_loss: {:.4f}, val_acc: {:.4f}".format(
epoch, result['lrs'][-1], result['train_loss'], result['val_loss'], result['val_acc']))
class Resnet(ImageBase):
|
self.classifier = nn.Sequential(
nn.Linear(128*8*8, 512),
nn.ReLU(inplace=True),
nn.Dropout(0.5),
nn.Linear(in_features=512, out_features=output_dim)
)
def forward(self, x):
x = self.features(x)
x = x.view(x.shape[0], -1)
x = self.classifier(x)
return x
model = Resnet(3,numClasses)
model = to_device(model, device)
# Function to count the number of parameters in the model
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
# Print model
print(model)
# Print number of trainable parameters in the model
print(f'The model has {count_parameters(model):,} trainable parameters')
@torch.no_grad()
def evaluate(model, val_loader):
model.eval()
outputs = [model.validation_step(batch) for batch in val_loader]
return model.validation_epoch_end(outputs)
def get_lr(optimizer):
for param_group in optimizer.param_groups:
return param_group['lr']
def fit_one_cycle(epochs, max_lr, model, train_loader, val_loader,
weight_decay=0, grad_clip=None, opt_func=torch.optim.SGD):
torch.cuda.empty_cache()
history = []
# Set up cutom optimizer with weight decay
optimizer = opt_func(model.parameters(), max_lr, weight_decay=weight_decay)
# Set up one-cycle learning rate scheduler
sched = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr, epochs=epochs,
steps_per_epoch=len(train_loader))
for epoch in range(epochs):
# Training Phase
model.train()
train_losses = []
lrs = []
for batch in train_loader:
loss = model.training_step(batch)
train_losses.append(loss)
loss.backward()
# Gradient clipping
if grad_clip:
nn.utils.clip_grad_value_(model.parameters(), grad_clip)
optimizer.step()
optimizer.zero_grad()
# Record & update learning rate
lrs.append(get_lr(optimizer))
sched.step()
# Validation phase
result = evaluate(model, val_loader)
result['train_loss'] = torch.stack(train_losses).mean().item()
result['lrs'] = lrs
model.epoch_end(epoch, result)
history.append(result)
return history
history = [evaluate(model, valid_dl)]
history
epochs = 10
max_lr = 0.001
grad_clip = 0.1
weight_decay = 1e-6
opt_func = torch.optim.Adam
# Commented out IPython magic to ensure Python compatibility.
# %%time
#
# history += fit_one_cycle(epochs, max_lr, model, train_dl, valid_dl,
# grad_clip=grad_clip,
# weight_decay=weight_decay,
# opt_func=opt_func)
def plot_accuracies(history):
accuracies = [x['val_acc'] for x in history]
plt.plot(accuracies, '-x')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.title('Accuracy vs. No. of epochs');
plot_accuracies(history)
def plot_losses(history):
train_losses = [x.get('train_loss') for x in history]
val_losses
|
def __init__(self,in_channels, output_dim):
super().__init__()
self.features = nn.Sequential(
nn.Conv2d(in_channels, out_channels=16, kernel_size=3, stride=1, padding=1),
nn.Conv2d(in_channels=16, out_channels=32, kernel_size=3, stride=1, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2), #32x16x16
nn.Dropout(0.25),
nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=1, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=1, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2), #128x8x8
nn.Dropout(0.25)
#nn.Flatten()
)
|
identifier_body
|
german_traffic_pytorch.py
|
import DataLoader
import torchvision.transforms as T
from torch.utils.data import random_split
import torch.utils.data as data
from torchvision.utils import make_grid
import torch.optim as optim
import matplotlib
import matplotlib.pyplot as plt
# %matplotlib inline
matplotlib.rcParams['figure.facecolor'] = '#ffffff'
dataset_url="https://www.kaggle.com/meowmeowmeowmeowmeow/gtsrb-german-traffic-sign"
od.download(dataset_url)
input_data="/content/gtsrb-german-traffic-sign/Train"
c1=os.listdir(input_data)
print(c1)
train_dataset="/content/gtsrb-german-traffic-sign/Train"
classes=os.listdir(train_dataset)
print(classes)
data_transforms =torchvision.transforms.Compose([
T.Resize([32,32]),
#T.CenterCrop(32),
#T.ColorJitter(brightness=0.5, contrast=0.1, saturation=0.1, hue=0.1),
T.transforms.ToTensor()
])
train_data_path ="/content/gtsrb-german-traffic-sign/train"
train_dataset = torchvision.datasets.ImageFolder(root = train_data_path, transform = data_transforms)
"""# Data Split for Training
"""
val_len=5000
train_len=len(train_dataset)-val_len
train_data,val_data=data.random_split(train_dataset,[train_len,val_len])
len(train_data)
BATCH_SIZE = 32
learning_rate = 0.001
EPOCHS = 15
numClasses = 43
train_loader = data.DataLoader(train_data, shuffle=True, batch_size = BATCH_SIZE,num_workers=3,pin_memory=True)
val_loader = data.DataLoader(val_data,batch_size = BATCH_SIZE*2,num_workers=3,pin_memory=True)
"""# Helper Functions
##For Visualisation
"""
def show_batch(dl):
for images, labels in dl:
fig, ax = plt.subplots(figsize=(12,12))
ax.set_xticks([]); ax.set_yticks([])
ax.imshow(make_grid(images[:64], nrow=8).permute(1, 2, 0).clamp(0,1))
break
show_batch(train_loader)
import jovian
project_name="01-German_traffic"
jovian.commit(project=project_name)
"""## For GPU"""
def get_default_device():
"""Pick GPU if available, else CPU"""
if torch.cuda.is_available():
|
else:
return torch.device('cpu')
def to_device(data, device):
"""Move tensor(s) to chosen device"""
if isinstance(data, (list,tuple)):
return [to_device(x, device) for x in data]
return data.to(device, non_blocking=True)
class DeviceDataLoader():
"""Wrap a dataloader to move data to a device"""
def __init__(self, dl, device):
self.dl = dl
self.device = device
def __iter__(self):
"""Yield a batch of data after moving it to device"""
for b in self.dl:
yield to_device(b, self.device)
def __len__(self):
"""Number of batches"""
return len(self.dl)
device = get_default_device()
device
train_dl = DeviceDataLoader(train_loader, device)
valid_dl = DeviceDataLoader(val_loader, device)
"""## For Accuracy"""
def accuracy(outputs, labels):
_, preds = torch.max(outputs, dim=1)
return torch.tensor(torch.sum(preds == labels).item() / len(preds))
"""#Model Class
Loss and Propagation
"""
# Define optimizer and criterion functions
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
criterion = nn.CrossEntropyLoss()
class ImageBase(nn.Module):
def training_step(self, batch):
images, labels = batch
out = self(images) # Generate predictions
loss = F.cross_entropy(out, labels) # Calculate loss
return loss
def validation_step(self, batch):
images, labels = batch
out = self(images) # Generate predictions
loss = F.cross_entropy(out, labels) # Calculate loss
acc = accuracy(out, labels) # Calculate accuracy
return {'val_loss': loss.detach(), 'val_acc': acc}
def validation_epoch_end(self, outputs):
batch_losses = [x['val_loss'] for x in outputs]
epoch_loss = torch.stack(batch_losses).mean() # Combine losses
batch_accs = [x['val_acc'] for x in outputs]
epoch_acc = torch.stack(batch_accs).mean() # Combine accuracies
return {'val_loss': epoch_loss.item(), 'val_acc': epoch_acc.item()}
def epoch_end(self, epoch, result):
print("Epoch [{}], last_lr: {:.5f}, train_loss: {:.4f}, val_loss: {:.4f}, val_acc: {:.4f}".format(
epoch, result['lrs'][-1], result['train_loss'], result['val_loss'], result['val_acc']))
class Resnet(ImageBase):
def __init__(self,in_channels, output_dim):
super().__init__()
self.features = nn.Sequential(
nn.Conv2d(in_channels, out_channels=16, kernel_size=3, stride=1, padding=1),
nn.Conv2d(in_channels=16, out_channels=32, kernel_size=3, stride=1, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2), #32x16x16
nn.Dropout(0.25),
nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=1, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=1, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2), #128x8x8
nn.Dropout(0.25)
#nn.Flatten()
)
self.classifier = nn.Sequential(
nn.Linear(128*8*8, 512),
nn.ReLU(inplace=True),
nn.Dropout(0.5),
nn.Linear(in_features=512, out_features=output_dim)
)
def forward(self, x):
x = self.features(x)
x = x.view(x.shape[0], -1)
x = self.classifier(x)
return x
model = Resnet(3,numClasses)
model = to_device(model, device)
# Function to count the number of parameters in the model
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
# Print model
print(model)
# Print number of trainable parameters in the model
print(f'The model has {count_parameters(model):,} trainable parameters')
@torch.no_grad()
def evaluate(model, val_loader):
model.eval()
outputs = [model.validation_step(batch) for batch in val_loader]
return model.validation_epoch_end(outputs)
def get_lr(optimizer):
for param_group in optimizer.param_groups:
return param_group['lr']
def fit_one_cycle(epochs, max_lr, model, train_loader, val_loader,
weight_decay=0, grad_clip=None, opt_func=torch.optim.SGD):
torch.cuda.empty_cache()
history = []
# Set up cutom optimizer with weight decay
optimizer = opt_func(model.parameters(), max_lr, weight_decay=weight_decay)
# Set up one-cycle learning rate scheduler
sched = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr, epochs=epochs,
steps_per_epoch=len(train_loader))
for epoch in range(epochs):
# Training Phase
model.train()
train_losses = []
lrs = []
for batch in train_loader:
loss = model.training_step(batch)
train_losses.append(loss)
loss.backward()
# Gradient clipping
if grad_clip:
nn.utils.clip_grad_value_(model.parameters(), grad_clip)
optimizer.step()
optimizer.zero_grad()
# Record & update learning rate
lrs.append(get_lr(optimizer))
sched.step()
# Validation phase
result = evaluate(model, val_loader)
result['train_loss'] = torch.stack(train_losses).mean().item()
result['lrs'] = lrs
model.epoch_end(epoch, result)
history.append(result)
return history
history = [evaluate(model, valid_dl)]
history
epochs = 10
max_lr = 0.001
grad_clip = 0.1
weight_decay = 1e-6
opt_func = torch.optim.Adam
# Commented out IPython magic to ensure Python compatibility.
# %%time
#
# history += fit_one_cycle(epochs, max_lr, model, train_dl, valid_dl,
# grad_clip=grad_clip,
# weight_decay=weight_decay,
# opt_func=opt_func)
def plot_accuracies(history):
accuracies = [x['val_acc'] for x in history]
plt.plot(accuracies, '-x')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.title('Accuracy vs. No. of epochs');
plot_accuracies(history)
def plot_losses(history):
train_losses = [x.get('train_loss') for x in history]
val
|
return torch.device('cuda')
|
conditional_block
|
german_traffic_pytorch.py
|
import DataLoader
import torchvision.transforms as T
from torch.utils.data import random_split
import torch.utils.data as data
from torchvision.utils import make_grid
import torch.optim as optim
import matplotlib
import matplotlib.pyplot as plt
# %matplotlib inline
matplotlib.rcParams['figure.facecolor'] = '#ffffff'
dataset_url="https://www.kaggle.com/meowmeowmeowmeowmeow/gtsrb-german-traffic-sign"
od.download(dataset_url)
input_data="/content/gtsrb-german-traffic-sign/Train"
c1=os.listdir(input_data)
print(c1)
train_dataset="/content/gtsrb-german-traffic-sign/Train"
classes=os.listdir(train_dataset)
print(classes)
data_transforms =torchvision.transforms.Compose([
T.Resize([32,32]),
#T.CenterCrop(32),
#T.ColorJitter(brightness=0.5, contrast=0.1, saturation=0.1, hue=0.1),
T.transforms.ToTensor()
])
train_data_path ="/content/gtsrb-german-traffic-sign/train"
train_dataset = torchvision.datasets.ImageFolder(root = train_data_path, transform = data_transforms)
"""# Data Split for Training
"""
val_len=5000
train_len=len(train_dataset)-val_len
train_data,val_data=data.random_split(train_dataset,[train_len,val_len])
len(train_data)
BATCH_SIZE = 32
learning_rate = 0.001
EPOCHS = 15
numClasses = 43
train_loader = data.DataLoader(train_data, shuffle=True, batch_size = BATCH_SIZE,num_workers=3,pin_memory=True)
val_loader = data.DataLoader(val_data,batch_size = BATCH_SIZE*2,num_workers=3,pin_memory=True)
"""# Helper Functions
##For Visualisation
"""
def show_batch(dl):
for images, labels in dl:
fig, ax = plt.subplots(figsize=(12,12))
ax.set_xticks([]); ax.set_yticks([])
ax.imshow(make_grid(images[:64], nrow=8).permute(1, 2, 0).clamp(0,1))
|
import jovian
project_name="01-German_traffic"
jovian.commit(project=project_name)
"""## For GPU"""
def get_default_device():
"""Pick GPU if available, else CPU"""
if torch.cuda.is_available():
return torch.device('cuda')
else:
return torch.device('cpu')
def to_device(data, device):
"""Move tensor(s) to chosen device"""
if isinstance(data, (list,tuple)):
return [to_device(x, device) for x in data]
return data.to(device, non_blocking=True)
class DeviceDataLoader():
"""Wrap a dataloader to move data to a device"""
def __init__(self, dl, device):
self.dl = dl
self.device = device
def __iter__(self):
"""Yield a batch of data after moving it to device"""
for b in self.dl:
yield to_device(b, self.device)
def __len__(self):
"""Number of batches"""
return len(self.dl)
device = get_default_device()
device
train_dl = DeviceDataLoader(train_loader, device)
valid_dl = DeviceDataLoader(val_loader, device)
"""## For Accuracy"""
def accuracy(outputs, labels):
_, preds = torch.max(outputs, dim=1)
return torch.tensor(torch.sum(preds == labels).item() / len(preds))
"""#Model Class
Loss and Propagation
"""
# Define optimizer and criterion functions
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
criterion = nn.CrossEntropyLoss()
class ImageBase(nn.Module):
def training_step(self, batch):
images, labels = batch
out = self(images) # Generate predictions
loss = F.cross_entropy(out, labels) # Calculate loss
return loss
def validation_step(self, batch):
images, labels = batch
out = self(images) # Generate predictions
loss = F.cross_entropy(out, labels) # Calculate loss
acc = accuracy(out, labels) # Calculate accuracy
return {'val_loss': loss.detach(), 'val_acc': acc}
def validation_epoch_end(self, outputs):
batch_losses = [x['val_loss'] for x in outputs]
epoch_loss = torch.stack(batch_losses).mean() # Combine losses
batch_accs = [x['val_acc'] for x in outputs]
epoch_acc = torch.stack(batch_accs).mean() # Combine accuracies
return {'val_loss': epoch_loss.item(), 'val_acc': epoch_acc.item()}
def epoch_end(self, epoch, result):
print("Epoch [{}], last_lr: {:.5f}, train_loss: {:.4f}, val_loss: {:.4f}, val_acc: {:.4f}".format(
epoch, result['lrs'][-1], result['train_loss'], result['val_loss'], result['val_acc']))
class Resnet(ImageBase):
def __init__(self,in_channels, output_dim):
super().__init__()
self.features = nn.Sequential(
nn.Conv2d(in_channels, out_channels=16, kernel_size=3, stride=1, padding=1),
nn.Conv2d(in_channels=16, out_channels=32, kernel_size=3, stride=1, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2), #32x16x16
nn.Dropout(0.25),
nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=1, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=1, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2), #128x8x8
nn.Dropout(0.25)
#nn.Flatten()
)
self.classifier = nn.Sequential(
nn.Linear(128*8*8, 512),
nn.ReLU(inplace=True),
nn.Dropout(0.5),
nn.Linear(in_features=512, out_features=output_dim)
)
def forward(self, x):
x = self.features(x)
x = x.view(x.shape[0], -1)
x = self.classifier(x)
return x
model = Resnet(3,numClasses)
model = to_device(model, device)
# Function to count the number of parameters in the model
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
# Print model
print(model)
# Print number of trainable parameters in the model
print(f'The model has {count_parameters(model):,} trainable parameters')
@torch.no_grad()
def evaluate(model, val_loader):
model.eval()
outputs = [model.validation_step(batch) for batch in val_loader]
return model.validation_epoch_end(outputs)
def get_lr(optimizer):
for param_group in optimizer.param_groups:
return param_group['lr']
def fit_one_cycle(epochs, max_lr, model, train_loader, val_loader,
weight_decay=0, grad_clip=None, opt_func=torch.optim.SGD):
torch.cuda.empty_cache()
history = []
# Set up cutom optimizer with weight decay
optimizer = opt_func(model.parameters(), max_lr, weight_decay=weight_decay)
# Set up one-cycle learning rate scheduler
sched = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr, epochs=epochs,
steps_per_epoch=len(train_loader))
for epoch in range(epochs):
# Training Phase
model.train()
train_losses = []
lrs = []
for batch in train_loader:
loss = model.training_step(batch)
train_losses.append(loss)
loss.backward()
# Gradient clipping
if grad_clip:
nn.utils.clip_grad_value_(model.parameters(), grad_clip)
optimizer.step()
optimizer.zero_grad()
# Record & update learning rate
lrs.append(get_lr(optimizer))
sched.step()
# Validation phase
result = evaluate(model, val_loader)
result['train_loss'] = torch.stack(train_losses).mean().item()
result['lrs'] = lrs
model.epoch_end(epoch, result)
history.append(result)
return history
history = [evaluate(model, valid_dl)]
history
epochs = 10
max_lr = 0.001
grad_clip = 0.1
weight_decay = 1e-6
opt_func = torch.optim.Adam
# Commented out IPython magic to ensure Python compatibility.
# %%time
#
# history += fit_one_cycle(epochs, max_lr, model, train_dl, valid_dl,
# grad_clip=grad_clip,
# weight_decay=weight_decay,
# opt_func=opt_func)
def plot_accuracies(history):
accuracies = [x['val_acc'] for x in history]
plt.plot(accuracies, '-x')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.title('Accuracy vs. No. of epochs');
plot_accuracies(history)
def plot_losses(history):
train_losses = [x.get('train_loss') for x in history]
val
|
break
show_batch(train_loader)
|
random_line_split
|
german_traffic_pytorch.py
|
import DataLoader
import torchvision.transforms as T
from torch.utils.data import random_split
import torch.utils.data as data
from torchvision.utils import make_grid
import torch.optim as optim
import matplotlib
import matplotlib.pyplot as plt
# %matplotlib inline
matplotlib.rcParams['figure.facecolor'] = '#ffffff'
dataset_url="https://www.kaggle.com/meowmeowmeowmeowmeow/gtsrb-german-traffic-sign"
od.download(dataset_url)
input_data="/content/gtsrb-german-traffic-sign/Train"
c1=os.listdir(input_data)
print(c1)
train_dataset="/content/gtsrb-german-traffic-sign/Train"
classes=os.listdir(train_dataset)
print(classes)
data_transforms =torchvision.transforms.Compose([
T.Resize([32,32]),
#T.CenterCrop(32),
#T.ColorJitter(brightness=0.5, contrast=0.1, saturation=0.1, hue=0.1),
T.transforms.ToTensor()
])
train_data_path ="/content/gtsrb-german-traffic-sign/train"
train_dataset = torchvision.datasets.ImageFolder(root = train_data_path, transform = data_transforms)
"""# Data Split for Training
"""
val_len=5000
train_len=len(train_dataset)-val_len
train_data,val_data=data.random_split(train_dataset,[train_len,val_len])
len(train_data)
BATCH_SIZE = 32
learning_rate = 0.001
EPOCHS = 15
numClasses = 43
train_loader = data.DataLoader(train_data, shuffle=True, batch_size = BATCH_SIZE,num_workers=3,pin_memory=True)
val_loader = data.DataLoader(val_data,batch_size = BATCH_SIZE*2,num_workers=3,pin_memory=True)
"""# Helper Functions
##For Visualisation
"""
def show_batch(dl):
for images, labels in dl:
fig, ax = plt.subplots(figsize=(12,12))
ax.set_xticks([]); ax.set_yticks([])
ax.imshow(make_grid(images[:64], nrow=8).permute(1, 2, 0).clamp(0,1))
break
show_batch(train_loader)
import jovian
project_name="01-German_traffic"
jovian.commit(project=project_name)
"""## For GPU"""
def get_default_device():
"""Pick GPU if available, else CPU"""
if torch.cuda.is_available():
return torch.device('cuda')
else:
return torch.device('cpu')
def to_device(data, device):
"""Move tensor(s) to chosen device"""
if isinstance(data, (list,tuple)):
return [to_device(x, device) for x in data]
return data.to(device, non_blocking=True)
class DeviceDataLoader():
"""Wrap a dataloader to move data to a device"""
def __init__(self, dl, device):
self.dl = dl
self.device = device
def __iter__(self):
"""Yield a batch of data after moving it to device"""
for b in self.dl:
yield to_device(b, self.device)
def __len__(self):
"""Number of batches"""
return len(self.dl)
device = get_default_device()
device
train_dl = DeviceDataLoader(train_loader, device)
valid_dl = DeviceDataLoader(val_loader, device)
"""## For Accuracy"""
def accuracy(outputs, labels):
_, preds = torch.max(outputs, dim=1)
return torch.tensor(torch.sum(preds == labels).item() / len(preds))
"""#Model Class
Loss and Propagation
"""
# Define optimizer and criterion functions
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
criterion = nn.CrossEntropyLoss()
class ImageBase(nn.Module):
def training_step(self, batch):
images, labels = batch
out = self(images) # Generate predictions
loss = F.cross_entropy(out, labels) # Calculate loss
return loss
def validation_step(self, batch):
images, labels = batch
out = self(images) # Generate predictions
loss = F.cross_entropy(out, labels) # Calculate loss
acc = accuracy(out, labels) # Calculate accuracy
return {'val_loss': loss.detach(), 'val_acc': acc}
def validation_epoch_end(self, outputs):
batch_losses = [x['val_loss'] for x in outputs]
epoch_loss = torch.stack(batch_losses).mean() # Combine losses
batch_accs = [x['val_acc'] for x in outputs]
epoch_acc = torch.stack(batch_accs).mean() # Combine accuracies
return {'val_loss': epoch_loss.item(), 'val_acc': epoch_acc.item()}
def epoch_end(self, epoch, result):
print("Epoch [{}], last_lr: {:.5f}, train_loss: {:.4f}, val_loss: {:.4f}, val_acc: {:.4f}".format(
epoch, result['lrs'][-1], result['train_loss'], result['val_loss'], result['val_acc']))
class Resnet(ImageBase):
def __init__(self,in_channels, output_dim):
super().__init__()
self.features = nn.Sequential(
nn.Conv2d(in_channels, out_channels=16, kernel_size=3, stride=1, padding=1),
nn.Conv2d(in_channels=16, out_channels=32, kernel_size=3, stride=1, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2), #32x16x16
nn.Dropout(0.25),
nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=1, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=1, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2), #128x8x8
nn.Dropout(0.25)
#nn.Flatten()
)
self.classifier = nn.Sequential(
nn.Linear(128*8*8, 512),
nn.ReLU(inplace=True),
nn.Dropout(0.5),
nn.Linear(in_features=512, out_features=output_dim)
)
def
|
(self, x):
x = self.features(x)
x = x.view(x.shape[0], -1)
x = self.classifier(x)
return x
model = Resnet(3,numClasses)
model = to_device(model, device)
# Function to count the number of parameters in the model
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
# Print model
print(model)
# Print number of trainable parameters in the model
print(f'The model has {count_parameters(model):,} trainable parameters')
@torch.no_grad()
def evaluate(model, val_loader):
model.eval()
outputs = [model.validation_step(batch) for batch in val_loader]
return model.validation_epoch_end(outputs)
def get_lr(optimizer):
for param_group in optimizer.param_groups:
return param_group['lr']
def fit_one_cycle(epochs, max_lr, model, train_loader, val_loader,
weight_decay=0, grad_clip=None, opt_func=torch.optim.SGD):
torch.cuda.empty_cache()
history = []
# Set up cutom optimizer with weight decay
optimizer = opt_func(model.parameters(), max_lr, weight_decay=weight_decay)
# Set up one-cycle learning rate scheduler
sched = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr, epochs=epochs,
steps_per_epoch=len(train_loader))
for epoch in range(epochs):
# Training Phase
model.train()
train_losses = []
lrs = []
for batch in train_loader:
loss = model.training_step(batch)
train_losses.append(loss)
loss.backward()
# Gradient clipping
if grad_clip:
nn.utils.clip_grad_value_(model.parameters(), grad_clip)
optimizer.step()
optimizer.zero_grad()
# Record & update learning rate
lrs.append(get_lr(optimizer))
sched.step()
# Validation phase
result = evaluate(model, val_loader)
result['train_loss'] = torch.stack(train_losses).mean().item()
result['lrs'] = lrs
model.epoch_end(epoch, result)
history.append(result)
return history
history = [evaluate(model, valid_dl)]
history
epochs = 10
max_lr = 0.001
grad_clip = 0.1
weight_decay = 1e-6
opt_func = torch.optim.Adam
# Commented out IPython magic to ensure Python compatibility.
# %%time
#
# history += fit_one_cycle(epochs, max_lr, model, train_dl, valid_dl,
# grad_clip=grad_clip,
# weight_decay=weight_decay,
# opt_func=opt_func)
def plot_accuracies(history):
accuracies = [x['val_acc'] for x in history]
plt.plot(accuracies, '-x')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.title('Accuracy vs. No. of epochs');
plot_accuracies(history)
def plot_losses(history):
train_losses = [x.get('train_loss') for x in history]
val
|
forward
|
identifier_name
|
format.go
|
}
var DEFAULT_ARRAY_FORMAT = basicFormat('a', `,`, '[', nil)
var DEFAULT_HASH_FORMAT = basicFormat('h', ` => `, '{', nil)
var DEFAULT_OBJECT_FORMAT = basicFormat('p', ` => `, '(', nil)
var DEFAULT_ARRAY_CONTAINER_FORMAT = basicFormat('p', `,`, '[', nil)
var DEFAULT_HASH_CONTAINER_FORMAT = basicFormat('p', ` => `, '{', nil)
var DEFAULT_OBJECT_CONTAINER_FORMAT = basicFormat('p', ` => `, '(', nil)
var DEFAULT_INDENTATION = newIndentation(false, 0)
var DEFAULT_FORMATS = eval.FormatMap(WrapHash([]*HashEntry{
WrapHashEntry(DefaultObjectType(), DEFAULT_OBJECT_FORMAT),
WrapHashEntry(DefaultTypeType(), DEFAULT_OBJECT_FORMAT),
WrapHashEntry(DefaultFloatType(), simpleFormat('f')),
WrapHashEntry(DefaultNumericType(), simpleFormat('d')),
WrapHashEntry(DefaultArrayType(), DEFAULT_ARRAY_FORMAT),
WrapHashEntry(DefaultHashType(), DEFAULT_HASH_FORMAT),
WrapHashEntry(DefaultBinaryType(), simpleFormat('B')),
WrapHashEntry(DefaultAnyType(), DEFAULT_ANY_FORMAT),
}))
var DEFAULT_CONTAINER_FORMATS = eval.FormatMap(WrapHash([]*HashEntry{
WrapHashEntry(DefaultObjectType(), DEFAULT_OBJECT_CONTAINER_FORMAT),
WrapHashEntry(DefaultTypeType(), DEFAULT_OBJECT_CONTAINER_FORMAT),
WrapHashEntry(DefaultFloatType(), DEFAULT_PROGRAM_FORMAT),
WrapHashEntry(DefaultNumericType(), DEFAULT_PROGRAM_FORMAT),
WrapHashEntry(DefaultArrayType(), DEFAULT_ARRAY_CONTAINER_FORMAT),
WrapHashEntry(DefaultHashType(), DEFAULT_HASH_CONTAINER_FORMAT),
WrapHashEntry(DefaultBinaryType(), DEFAULT_PROGRAM_FORMAT),
WrapHashEntry(DefaultAnyType(), DEFAULT_PROGRAM_FORMAT),
}))
var delimiters = []byte{'[', '{', '(', '<', '|'}
var delimiterPairs = map[byte][2]byte{
'[': {'[', ']'},
'{': {'{', '}'},
'(': {'(', ')'},
'<': {'<', '>'},
'|': {'|', '|'},
' ': {0, 0},
0: {'[', ']'},
}
var NONE = newFormatContext2(DEFAULT_INDENTATION, DEFAULT_FORMATS, nil)
var EXPANDED = newFormatContext2(DEFAULT_INDENTATION, DEFAULT_FORMATS, map[string]string{`expanded`: `true`})
var PROGRAM = newFormatContext2(DEFAULT_INDENTATION, eval.FormatMap(SingletonHash(DefaultAnyType(), DEFAULT_OBJECT_FORMAT)), nil)
func newFormatContext(t eval.Type, format eval.Format, indentation eval.Indentation) eval.FormatContext {
return &formatContext{indentation, WrapHash([]*HashEntry{WrapHashEntry(t, format)}), nil}
}
func newFormatContext2(indentation eval.Indentation, formatMap eval.FormatMap, properties map[string]string) eval.FormatContext {
return &formatContext{indentation, formatMap, properties}
}
var TYPE_STRING_FORMAT = NewVariantType(DefaultStringType(), DefaultDefaultType(), DefaultHashType())
func newFormatContext3(value eval.Value, format eval.Value) (context eval.FormatContext, err error) {
eval.AssertInstance(`String format`, TYPE_STRING_FORMAT, format)
defer func() {
if r := recover(); r != nil {
var ok bool
if err, ok = r.(issue.Reported); !ok {
panic(r)
}
}
}()
switch format.(type) {
case stringValue:
context = eval.NewFormatContext(value.PType(), newFormat(format.String()), DEFAULT_INDENTATION)
case *DefaultValue:
context = eval.DEFAULT_FORMAT_CONTEXT
default:
context = newFormatContext2(DEFAULT_INDENTATION, mergeFormats(DEFAULT_FORMATS, NewFormatMap(format.(*HashValue))), nil)
}
return
}
func mergeFormats(lower eval.FormatMap, higher eval.FormatMap) eval.FormatMap {
if lower == nil || lower.Len() == 0 {
return higher
}
if higher == nil || higher.Len() == 0 {
return lower
}
higherKeys := higher.Keys()
normLower := WrapHash2(eval.Reject2(lower.Entries(), func(lev eval.Value) bool {
le := lev.(*HashEntry)
return eval.Any2(higherKeys, func(hk eval.Value) bool {
return !hk.Equals(le.Key(), nil) && eval.IsAssignable(hk.(eval.Type), le.Key().(eval.Type))
})
}))
merged := make([]*HashEntry, 0, 8)
normLower.Keys().AddAll(higherKeys).Unique().Each(func(k eval.Value) {
if low, ok := normLower.Get(k); ok {
if high, ok := higher.Get(k); ok {
merged = append(merged, WrapHashEntry(k, merge(low.(eval.Format), high.(eval.Format))))
} else {
merged = append(merged, WrapHashEntry(k, low))
}
} else {
if high, ok := higher.Get(k); ok {
merged = append(merged, WrapHashEntry(k, high))
}
}
})
sort.Slice(merged, func(ax, bx int) bool {
a := merged[ax].Key().(eval.Type)
b := merged[bx].Key().(eval.Type)
if a.Equals(b, nil) {
return false
}
ab := eval.IsAssignable(b, a)
ba := eval.IsAssignable(a, b)
if ab && !ba {
return true
}
if !ab && ba {
return false
}
ra := typeRank(a)
rb := typeRank(b)
if ra < rb {
return true
}
if ra > rb {
return false
}
return strings.Compare(a.String(), b.String()) < 0
})
return eval.FormatMap(WrapHash(merged))
}
func merge(low eval.Format, high eval.Format) eval.Format {
sep := high.Separator(NO_STRING)
if sep == NO_STRING {
sep = low.Separator(NO_STRING)
}
sep2 := high.Separator2(NO_STRING)
if sep2 == NO_STRING {
sep2 = low.Separator2(NO_STRING)
}
return &format{
origFmt: high.OrigFormat(),
alt: high.IsAlt(),
leftDelimiter: high.LeftDelimiter(),
formatChar: high.FormatChar(),
zeroPad: high.IsZeroPad(),
prec: high.Precision(),
left: high.IsLeft(),
plus: high.Plus(),
width: high.Width(),
separator2: sep2,
separator: sep,
containerFormats: mergeFormats(low.ContainerFormats(), high.ContainerFormats()),
}
}
func typeRank(pt eval.Type) int {
switch pt.(type) {
case *NumericType, *IntegerType, *FloatType:
return 13
case *stringType, *vcStringType, *scStringType:
return 12
case *EnumType:
return 11
case *PatternType:
return 10
case *ArrayType:
return 4
case *TupleType:
return 3
case *HashType:
return 2
case *StructType:
return 1
}
return 0
}
var TYPE_STRING_FORMAT_TYPE_HASH = NewHashType(DefaultTypeType(), NewVariantType(DefaultStringType(), DefaultHashType()), nil)
func NewFormatMap(h *HashValue) eval.FormatMap {
eval.AssertInstance(`String format type hash`, TYPE_STRING_FORMAT_TYPE_HASH, h)
result := make([]*HashEntry, h.Len())
h.EachWithIndex(func(elem eval.Value, idx int) {
entry := elem.(*HashEntry)
pt := entry.Key().(eval.Type)
v := entry.Value()
if s, ok := v.(stringValue); ok {
result[idx] = WrapHashEntry(pt, newFormat(s.String()))
} else {
result[idx] = WrapHashEntry(pt, FormatFromHash(v.(*HashValue)))
}
})
return eval.FormatMap(WrapHash(result))
}
func NewFormatMap2(t eval.Type, tf eval.Format, fm eval.FormatMap) eval.FormatMap {
return mergeFormats(fm, eval.FormatMap(WrapHash([]*HashEntry{{t, tf}})))
}
var TYPE_STRING_FORMAT_HASH = NewStructType([]*StructElement{
NewStructElement2(`format`, DefaultStringType()),
NewStructElement(NewOptionalType3(`separator`), DefaultStringType()),
NewStructElement(NewOptionalType3(`separator2`), DefaultStringType()),
NewStructElement(NewOptionalType3(`string_formats`), DefaultHashType()),
})
func FormatFromHash(h *HashValue) eval.Format {
eval.AssertInstance(`String format hash`, TYPE_STRING_FORMAT_HASH, h)
stringArg := func(key string, required bool) string {
v := h.Get5(key, _UNDEF)
switch v.(type) {
case stringValue:
return v.String()
default:
return NO_STRING
}
}
var cf eval.FormatMap
cf = nil
if v := h.Get5(`string_formats`, _UNDEF); !eval.Equals(v, _UNDEF) {
cf = NewFormatMap(v.(*HashValue))
}
return parseFormat(stringArg(`format`, true), stringArg(`separator`, false), stringArg(`separator2`, false), cf)
}
func
|
eval.NewFormatContext3 = newFormatContext3
eval.NewIndentation = newIndentation
eval.NewFormat = newFormat
eval.PRETTY_EXPANDED = eval.PRETTY.WithProperties(map[string]string{`expanded`: `true`})
|
random_line_split
|
|
format.go
|
Entry, h.Len())
h.EachWithIndex(func(elem eval.Value, idx int) {
entry := elem.(*HashEntry)
pt := entry.Key().(eval.Type)
v := entry.Value()
if s, ok := v.(stringValue); ok {
result[idx] = WrapHashEntry(pt, newFormat(s.String()))
} else {
result[idx] = WrapHashEntry(pt, FormatFromHash(v.(*HashValue)))
}
})
return eval.FormatMap(WrapHash(result))
}
func NewFormatMap2(t eval.Type, tf eval.Format, fm eval.FormatMap) eval.FormatMap {
return mergeFormats(fm, eval.FormatMap(WrapHash([]*HashEntry{{t, tf}})))
}
var TYPE_STRING_FORMAT_HASH = NewStructType([]*StructElement{
NewStructElement2(`format`, DefaultStringType()),
NewStructElement(NewOptionalType3(`separator`), DefaultStringType()),
NewStructElement(NewOptionalType3(`separator2`), DefaultStringType()),
NewStructElement(NewOptionalType3(`string_formats`), DefaultHashType()),
})
func FormatFromHash(h *HashValue) eval.Format {
eval.AssertInstance(`String format hash`, TYPE_STRING_FORMAT_HASH, h)
stringArg := func(key string, required bool) string {
v := h.Get5(key, _UNDEF)
switch v.(type) {
case stringValue:
return v.String()
default:
return NO_STRING
}
}
var cf eval.FormatMap
cf = nil
if v := h.Get5(`string_formats`, _UNDEF); !eval.Equals(v, _UNDEF) {
cf = NewFormatMap(v.(*HashValue))
}
return parseFormat(stringArg(`format`, true), stringArg(`separator`, false), stringArg(`separator2`, false), cf)
}
func (c *formatContext) Indentation() eval.Indentation {
return c.indentation
}
func (c *formatContext) FormatMap() eval.FormatMap {
return c.formatMap
}
func (c *formatContext) Property(key string) (string, bool) {
if c.properties != nil {
pv, ok := c.properties[key]
return pv, ok
}
return ``, false
}
func (c *formatContext) Properties() map[string]string {
return c.properties
}
func (c *formatContext) SetProperty(key, value string) {
if c.properties == nil {
c.properties = map[string]string{key: value}
} else {
c.properties[key] = value
}
}
func (c *formatContext) UnsupportedFormat(t eval.Type, supportedFormats string, actualFormat eval.Format) error {
return eval.Error(eval.EVAL_UNSUPPORTED_STRING_FORMAT, issue.H{`format`: actualFormat.FormatChar(), `type`: t.Name(), `supported_formats`: supportedFormats})
}
func (c *formatContext) WithProperties(properties map[string]string) eval.FormatContext {
if c.properties != nil {
merged := make(map[string]string, len(c.properties)+len(properties))
for k, v := range c.properties {
merged[k] = v
}
for k, v := range properties {
merged[k] = v
}
properties = merged
}
return newFormatContext2(c.indentation, c.formatMap, properties)
}
func newIndentation(indenting bool, level int) eval.Indentation {
return newIndentation2(true, indenting, level)
}
func newIndentation2(first bool, indenting bool, level int) eval.Indentation {
return &indentation{first, indenting, level, strings.Repeat(` `, level)}
}
func (i *indentation) Breaks() bool {
return i.indenting && i.level > 0 && !i.first
}
func (i *indentation) Level() int {
return i.level
}
func (i *indentation) Increase(indenting bool) eval.Indentation {
return newIndentation2(true, indenting, i.level+1)
}
func (i *indentation) Indenting(indenting bool) eval.Indentation {
if i.indenting == indenting {
return i
}
return &indentation{i.first, indenting, i.level, i.padding}
}
func (i *indentation) IsFirst() bool {
return i.first
}
func (i *indentation) IsIndenting() bool {
return i.indenting
}
func (i *indentation) Padding() string {
return i.padding
}
func (i *indentation) Subsequent() eval.Indentation {
if i.first {
return &indentation{false, i.indenting, i.level, i.padding}
}
return i
}
// NewFormat parses a format string into a Format
func newFormat(format string) eval.Format {
return parseFormat(format, NO_STRING, NO_STRING, nil)
}
func simpleFormat(formatChar byte) eval.Format {
return basicFormat(formatChar, NO_STRING, '[', nil)
}
func basicFormat(formatChar byte, sep2 string, leftDelimiter byte, containerFormats eval.FormatMap) eval.Format {
return &format{
formatChar: formatChar,
prec: -1,
width: -1,
origFmt: `%` + string(formatChar),
separator: `,`,
separator2: sep2,
leftDelimiter: leftDelimiter,
containerFormats: containerFormats,
}
}
func basicAltFormat(formatChar byte, sep2 string, leftDelimiter byte, containerFormats eval.FormatMap) eval.Format {
return &format{
formatChar: formatChar,
alt: true,
prec: -1,
width: -1,
origFmt: `%` + string(formatChar),
separator: `,`,
separator2: sep2,
leftDelimiter: leftDelimiter,
containerFormats: containerFormats,
}
}
func parseFormat(origFmt string, separator string, separator2 string, containerFormats eval.FormatMap) eval.Format {
group := eval.FORMAT_PATTERN.FindStringSubmatch(origFmt)
if group == nil {
panic(eval.Error(eval.EVAL_INVALID_STRING_FORMAT_SPEC, issue.H{`format`: origFmt}))
}
flags := group[1]
plus := byte(0)
if hasDelimOnce(flags, origFmt, ' ') {
plus = ' '
} else if hasDelimOnce(flags, origFmt, '+') {
plus = '+'
}
foundDelim := byte(0)
for _, delim := range delimiters {
if hasDelimOnce(flags, origFmt, delim) {
if foundDelim != 0 {
panic(eval.Error(eval.EVAL_INVALID_STRING_FORMAT_DELIMITER, issue.H{`delimiter`: foundDelim}))
}
foundDelim = delim
}
}
if foundDelim == 0 && plus == ' ' {
foundDelim = plus
}
width := -1
prec := -1
if tmp := group[2]; tmp != `` {
width, _ = strconv.Atoi(tmp)
}
if tmp := group[3]; tmp != `` {
prec, _ = strconv.Atoi(tmp)
}
return &format{
origFmt: origFmt,
formatChar: group[4][0],
left: hasDelimOnce(flags, origFmt, '-'),
alt: hasDelimOnce(flags, origFmt, '#'),
zeroPad: hasDelimOnce(flags, origFmt, '0'),
plus: plus,
leftDelimiter: foundDelim,
width: width,
prec: prec,
separator: separator,
separator2: separator2,
containerFormats: containerFormats,
}
}
func (f *format) unParse() string {
b := bytes.NewBufferString(`%`)
if f.zeroPad {
b.Write([]byte{'0'})
}
if f.plus != 0 {
b.Write([]byte{f.plus})
}
if f.left {
b.Write([]byte{'-'})
}
if f.leftDelimiter != 0 && f.leftDelimiter != f.plus {
b.Write([]byte{f.leftDelimiter})
}
if f.width >= 0 {
b.WriteString(strconv.Itoa(f.width))
}
if f.prec >= 0 {
b.Write([]byte{'.'})
b.WriteString(strconv.Itoa(f.prec))
}
if f.alt {
b.Write([]byte{'#'})
}
b.Write([]byte{f.formatChar})
return b.String()
}
func hasDelimOnce(flags string, format string, delim byte) bool {
found := false
for _, b := range flags {
if byte(b) == delim {
if found {
panic(eval.Error(eval.EVAL_INVALID_STRING_FORMAT_REPEATED_FLAG, issue.H{`format`: format}))
}
found = true
}
}
return found
}
func (f *format) HasStringFlags() bool {
return f.left || f.width >= 0 || f.prec >= 0
}
func (f *format) ApplyStringFlags(b io.Writer, str string, quoted bool) {
if f.HasStringFlags() {
bld := bytes.NewBufferString(``)
if quoted {
utils.PuppetQuote(bld, str)
str = bld.String()
bld.Truncate(0)
}
bld.WriteByte('%')
if f.IsLeft() {
bld.WriteByte('-')
}
if f.Width() >= 0
|
{
fmt.Fprintf(bld, `%d`, f.Width())
}
|
conditional_block
|
|
format.go
|
return 12
case *EnumType:
return 11
case *PatternType:
return 10
case *ArrayType:
return 4
case *TupleType:
return 3
case *HashType:
return 2
case *StructType:
return 1
}
return 0
}
var TYPE_STRING_FORMAT_TYPE_HASH = NewHashType(DefaultTypeType(), NewVariantType(DefaultStringType(), DefaultHashType()), nil)
func NewFormatMap(h *HashValue) eval.FormatMap {
eval.AssertInstance(`String format type hash`, TYPE_STRING_FORMAT_TYPE_HASH, h)
result := make([]*HashEntry, h.Len())
h.EachWithIndex(func(elem eval.Value, idx int) {
entry := elem.(*HashEntry)
pt := entry.Key().(eval.Type)
v := entry.Value()
if s, ok := v.(stringValue); ok {
result[idx] = WrapHashEntry(pt, newFormat(s.String()))
} else {
result[idx] = WrapHashEntry(pt, FormatFromHash(v.(*HashValue)))
}
})
return eval.FormatMap(WrapHash(result))
}
func NewFormatMap2(t eval.Type, tf eval.Format, fm eval.FormatMap) eval.FormatMap {
return mergeFormats(fm, eval.FormatMap(WrapHash([]*HashEntry{{t, tf}})))
}
var TYPE_STRING_FORMAT_HASH = NewStructType([]*StructElement{
NewStructElement2(`format`, DefaultStringType()),
NewStructElement(NewOptionalType3(`separator`), DefaultStringType()),
NewStructElement(NewOptionalType3(`separator2`), DefaultStringType()),
NewStructElement(NewOptionalType3(`string_formats`), DefaultHashType()),
})
func FormatFromHash(h *HashValue) eval.Format {
eval.AssertInstance(`String format hash`, TYPE_STRING_FORMAT_HASH, h)
stringArg := func(key string, required bool) string {
v := h.Get5(key, _UNDEF)
switch v.(type) {
case stringValue:
return v.String()
default:
return NO_STRING
}
}
var cf eval.FormatMap
cf = nil
if v := h.Get5(`string_formats`, _UNDEF); !eval.Equals(v, _UNDEF) {
cf = NewFormatMap(v.(*HashValue))
}
return parseFormat(stringArg(`format`, true), stringArg(`separator`, false), stringArg(`separator2`, false), cf)
}
func (c *formatContext) Indentation() eval.Indentation {
return c.indentation
}
func (c *formatContext) FormatMap() eval.FormatMap {
return c.formatMap
}
func (c *formatContext) Property(key string) (string, bool) {
if c.properties != nil {
pv, ok := c.properties[key]
return pv, ok
}
return ``, false
}
func (c *formatContext) Properties() map[string]string {
return c.properties
}
func (c *formatContext) SetProperty(key, value string) {
if c.properties == nil {
c.properties = map[string]string{key: value}
} else {
c.properties[key] = value
}
}
func (c *formatContext) UnsupportedFormat(t eval.Type, supportedFormats string, actualFormat eval.Format) error {
return eval.Error(eval.EVAL_UNSUPPORTED_STRING_FORMAT, issue.H{`format`: actualFormat.FormatChar(), `type`: t.Name(), `supported_formats`: supportedFormats})
}
func (c *formatContext) WithProperties(properties map[string]string) eval.FormatContext {
if c.properties != nil {
merged := make(map[string]string, len(c.properties)+len(properties))
for k, v := range c.properties {
merged[k] = v
}
for k, v := range properties {
merged[k] = v
}
properties = merged
}
return newFormatContext2(c.indentation, c.formatMap, properties)
}
func newIndentation(indenting bool, level int) eval.Indentation {
return newIndentation2(true, indenting, level)
}
func newIndentation2(first bool, indenting bool, level int) eval.Indentation {
return &indentation{first, indenting, level, strings.Repeat(` `, level)}
}
func (i *indentation) Breaks() bool {
return i.indenting && i.level > 0 && !i.first
}
func (i *indentation) Level() int {
return i.level
}
func (i *indentation) Increase(indenting bool) eval.Indentation {
return newIndentation2(true, indenting, i.level+1)
}
func (i *indentation) Indenting(indenting bool) eval.Indentation {
if i.indenting == indenting {
return i
}
return &indentation{i.first, indenting, i.level, i.padding}
}
func (i *indentation) IsFirst() bool {
return i.first
}
func (i *indentation) IsIndenting() bool {
return i.indenting
}
func (i *indentation) Padding() string {
return i.padding
}
func (i *indentation) Subsequent() eval.Indentation {
if i.first {
return &indentation{false, i.indenting, i.level, i.padding}
}
return i
}
// NewFormat parses a format string into a Format
func newFormat(format string) eval.Format {
return parseFormat(format, NO_STRING, NO_STRING, nil)
}
func simpleFormat(formatChar byte) eval.Format {
return basicFormat(formatChar, NO_STRING, '[', nil)
}
func basicFormat(formatChar byte, sep2 string, leftDelimiter byte, containerFormats eval.FormatMap) eval.Format {
return &format{
formatChar: formatChar,
prec: -1,
width: -1,
origFmt: `%` + string(formatChar),
separator: `,`,
separator2: sep2,
leftDelimiter: leftDelimiter,
containerFormats: containerFormats,
}
}
func basicAltFormat(formatChar byte, sep2 string, leftDelimiter byte, containerFormats eval.FormatMap) eval.Format {
return &format{
formatChar: formatChar,
alt: true,
prec: -1,
width: -1,
origFmt: `%` + string(formatChar),
separator: `,`,
separator2: sep2,
leftDelimiter: leftDelimiter,
containerFormats: containerFormats,
}
}
func parseFormat(origFmt string, separator string, separator2 string, containerFormats eval.FormatMap) eval.Format {
group := eval.FORMAT_PATTERN.FindStringSubmatch(origFmt)
if group == nil {
panic(eval.Error(eval.EVAL_INVALID_STRING_FORMAT_SPEC, issue.H{`format`: origFmt}))
}
flags := group[1]
plus := byte(0)
if hasDelimOnce(flags, origFmt, ' ') {
plus = ' '
} else if hasDelimOnce(flags, origFmt, '+') {
plus = '+'
}
foundDelim := byte(0)
for _, delim := range delimiters {
if hasDelimOnce(flags, origFmt, delim) {
if foundDelim != 0 {
panic(eval.Error(eval.EVAL_INVALID_STRING_FORMAT_DELIMITER, issue.H{`delimiter`: foundDelim}))
}
foundDelim = delim
}
}
if foundDelim == 0 && plus == ' ' {
foundDelim = plus
}
width := -1
prec := -1
if tmp := group[2]; tmp != `` {
width, _ = strconv.Atoi(tmp)
}
if tmp := group[3]; tmp != `` {
prec, _ = strconv.Atoi(tmp)
}
return &format{
origFmt: origFmt,
formatChar: group[4][0],
left: hasDelimOnce(flags, origFmt, '-'),
alt: hasDelimOnce(flags, origFmt, '#'),
zeroPad: hasDelimOnce(flags, origFmt, '0'),
plus: plus,
leftDelimiter: foundDelim,
width: width,
prec: prec,
separator: separator,
separator2: separator2,
containerFormats: containerFormats,
}
}
func (f *format) unParse() string {
b := bytes.NewBufferString(`%`)
if f.zeroPad {
b.Write([]byte{'0'})
}
if f.plus != 0 {
b.Write([]byte{f.plus})
}
if f.left {
b.Write([]byte{'-'})
}
if f.leftDelimiter != 0 && f.leftDelimiter != f.plus {
b.Write([]byte{f.leftDelimiter})
}
if f.width >= 0 {
b.WriteString(strconv.Itoa(f.width))
}
if f.prec >= 0 {
b.Write([]byte{'.'})
b.WriteString(strconv.Itoa(f.prec))
}
if f.alt {
b.Write([]byte{'#'})
}
b.Write([]byte{f.formatChar})
return b.String()
}
func hasDelimOnce(flags string, format string, delim byte) bool {
found := false
for _, b := range flags {
if byte(b) == delim {
if found {
panic(eval.Error(eval.EVAL_INVALID_STRING_FORMAT_REPEATED_FLAG, issue.H{`format`: format}))
}
found = true
}
}
return found
}
func (f *format)
|
HasStringFlags
|
identifier_name
|
|
format.go
|
(c *formatContext) Property(key string) (string, bool) {
if c.properties != nil {
pv, ok := c.properties[key]
return pv, ok
}
return ``, false
}
func (c *formatContext) Properties() map[string]string {
return c.properties
}
func (c *formatContext) SetProperty(key, value string) {
if c.properties == nil {
c.properties = map[string]string{key: value}
} else {
c.properties[key] = value
}
}
func (c *formatContext) UnsupportedFormat(t eval.Type, supportedFormats string, actualFormat eval.Format) error {
return eval.Error(eval.EVAL_UNSUPPORTED_STRING_FORMAT, issue.H{`format`: actualFormat.FormatChar(), `type`: t.Name(), `supported_formats`: supportedFormats})
}
func (c *formatContext) WithProperties(properties map[string]string) eval.FormatContext {
if c.properties != nil {
merged := make(map[string]string, len(c.properties)+len(properties))
for k, v := range c.properties {
merged[k] = v
}
for k, v := range properties {
merged[k] = v
}
properties = merged
}
return newFormatContext2(c.indentation, c.formatMap, properties)
}
func newIndentation(indenting bool, level int) eval.Indentation {
return newIndentation2(true, indenting, level)
}
func newIndentation2(first bool, indenting bool, level int) eval.Indentation {
return &indentation{first, indenting, level, strings.Repeat(` `, level)}
}
func (i *indentation) Breaks() bool {
return i.indenting && i.level > 0 && !i.first
}
func (i *indentation) Level() int {
return i.level
}
func (i *indentation) Increase(indenting bool) eval.Indentation {
return newIndentation2(true, indenting, i.level+1)
}
func (i *indentation) Indenting(indenting bool) eval.Indentation {
if i.indenting == indenting {
return i
}
return &indentation{i.first, indenting, i.level, i.padding}
}
func (i *indentation) IsFirst() bool {
return i.first
}
func (i *indentation) IsIndenting() bool {
return i.indenting
}
func (i *indentation) Padding() string {
return i.padding
}
func (i *indentation) Subsequent() eval.Indentation {
if i.first {
return &indentation{false, i.indenting, i.level, i.padding}
}
return i
}
// NewFormat parses a format string into a Format
func newFormat(format string) eval.Format {
return parseFormat(format, NO_STRING, NO_STRING, nil)
}
func simpleFormat(formatChar byte) eval.Format {
return basicFormat(formatChar, NO_STRING, '[', nil)
}
func basicFormat(formatChar byte, sep2 string, leftDelimiter byte, containerFormats eval.FormatMap) eval.Format {
return &format{
formatChar: formatChar,
prec: -1,
width: -1,
origFmt: `%` + string(formatChar),
separator: `,`,
separator2: sep2,
leftDelimiter: leftDelimiter,
containerFormats: containerFormats,
}
}
func basicAltFormat(formatChar byte, sep2 string, leftDelimiter byte, containerFormats eval.FormatMap) eval.Format {
return &format{
formatChar: formatChar,
alt: true,
prec: -1,
width: -1,
origFmt: `%` + string(formatChar),
separator: `,`,
separator2: sep2,
leftDelimiter: leftDelimiter,
containerFormats: containerFormats,
}
}
func parseFormat(origFmt string, separator string, separator2 string, containerFormats eval.FormatMap) eval.Format {
group := eval.FORMAT_PATTERN.FindStringSubmatch(origFmt)
if group == nil {
panic(eval.Error(eval.EVAL_INVALID_STRING_FORMAT_SPEC, issue.H{`format`: origFmt}))
}
flags := group[1]
plus := byte(0)
if hasDelimOnce(flags, origFmt, ' ') {
plus = ' '
} else if hasDelimOnce(flags, origFmt, '+') {
plus = '+'
}
foundDelim := byte(0)
for _, delim := range delimiters {
if hasDelimOnce(flags, origFmt, delim) {
if foundDelim != 0 {
panic(eval.Error(eval.EVAL_INVALID_STRING_FORMAT_DELIMITER, issue.H{`delimiter`: foundDelim}))
}
foundDelim = delim
}
}
if foundDelim == 0 && plus == ' ' {
foundDelim = plus
}
width := -1
prec := -1
if tmp := group[2]; tmp != `` {
width, _ = strconv.Atoi(tmp)
}
if tmp := group[3]; tmp != `` {
prec, _ = strconv.Atoi(tmp)
}
return &format{
origFmt: origFmt,
formatChar: group[4][0],
left: hasDelimOnce(flags, origFmt, '-'),
alt: hasDelimOnce(flags, origFmt, '#'),
zeroPad: hasDelimOnce(flags, origFmt, '0'),
plus: plus,
leftDelimiter: foundDelim,
width: width,
prec: prec,
separator: separator,
separator2: separator2,
containerFormats: containerFormats,
}
}
func (f *format) unParse() string {
b := bytes.NewBufferString(`%`)
if f.zeroPad {
b.Write([]byte{'0'})
}
if f.plus != 0 {
b.Write([]byte{f.plus})
}
if f.left {
b.Write([]byte{'-'})
}
if f.leftDelimiter != 0 && f.leftDelimiter != f.plus {
b.Write([]byte{f.leftDelimiter})
}
if f.width >= 0 {
b.WriteString(strconv.Itoa(f.width))
}
if f.prec >= 0 {
b.Write([]byte{'.'})
b.WriteString(strconv.Itoa(f.prec))
}
if f.alt {
b.Write([]byte{'#'})
}
b.Write([]byte{f.formatChar})
return b.String()
}
func hasDelimOnce(flags string, format string, delim byte) bool {
found := false
for _, b := range flags {
if byte(b) == delim {
if found {
panic(eval.Error(eval.EVAL_INVALID_STRING_FORMAT_REPEATED_FLAG, issue.H{`format`: format}))
}
found = true
}
}
return found
}
func (f *format) HasStringFlags() bool {
return f.left || f.width >= 0 || f.prec >= 0
}
func (f *format) ApplyStringFlags(b io.Writer, str string, quoted bool) {
if f.HasStringFlags() {
bld := bytes.NewBufferString(``)
if quoted {
utils.PuppetQuote(bld, str)
str = bld.String()
bld.Truncate(0)
}
bld.WriteByte('%')
if f.IsLeft() {
bld.WriteByte('-')
}
if f.Width() >= 0 {
fmt.Fprintf(bld, `%d`, f.Width())
}
if f.Precision() >= 0 {
fmt.Fprintf(bld, `.%d`, f.Precision())
}
bld.WriteByte('s')
fmt.Fprintf(b, bld.String(), str)
} else {
if quoted {
utils.PuppetQuote(b, str)
} else {
io.WriteString(b, str)
}
}
}
func (f *format) Width() int {
return f.width
}
func (f *format) Precision() int {
return f.prec
}
func (f *format) FormatChar() byte {
return f.formatChar
}
func (f *format) Plus() byte {
return f.plus
}
func (f *format) IsAlt() bool {
return f.alt
}
func (f *format) IsLeft() bool {
return f.left
}
func (f *format) IsZeroPad() bool {
return f.zeroPad
}
func (f *format) LeftDelimiter() byte {
return f.leftDelimiter
}
func (f *format) ContainerFormats() eval.FormatMap {
return f.containerFormats
}
func (f *format) Separator(dflt string) string {
if f.separator == NO_STRING {
return dflt
}
return f.separator
}
func (f *format) Separator2(dflt string) string {
if f.separator2 == NO_STRING {
return dflt
}
return f.separator2
}
func (f *format) OrigFormat() string {
return f.origFmt
}
func (f *format) ReplaceFormatChar(c byte) eval.Format {
nf := &format{}
*nf = *f
nf.formatChar = c
nf.origFmt = nf.unParse()
return nf
}
func (f *format) WithoutWidth() eval.Format
|
{
nf := &format{}
*nf = *f
nf.width = -1
nf.left = false
nf.zeroPad = false
nf.alt = false
nf.origFmt = nf.unParse()
return nf
}
|
identifier_body
|
|
text.rs
|
font: Sdl2Font,
) -> Result<Self, String>
|
total_width += w;
total_height = h;
} else {
return Err(format!("Unsupported character: {}", c));
}
}
let mut font_canvas = Surface::new(
total_width,
total_height,
texture_creator.default_pixel_format(),
)?
.into_canvas()?;
let font_texture_creator = font_canvas.texture_creator();
let mut x = 0;
for (i, c) in ASCII.char_indices() {
let GlyphRegion { width, .. } = glyphs[i];
let char_surface = font
.render(&c.to_string())
.blended(Color::RGBA(255, 255, 255, 255))
.map_err(to_string)?;
let char_tex = font_texture_creator
.create_texture_from_surface(&char_surface)
.map_err(to_string)?;
let target = Rect::new(x, 0, width, total_height);
font_canvas.copy(&char_tex, None, Some(target))?;
x += width as i32;
}
Ok(Font {
font_canvas: font_canvas,
glyphs,
line_height: total_height,
space_advance,
texture_creator,
cached_texts: HashMap::new(),
})
}
pub fn draw(&mut self, screen_txt: ScreenText, cvs: &mut WindowCanvas) -> Result<(), String> {
let cache_key = screen_txt.text.to_string();
if let Some((ref mut tex, w, h)) = self.cached_texts.get_mut(&cache_key) {
let (tw, th) = scale_dim(screen_txt.scale, *w, *h);
let ScreenPos(x, y) = screen_txt.pos.align(screen_txt.align, tw, th);
tex.set_alpha_mod(screen_txt.alpha);
cvs.copy(tex, Rect::new(0, 0, *w, *h), Rect::new(x, y, tw, th))?;
return Ok(());
}
let pos = screen_txt.pos;
let align = screen_txt.align;
let prepared_text = prepare(screen_txt, self);
let (w, h) = prepared_text.dim;
let ScreenPos(x, y) = pos.align(align, w, h);
let pixel_format = self.texture_creator.default_pixel_format();
// let pixel_format = sdl2::pixels::PixelFormatEnum::ARGB32;
// draw the text to the temporay image
let mut text_cvs = Surface::new(w, h, pixel_format)?.into_canvas()?;
let font_texture_creator = text_cvs.texture_creator();
let mut font_texture = font_texture_creator
.create_texture_from_surface(self.font_canvas.surface())
.map_err(to_string)?;
draw_text(prepared_text, &mut text_cvs, &mut font_texture, (w, h))?;
// create a texture for the correct render target and for caching
let target_tex = self
.texture_creator
.create_texture_from_surface(text_cvs.surface())
.map_err(to_string)?;
// actually draw the text texture
cvs.copy(&target_tex, Rect::new(0, 0, w, h), Rect::new(x, y, w, h))?;
// cache the created texture for future frames
self.cached_texts.insert(cache_key, (target_tex, w, h));
Ok(())
}
}
struct PreparedWord {
chars: Vec<(i32, i32, u32, u32)>,
width: u32,
}
impl PreparedWord {
fn prepare(glyphs: &Vec<GlyphRegion>, txt: &str) -> Self {
let mut x = 0;
let mut chars = Vec::new();
for c in txt.chars() {
if let Some(r) = find_glyph_region(c, glyphs) {
chars.push((r.start, r.advance, r.width, r.height));
x = x + r.advance;
}
}
PreparedWord {
chars,
width: x as u32,
}
}
fn draw(
self: &Self,
texture: &Texture,
cvs: &mut Canvas<Surface>,
pos: (i32, i32),
) -> Result<(), String> {
let (mut x, y) = pos;
for (start, advance, width, height) in self.chars.iter() {
let from = Rect::new(*start, 0, *width, *height);
let to = Rect::new(x, y, *width, *height);
cvs.copy(&texture, Some(from), Some(to))?;
x = x + advance;
}
Ok(())
}
}
struct PreparedText {
lines: Vec<(i32, u32, Vec<(i32, PreparedWord)>)>,
dim: (u32, u32),
text_dim: (u32, u32),
align: Align,
color: (u8, u8, u8, u8),
background: Option<Color>,
padding: u32,
border: Option<(u32, Color)>,
}
fn prepare<'a>(text: ScreenText, font: &'a Font) -> PreparedText {
let (mut x, mut y) = (0, 0);
let mut lines = Vec::new();
let mut text_width: u32 = 0;
let border_width = text.border.map(|(w, _)| w).unwrap_or(0);
let spacing = 2 * text.padding + 2 * border_width;
let max_width = text.max_width - spacing;
for line in text.text.into_string().lines() {
let mut words = Vec::new();
let mut line_width: u32 = 0;
for t in line.split_whitespace() {
let word = PreparedWord::prepare(&font.glyphs, t);
let text_width = word.width;
let advance = font.space_advance + text_width as i32;
if x > 0 && (x + advance) as u32 > max_width {
// text does not fit in current line
// => wrap text (no wrap if first word in line)
lines.push((y, max_width, words));
words = Vec::new();
x = 0;
y += font.line_height as i32;
line_width = max_width;
}
words.push((x, word));
x += advance;
if x as u32 > line_width {
line_width = x as u32;
}
}
lines.push((y, line_width, words));
x = 0;
y += font.line_height as i32;
text_width = max(text_width, line_width);
}
let w = text_width + spacing;
let h = y as u32 + spacing;
PreparedText {
lines,
dim: (max(text.min_width, w), max(text.min_height, h)),
text_dim: (w, h),
align: text.text_align,
color: text.color,
background: text.background.map(|(r, g, b, a)| Color::RGBA(r, g, b, a)),
padding: text.padding,
border: text
.border
.map(|(w, (r, g, b, a))| (w, Color::RGBA(r, g, b, a))),
}
}
fn find_glyph_region(c: char, metrics: &Vec<GlyphRegion>) -> Option<&GlyphRegion> {
let ascii_index = c as usize;
if ascii_index >= 32 && ascii_index <= 126 {
metrics.get(ascii_index - 32)
} else {
None
}
}
fn to_string(s: impl ToString) -> String {
s.to_string()
}
fn draw_background(
cvs: &mut Canvas<Surface>,
color: Color,
x: i32,
y: i32,
w: u32,
h: u32,
) -> Result<(), String> {
if color.a < 255 {
// set the background to transparent white
// (the blending with default black bg is to dark)
cvs.set_draw_color(Color::RGBA(255, 255, 255, 0));
cvs.clear();
cvs.set_blend_mode(BlendMode::Blend);
} else {
cvs.set_blend_mode(BlendMode::None);
}
cvs.set_draw_color(color);
cvs.fill_rect(Rect::new(x, y, w, h))
}
fn draw_border(
cvs: &mut Canvas<Surface>,
color: Color,
bw: u32,
x: i32,
y: i32
|
{
let mut total_width = 0;
let mut total_height = 0;
let mut glyphs: Vec<GlyphRegion> = Vec::new();
let mut space_advance = 0;
for c in ASCII.chars() {
if let Some(metric) = font.find_glyph_metrics(c) {
let (w, h) = font.size_of_char(c).map_err(to_string)?;
glyphs.push(GlyphRegion {
start: total_width as i32,
width: w,
height: h,
advance: metric.advance,
});
if c == ' ' {
space_advance = metric.advance;
}
|
identifier_body
|
text.rs
|
font: Sdl2Font,
) -> Result<Self, String> {
let mut total_width = 0;
let mut total_height = 0;
let mut glyphs: Vec<GlyphRegion> = Vec::new();
let mut space_advance = 0;
for c in ASCII.chars() {
if let Some(metric) = font.find_glyph_metrics(c) {
let (w, h) = font.size_of_char(c).map_err(to_string)?;
glyphs.push(GlyphRegion {
start: total_width as i32,
width: w,
height: h,
advance: metric.advance,
});
if c == ' ' {
space_advance = metric.advance;
}
total_width += w;
total_height = h;
} else {
return Err(format!("Unsupported character: {}", c));
}
}
let mut font_canvas = Surface::new(
total_width,
total_height,
texture_creator.default_pixel_format(),
)?
.into_canvas()?;
let font_texture_creator = font_canvas.texture_creator();
let mut x = 0;
for (i, c) in ASCII.char_indices() {
let GlyphRegion { width, .. } = glyphs[i];
let char_surface = font
.render(&c.to_string())
.blended(Color::RGBA(255, 255, 255, 255))
.map_err(to_string)?;
let char_tex = font_texture_creator
.create_texture_from_surface(&char_surface)
.map_err(to_string)?;
let target = Rect::new(x, 0, width, total_height);
font_canvas.copy(&char_tex, None, Some(target))?;
x += width as i32;
}
Ok(Font {
|
line_height: total_height,
space_advance,
texture_creator,
cached_texts: HashMap::new(),
})
}
pub fn draw(&mut self, screen_txt: ScreenText, cvs: &mut WindowCanvas) -> Result<(), String> {
let cache_key = screen_txt.text.to_string();
if let Some((ref mut tex, w, h)) = self.cached_texts.get_mut(&cache_key) {
let (tw, th) = scale_dim(screen_txt.scale, *w, *h);
let ScreenPos(x, y) = screen_txt.pos.align(screen_txt.align, tw, th);
tex.set_alpha_mod(screen_txt.alpha);
cvs.copy(tex, Rect::new(0, 0, *w, *h), Rect::new(x, y, tw, th))?;
return Ok(());
}
let pos = screen_txt.pos;
let align = screen_txt.align;
let prepared_text = prepare(screen_txt, self);
let (w, h) = prepared_text.dim;
let ScreenPos(x, y) = pos.align(align, w, h);
let pixel_format = self.texture_creator.default_pixel_format();
// let pixel_format = sdl2::pixels::PixelFormatEnum::ARGB32;
// draw the text to the temporay image
let mut text_cvs = Surface::new(w, h, pixel_format)?.into_canvas()?;
let font_texture_creator = text_cvs.texture_creator();
let mut font_texture = font_texture_creator
.create_texture_from_surface(self.font_canvas.surface())
.map_err(to_string)?;
draw_text(prepared_text, &mut text_cvs, &mut font_texture, (w, h))?;
// create a texture for the correct render target and for caching
let target_tex = self
.texture_creator
.create_texture_from_surface(text_cvs.surface())
.map_err(to_string)?;
// actually draw the text texture
cvs.copy(&target_tex, Rect::new(0, 0, w, h), Rect::new(x, y, w, h))?;
// cache the created texture for future frames
self.cached_texts.insert(cache_key, (target_tex, w, h));
Ok(())
}
}
struct PreparedWord {
chars: Vec<(i32, i32, u32, u32)>,
width: u32,
}
impl PreparedWord {
fn prepare(glyphs: &Vec<GlyphRegion>, txt: &str) -> Self {
let mut x = 0;
let mut chars = Vec::new();
for c in txt.chars() {
if let Some(r) = find_glyph_region(c, glyphs) {
chars.push((r.start, r.advance, r.width, r.height));
x = x + r.advance;
}
}
PreparedWord {
chars,
width: x as u32,
}
}
fn draw(
self: &Self,
texture: &Texture,
cvs: &mut Canvas<Surface>,
pos: (i32, i32),
) -> Result<(), String> {
let (mut x, y) = pos;
for (start, advance, width, height) in self.chars.iter() {
let from = Rect::new(*start, 0, *width, *height);
let to = Rect::new(x, y, *width, *height);
cvs.copy(&texture, Some(from), Some(to))?;
x = x + advance;
}
Ok(())
}
}
struct PreparedText {
lines: Vec<(i32, u32, Vec<(i32, PreparedWord)>)>,
dim: (u32, u32),
text_dim: (u32, u32),
align: Align,
color: (u8, u8, u8, u8),
background: Option<Color>,
padding: u32,
border: Option<(u32, Color)>,
}
fn prepare<'a>(text: ScreenText, font: &'a Font) -> PreparedText {
let (mut x, mut y) = (0, 0);
let mut lines = Vec::new();
let mut text_width: u32 = 0;
let border_width = text.border.map(|(w, _)| w).unwrap_or(0);
let spacing = 2 * text.padding + 2 * border_width;
let max_width = text.max_width - spacing;
for line in text.text.into_string().lines() {
let mut words = Vec::new();
let mut line_width: u32 = 0;
for t in line.split_whitespace() {
let word = PreparedWord::prepare(&font.glyphs, t);
let text_width = word.width;
let advance = font.space_advance + text_width as i32;
if x > 0 && (x + advance) as u32 > max_width {
// text does not fit in current line
// => wrap text (no wrap if first word in line)
lines.push((y, max_width, words));
words = Vec::new();
x = 0;
y += font.line_height as i32;
line_width = max_width;
}
words.push((x, word));
x += advance;
if x as u32 > line_width {
line_width = x as u32;
}
}
lines.push((y, line_width, words));
x = 0;
y += font.line_height as i32;
text_width = max(text_width, line_width);
}
let w = text_width + spacing;
let h = y as u32 + spacing;
PreparedText {
lines,
dim: (max(text.min_width, w), max(text.min_height, h)),
text_dim: (w, h),
align: text.text_align,
color: text.color,
background: text.background.map(|(r, g, b, a)| Color::RGBA(r, g, b, a)),
padding: text.padding,
border: text
.border
.map(|(w, (r, g, b, a))| (w, Color::RGBA(r, g, b, a))),
}
}
fn find_glyph_region(c: char, metrics: &Vec<GlyphRegion>) -> Option<&GlyphRegion> {
let ascii_index = c as usize;
if ascii_index >= 32 && ascii_index <= 126 {
metrics.get(ascii_index - 32)
} else {
None
}
}
fn to_string(s: impl ToString) -> String {
s.to_string()
}
fn draw_background(
cvs: &mut Canvas<Surface>,
color: Color,
x: i32,
y: i32,
w: u32,
h: u32,
) -> Result<(), String> {
if color.a < 255 {
// set the background to transparent white
// (the blending with default black bg is to dark)
cvs.set_draw_color(Color::RGBA(255, 255, 255, 0));
cvs.clear();
cvs.set_blend_mode(BlendMode::Blend);
} else {
cvs.set_blend_mode(BlendMode::None);
}
cvs.set_draw_color(color);
cvs.fill_rect(Rect::new(x, y, w, h))
}
fn draw_border(
cvs: &mut Canvas<Surface>,
color: Color,
bw: u32,
x: i32,
y: i32,
|
font_canvas: font_canvas,
glyphs,
|
random_line_split
|
text.rs
|
, h)) = self.cached_texts.get_mut(&cache_key) {
let (tw, th) = scale_dim(screen_txt.scale, *w, *h);
let ScreenPos(x, y) = screen_txt.pos.align(screen_txt.align, tw, th);
tex.set_alpha_mod(screen_txt.alpha);
cvs.copy(tex, Rect::new(0, 0, *w, *h), Rect::new(x, y, tw, th))?;
return Ok(());
}
let pos = screen_txt.pos;
let align = screen_txt.align;
let prepared_text = prepare(screen_txt, self);
let (w, h) = prepared_text.dim;
let ScreenPos(x, y) = pos.align(align, w, h);
let pixel_format = self.texture_creator.default_pixel_format();
// let pixel_format = sdl2::pixels::PixelFormatEnum::ARGB32;
// draw the text to the temporay image
let mut text_cvs = Surface::new(w, h, pixel_format)?.into_canvas()?;
let font_texture_creator = text_cvs.texture_creator();
let mut font_texture = font_texture_creator
.create_texture_from_surface(self.font_canvas.surface())
.map_err(to_string)?;
draw_text(prepared_text, &mut text_cvs, &mut font_texture, (w, h))?;
// create a texture for the correct render target and for caching
let target_tex = self
.texture_creator
.create_texture_from_surface(text_cvs.surface())
.map_err(to_string)?;
// actually draw the text texture
cvs.copy(&target_tex, Rect::new(0, 0, w, h), Rect::new(x, y, w, h))?;
// cache the created texture for future frames
self.cached_texts.insert(cache_key, (target_tex, w, h));
Ok(())
}
}
struct PreparedWord {
chars: Vec<(i32, i32, u32, u32)>,
width: u32,
}
impl PreparedWord {
fn prepare(glyphs: &Vec<GlyphRegion>, txt: &str) -> Self {
let mut x = 0;
let mut chars = Vec::new();
for c in txt.chars() {
if let Some(r) = find_glyph_region(c, glyphs) {
chars.push((r.start, r.advance, r.width, r.height));
x = x + r.advance;
}
}
PreparedWord {
chars,
width: x as u32,
}
}
fn draw(
self: &Self,
texture: &Texture,
cvs: &mut Canvas<Surface>,
pos: (i32, i32),
) -> Result<(), String> {
let (mut x, y) = pos;
for (start, advance, width, height) in self.chars.iter() {
let from = Rect::new(*start, 0, *width, *height);
let to = Rect::new(x, y, *width, *height);
cvs.copy(&texture, Some(from), Some(to))?;
x = x + advance;
}
Ok(())
}
}
struct PreparedText {
lines: Vec<(i32, u32, Vec<(i32, PreparedWord)>)>,
dim: (u32, u32),
text_dim: (u32, u32),
align: Align,
color: (u8, u8, u8, u8),
background: Option<Color>,
padding: u32,
border: Option<(u32, Color)>,
}
fn prepare<'a>(text: ScreenText, font: &'a Font) -> PreparedText {
let (mut x, mut y) = (0, 0);
let mut lines = Vec::new();
let mut text_width: u32 = 0;
let border_width = text.border.map(|(w, _)| w).unwrap_or(0);
let spacing = 2 * text.padding + 2 * border_width;
let max_width = text.max_width - spacing;
for line in text.text.into_string().lines() {
let mut words = Vec::new();
let mut line_width: u32 = 0;
for t in line.split_whitespace() {
let word = PreparedWord::prepare(&font.glyphs, t);
let text_width = word.width;
let advance = font.space_advance + text_width as i32;
if x > 0 && (x + advance) as u32 > max_width {
// text does not fit in current line
// => wrap text (no wrap if first word in line)
lines.push((y, max_width, words));
words = Vec::new();
x = 0;
y += font.line_height as i32;
line_width = max_width;
}
words.push((x, word));
x += advance;
if x as u32 > line_width {
line_width = x as u32;
}
}
lines.push((y, line_width, words));
x = 0;
y += font.line_height as i32;
text_width = max(text_width, line_width);
}
let w = text_width + spacing;
let h = y as u32 + spacing;
PreparedText {
lines,
dim: (max(text.min_width, w), max(text.min_height, h)),
text_dim: (w, h),
align: text.text_align,
color: text.color,
background: text.background.map(|(r, g, b, a)| Color::RGBA(r, g, b, a)),
padding: text.padding,
border: text
.border
.map(|(w, (r, g, b, a))| (w, Color::RGBA(r, g, b, a))),
}
}
fn find_glyph_region(c: char, metrics: &Vec<GlyphRegion>) -> Option<&GlyphRegion> {
let ascii_index = c as usize;
if ascii_index >= 32 && ascii_index <= 126 {
metrics.get(ascii_index - 32)
} else {
None
}
}
fn to_string(s: impl ToString) -> String {
s.to_string()
}
fn draw_background(
cvs: &mut Canvas<Surface>,
color: Color,
x: i32,
y: i32,
w: u32,
h: u32,
) -> Result<(), String> {
if color.a < 255 {
// set the background to transparent white
// (the blending with default black bg is to dark)
cvs.set_draw_color(Color::RGBA(255, 255, 255, 0));
cvs.clear();
cvs.set_blend_mode(BlendMode::Blend);
} else {
cvs.set_blend_mode(BlendMode::None);
}
cvs.set_draw_color(color);
cvs.fill_rect(Rect::new(x, y, w, h))
}
fn draw_border(
cvs: &mut Canvas<Surface>,
color: Color,
bw: u32,
x: i32,
y: i32,
w: u32,
h: u32,
) -> Result<(), String> {
let xl = x;
let xr = x + w as i32 - bw as i32;
let yt = y;
let yb = y + h as i32 - bw as i32;
cvs.set_draw_color(color);
cvs.fill_rect(Rect::new(xl, yt, w, bw))?; // top
cvs.fill_rect(Rect::new(xl, yt, bw, h))?; // left
cvs.fill_rect(Rect::new(xr, yt, bw, h))?; // right
cvs.fill_rect(Rect::new(xl, yb, w, bw))?; // bottom
Ok(())
}
fn draw_text(
text: PreparedText,
cvs: &mut Canvas<Surface>,
texture: &mut Texture,
(w, h): (u32, u32),
) -> Result<(), String> {
if let Some(bg_color) = text.background {
draw_background(cvs, bg_color, 0, 0, w, h)?;
}
if let Some((bw, border_color)) = text.border {
draw_border(cvs, border_color, bw, 0, 0, w, h)?;
}
texture.set_alpha_mod(text.color.3);
texture.set_color_mod(text.color.0, text.color.1, text.color.2);
let shift = text.border.map(|(val, _)| val).unwrap_or(0) as i32 + text.padding as i32;
let shift_y = align_line_vertical(text.align, text.text_dim.1, h) + shift;
for (offset_y, line_width, line) in text.lines.iter() {
let shift_x = align_line_horizontal(text.align, *line_width, w) + shift;
for (offset_x, word) in line {
word.draw(texture, cvs, (shift_x + offset_x, shift_y + offset_y))?;
}
}
texture.set_alpha_mod(255);
texture.set_color_mod(0, 0, 0);
Ok(())
}
fn
|
scale_dim
|
identifier_name
|
|
text.rs
|
font: Sdl2Font,
) -> Result<Self, String> {
let mut total_width = 0;
let mut total_height = 0;
let mut glyphs: Vec<GlyphRegion> = Vec::new();
let mut space_advance = 0;
for c in ASCII.chars() {
if let Some(metric) = font.find_glyph_metrics(c)
|
else {
return Err(format!("Unsupported character: {}", c));
}
}
let mut font_canvas = Surface::new(
total_width,
total_height,
texture_creator.default_pixel_format(),
)?
.into_canvas()?;
let font_texture_creator = font_canvas.texture_creator();
let mut x = 0;
for (i, c) in ASCII.char_indices() {
let GlyphRegion { width, .. } = glyphs[i];
let char_surface = font
.render(&c.to_string())
.blended(Color::RGBA(255, 255, 255, 255))
.map_err(to_string)?;
let char_tex = font_texture_creator
.create_texture_from_surface(&char_surface)
.map_err(to_string)?;
let target = Rect::new(x, 0, width, total_height);
font_canvas.copy(&char_tex, None, Some(target))?;
x += width as i32;
}
Ok(Font {
font_canvas: font_canvas,
glyphs,
line_height: total_height,
space_advance,
texture_creator,
cached_texts: HashMap::new(),
})
}
pub fn draw(&mut self, screen_txt: ScreenText, cvs: &mut WindowCanvas) -> Result<(), String> {
let cache_key = screen_txt.text.to_string();
if let Some((ref mut tex, w, h)) = self.cached_texts.get_mut(&cache_key) {
let (tw, th) = scale_dim(screen_txt.scale, *w, *h);
let ScreenPos(x, y) = screen_txt.pos.align(screen_txt.align, tw, th);
tex.set_alpha_mod(screen_txt.alpha);
cvs.copy(tex, Rect::new(0, 0, *w, *h), Rect::new(x, y, tw, th))?;
return Ok(());
}
let pos = screen_txt.pos;
let align = screen_txt.align;
let prepared_text = prepare(screen_txt, self);
let (w, h) = prepared_text.dim;
let ScreenPos(x, y) = pos.align(align, w, h);
let pixel_format = self.texture_creator.default_pixel_format();
// let pixel_format = sdl2::pixels::PixelFormatEnum::ARGB32;
// draw the text to the temporay image
let mut text_cvs = Surface::new(w, h, pixel_format)?.into_canvas()?;
let font_texture_creator = text_cvs.texture_creator();
let mut font_texture = font_texture_creator
.create_texture_from_surface(self.font_canvas.surface())
.map_err(to_string)?;
draw_text(prepared_text, &mut text_cvs, &mut font_texture, (w, h))?;
// create a texture for the correct render target and for caching
let target_tex = self
.texture_creator
.create_texture_from_surface(text_cvs.surface())
.map_err(to_string)?;
// actually draw the text texture
cvs.copy(&target_tex, Rect::new(0, 0, w, h), Rect::new(x, y, w, h))?;
// cache the created texture for future frames
self.cached_texts.insert(cache_key, (target_tex, w, h));
Ok(())
}
}
struct PreparedWord {
chars: Vec<(i32, i32, u32, u32)>,
width: u32,
}
impl PreparedWord {
fn prepare(glyphs: &Vec<GlyphRegion>, txt: &str) -> Self {
let mut x = 0;
let mut chars = Vec::new();
for c in txt.chars() {
if let Some(r) = find_glyph_region(c, glyphs) {
chars.push((r.start, r.advance, r.width, r.height));
x = x + r.advance;
}
}
PreparedWord {
chars,
width: x as u32,
}
}
fn draw(
self: &Self,
texture: &Texture,
cvs: &mut Canvas<Surface>,
pos: (i32, i32),
) -> Result<(), String> {
let (mut x, y) = pos;
for (start, advance, width, height) in self.chars.iter() {
let from = Rect::new(*start, 0, *width, *height);
let to = Rect::new(x, y, *width, *height);
cvs.copy(&texture, Some(from), Some(to))?;
x = x + advance;
}
Ok(())
}
}
struct PreparedText {
lines: Vec<(i32, u32, Vec<(i32, PreparedWord)>)>,
dim: (u32, u32),
text_dim: (u32, u32),
align: Align,
color: (u8, u8, u8, u8),
background: Option<Color>,
padding: u32,
border: Option<(u32, Color)>,
}
fn prepare<'a>(text: ScreenText, font: &'a Font) -> PreparedText {
let (mut x, mut y) = (0, 0);
let mut lines = Vec::new();
let mut text_width: u32 = 0;
let border_width = text.border.map(|(w, _)| w).unwrap_or(0);
let spacing = 2 * text.padding + 2 * border_width;
let max_width = text.max_width - spacing;
for line in text.text.into_string().lines() {
let mut words = Vec::new();
let mut line_width: u32 = 0;
for t in line.split_whitespace() {
let word = PreparedWord::prepare(&font.glyphs, t);
let text_width = word.width;
let advance = font.space_advance + text_width as i32;
if x > 0 && (x + advance) as u32 > max_width {
// text does not fit in current line
// => wrap text (no wrap if first word in line)
lines.push((y, max_width, words));
words = Vec::new();
x = 0;
y += font.line_height as i32;
line_width = max_width;
}
words.push((x, word));
x += advance;
if x as u32 > line_width {
line_width = x as u32;
}
}
lines.push((y, line_width, words));
x = 0;
y += font.line_height as i32;
text_width = max(text_width, line_width);
}
let w = text_width + spacing;
let h = y as u32 + spacing;
PreparedText {
lines,
dim: (max(text.min_width, w), max(text.min_height, h)),
text_dim: (w, h),
align: text.text_align,
color: text.color,
background: text.background.map(|(r, g, b, a)| Color::RGBA(r, g, b, a)),
padding: text.padding,
border: text
.border
.map(|(w, (r, g, b, a))| (w, Color::RGBA(r, g, b, a))),
}
}
fn find_glyph_region(c: char, metrics: &Vec<GlyphRegion>) -> Option<&GlyphRegion> {
let ascii_index = c as usize;
if ascii_index >= 32 && ascii_index <= 126 {
metrics.get(ascii_index - 32)
} else {
None
}
}
fn to_string(s: impl ToString) -> String {
s.to_string()
}
fn draw_background(
cvs: &mut Canvas<Surface>,
color: Color,
x: i32,
y: i32,
w: u32,
h: u32,
) -> Result<(), String> {
if color.a < 255 {
// set the background to transparent white
// (the blending with default black bg is to dark)
cvs.set_draw_color(Color::RGBA(255, 255, 255, 0));
cvs.clear();
cvs.set_blend_mode(BlendMode::Blend);
} else {
cvs.set_blend_mode(BlendMode::None);
}
cvs.set_draw_color(color);
cvs.fill_rect(Rect::new(x, y, w, h))
}
fn draw_border(
cvs: &mut Canvas<Surface>,
color: Color,
bw: u32,
x: i32,
y: i32,
|
{
let (w, h) = font.size_of_char(c).map_err(to_string)?;
glyphs.push(GlyphRegion {
start: total_width as i32,
width: w,
height: h,
advance: metric.advance,
});
if c == ' ' {
space_advance = metric.advance;
}
total_width += w;
total_height = h;
}
|
conditional_block
|
rest.go
|
{} //should help with the concurrent map writes issue
var wg sync.WaitGroup //multi threading the GET details request
wg.Add(len(filesList.Children))
for i := 0; i < len(filesList.Children); i++ {
go func(i int) {
defer wg.Done()
var fileDetail helpers.FileStorageJSON
var data2, _ = auth.GetRestAPI(url+"/api/storage/"+repo+"/"+download+filesList.Children[i].URI, username, apiKey, "", "")
|
if strings.Contains(download+filesList.Children[i].URI, "%") {
log.Warn("Encoding charactrer % detected in file URL, ", download+filesList.Children[i].URI, ", skipping")
return
}
if !strings.Contains(fileDetail.DownloadURI, url) {
log.Debug("Debug, url details:", fileDetail.DownloadURI, " :", url, " :data:", fileDetail)
log.Warn("It looks like your URL context has been updated, as the file URL is different. Please reset your download.json")
//os.Exit(1)
}
time, _ := time.Parse(time.RFC3339, fileDetail.LastModified)
mutex.Lock()
unsorted[i+1] = helpers.FileStorageJSON{
LastModified: fileDetail.LastModified,
ConvertedTime: time,
Size: fileDetail.Size,
Path: fileDetail.Path,
DownloadURI: fileDetail.DownloadURI,
Checksums: fileDetail.Checksums,
}
mutex.Unlock()
}(i)
}
wg.Wait()
//get unsorted data and sort it
sorted := make(helpers.TimeSlice, 0, len(unsorted))
for _, d := range unsorted {
sorted = append(sorted, d)
}
sort.Sort(sorted)
helpers.PrintSorted(sorted, url, repo, download)
return sorted
}
//DownloadFilesList download files selected
func DownloadFilesList(sorted helpers.TimeSlice, creds auth.Creds, flags helpers.Flags, masterkey, readmeFileName string) {
sortedSize := len(sorted)
fmt.Println("Which files do you wish to download? Please separate each number by a space. Use a '-' for ranges, like: 1 3-6 11-12:")
reader := bufio.NewReader(os.Stdin)
downloadIn, _ := reader.ReadString('\n')
download := strings.TrimSuffix(downloadIn, "\n")
words := strings.Fields(download)
//download all
if strings.HasPrefix(download, "0 ") || download == "0" || strings.HasSuffix(download, " 0") || strings.Contains(download, " 0 ") {
log.Info("zero detected, downloading everything")
words = nil
for i := 0; i < sortedSize; i++ {
t := strconv.Itoa(i + 1)
words = append(words, t)
}
} else if strings.Contains(download, "-") {
//parse ranges
words = nil
numbers := strings.Split(download, " ")
for i := 0; i < len(numbers); i++ {
if strings.Contains(numbers[i], "-") {
log.Info("found number with dash ", numbers[i])
splitNumbers := strings.Split(numbers[i], "-")
first, err := strconv.Atoi(splitNumbers[0])
helpers.Check(err, true, "floor check", helpers.Trace())
second, err := strconv.Atoi(splitNumbers[len(splitNumbers)-1])
helpers.Check(err, true, "ceiling check", helpers.Trace())
for j := first; j <= second; j++ {
log.Debug("adding to download:", j)
words = append(words, strconv.Itoa(j))
}
} else {
words = append(words, numbers[i])
}
}
}
log.Debug("downloading the indexes (raw):", words)
//remove duplicates from list
check := make(map[string]int)
for _, val := range words {
check[val] = 1
}
words = nil
for letter, _ := range check {
words = append(words, letter)
}
log.Info("downloading the indexes (dedup):", words)
//path := strings.TrimPrefix(sorted[0].DownloadURI, creds.URL+"/"+creds.Repository+"/")
path := strings.TrimPrefix(sorted[0].Path, "/")
log.Debug("Path trimmed:" + path)
path = path[:strings.IndexByte(path, '/')]
relativePath := creds.DlLocation + "/" + path + "/"
var filesystemChecksums = make(map[string]string)
if _, err := os.Stat(relativePath); os.IsNotExist(err) {
log.Debug("%s does not exist, creating\n", relativePath)
err2 := os.Mkdir(relativePath, 0700)
helpers.Check(err2, true, "Creating log folder", helpers.Trace())
} else {
log.Info(relativePath, " exists, running checksum validation")
f, err := os.Open(relativePath)
helpers.Check(err, true, "Opening download directory", helpers.Trace())
files, err := f.Readdir(-1)
f.Close()
helpers.Check(err, true, "Reading download directory files", helpers.Trace())
for _, file := range files {
if file.IsDir() {
//I guess we could walk the entire tree if we wanted..
log.Info(file.Name(), " is a directory. skipping\n")
continue
}
//store list of checksums in memory then compare before download
if flags.SkipDownloadedChecksumCheckVar == false {
log.Debug("Checksum check not skipped for:", relativePath+file.Name())
sha2 := helpers.ComputeSha256(relativePath + file.Name())
filesystemChecksums[sha2] = relativePath + file.Name()
}
}
}
//create file
readme := relativePath + "/" + readmeFileName
log.Debug("Trying to create readme file under ", readme)
DetectDetailsFile(readme, masterkey)
log.Debug("size of index", words)
for key := range words {
//check if the index is an invalid option, skip if needed
size := helpers.StringToInt64(words[key])
if size < 1 || size > int64(sortedSize) {
log.Warn("Out of bounds number ", words[key], ", skipping")
continue
}
//fileName := strings.TrimPrefix(sorted[size-1].DownloadURI, creds.URL+"/"+creds.Repository+"/"+path+"/")
fileName := strings.TrimPrefix(sorted[size-1].Path, "/"+path+"/")
log.Debug("fileName trimmed:", fileName, " path:", path, " sorted[size-1].Path:", sorted[size-1].Path)
//check shasum of download against in folder
if filesystemChecksums[sorted[size-1].Checksums.Sha256] != "" {
log.Info("file ", fileName, " exists, skipping download\n")
continue
}
log.Info("downloading ", words[key], " ", sorted[size-1].DownloadURI)
log.Debug("sorted:", sorted)
// do some naive file type detection here
readableFilesExtensions := []string{"txt", "pdf", "json", "yaml", "yml", "json", "xml", "log"}
var readableFile bool
for i := range readableFilesExtensions {
if strings.HasSuffix(fileName, readableFilesExtensions[i]) {
log.Info("do not create folder, is readable without unarchiving:", fileName)
readableFile = true
}
}
oldRelativePath := relativePath
if !readableFile {
log.Info("creating folder due to archive:", relativePath+fileName+"-folder")
err := os.Mkdir(relativePath+fileName+"-folder", 0755)
helpers.Check(err, false, "Archive folder create", helpers.Trace())
relativePath = relativePath + fileName + "-folder/"
}
_, filepath := auth.GetRestAPI(sorted[size-1].DownloadURI, creds.Username, creds.Apikey, relativePath+fileName, sorted[size-1].Checksums.Sha256)
if !readableFile {
log.Debug("creating symlink for file:", fileName)
os.Symlink(relativePath+fileName, oldRelativePath+"."+fileName)
//create symlink post download for checksum checker
}
log.Info("Successfully finished downloading ", sorted[size-1].DownloadURI)
//try to unarchive if true
if flags.UnzipVar {
//file type detection
buff := make([]byte, 512)
file, err := os.Open(filepath)
helpers.Check(err, true, "File testing failed at open:", helpers.Trace())
_, err = file.Read(buff)
helpers.Check(err, true, "File testing failed at read:", helpers.Trace())
filetype := http.DetectContentType(buff)
switch filetype {
case "application/x-gzip", "application/zip":
log.Info("File is compressed with gzip or zip, attempting to unzip")
log.Debug("Unzipping ", filepath, " to ", filepath+"-folder")
err := helpers.Unzip(filepath, filepath+"-folder")
if err != nil {
log.Error(err)
}
default
|
json.Unmarshal([]byte(data2), &fileDetail)
log.Debug("Debug before, url details:", fileDetail.DownloadURI, " :", url, " :data:", fileDetail, " download uri:", download+filesList.Children[i].URI)
|
random_line_split
|
rest.go
|
{} //should help with the concurrent map writes issue
var wg sync.WaitGroup //multi threading the GET details request
wg.Add(len(filesList.Children))
for i := 0; i < len(filesList.Children); i++ {
go func(i int) {
defer wg.Done()
var fileDetail helpers.FileStorageJSON
var data2, _ = auth.GetRestAPI(url+"/api/storage/"+repo+"/"+download+filesList.Children[i].URI, username, apiKey, "", "")
json.Unmarshal([]byte(data2), &fileDetail)
log.Debug("Debug before, url details:", fileDetail.DownloadURI, " :", url, " :data:", fileDetail, " download uri:", download+filesList.Children[i].URI)
if strings.Contains(download+filesList.Children[i].URI, "%") {
log.Warn("Encoding charactrer % detected in file URL, ", download+filesList.Children[i].URI, ", skipping")
return
}
if !strings.Contains(fileDetail.DownloadURI, url) {
log.Debug("Debug, url details:", fileDetail.DownloadURI, " :", url, " :data:", fileDetail)
log.Warn("It looks like your URL context has been updated, as the file URL is different. Please reset your download.json")
//os.Exit(1)
}
time, _ := time.Parse(time.RFC3339, fileDetail.LastModified)
mutex.Lock()
unsorted[i+1] = helpers.FileStorageJSON{
LastModified: fileDetail.LastModified,
ConvertedTime: time,
Size: fileDetail.Size,
Path: fileDetail.Path,
DownloadURI: fileDetail.DownloadURI,
Checksums: fileDetail.Checksums,
}
mutex.Unlock()
}(i)
}
wg.Wait()
//get unsorted data and sort it
sorted := make(helpers.TimeSlice, 0, len(unsorted))
for _, d := range unsorted {
sorted = append(sorted, d)
}
sort.Sort(sorted)
helpers.PrintSorted(sorted, url, repo, download)
return sorted
}
//DownloadFilesList download files selected
func DownloadFilesList(sorted helpers.TimeSlice, creds auth.Creds, flags helpers.Flags, masterkey, readmeFileName string) {
sortedSize := len(sorted)
fmt.Println("Which files do you wish to download? Please separate each number by a space. Use a '-' for ranges, like: 1 3-6 11-12:")
reader := bufio.NewReader(os.Stdin)
downloadIn, _ := reader.ReadString('\n')
download := strings.TrimSuffix(downloadIn, "\n")
words := strings.Fields(download)
//download all
if strings.HasPrefix(download, "0 ") || download == "0" || strings.HasSuffix(download, " 0") || strings.Contains(download, " 0 ")
|
else if strings.Contains(download, "-") {
//parse ranges
words = nil
numbers := strings.Split(download, " ")
for i := 0; i < len(numbers); i++ {
if strings.Contains(numbers[i], "-") {
log.Info("found number with dash ", numbers[i])
splitNumbers := strings.Split(numbers[i], "-")
first, err := strconv.Atoi(splitNumbers[0])
helpers.Check(err, true, "floor check", helpers.Trace())
second, err := strconv.Atoi(splitNumbers[len(splitNumbers)-1])
helpers.Check(err, true, "ceiling check", helpers.Trace())
for j := first; j <= second; j++ {
log.Debug("adding to download:", j)
words = append(words, strconv.Itoa(j))
}
} else {
words = append(words, numbers[i])
}
}
}
log.Debug("downloading the indexes (raw):", words)
//remove duplicates from list
check := make(map[string]int)
for _, val := range words {
check[val] = 1
}
words = nil
for letter, _ := range check {
words = append(words, letter)
}
log.Info("downloading the indexes (dedup):", words)
//path := strings.TrimPrefix(sorted[0].DownloadURI, creds.URL+"/"+creds.Repository+"/")
path := strings.TrimPrefix(sorted[0].Path, "/")
log.Debug("Path trimmed:" + path)
path = path[:strings.IndexByte(path, '/')]
relativePath := creds.DlLocation + "/" + path + "/"
var filesystemChecksums = make(map[string]string)
if _, err := os.Stat(relativePath); os.IsNotExist(err) {
log.Debug("%s does not exist, creating\n", relativePath)
err2 := os.Mkdir(relativePath, 0700)
helpers.Check(err2, true, "Creating log folder", helpers.Trace())
} else {
log.Info(relativePath, " exists, running checksum validation")
f, err := os.Open(relativePath)
helpers.Check(err, true, "Opening download directory", helpers.Trace())
files, err := f.Readdir(-1)
f.Close()
helpers.Check(err, true, "Reading download directory files", helpers.Trace())
for _, file := range files {
if file.IsDir() {
//I guess we could walk the entire tree if we wanted..
log.Info(file.Name(), " is a directory. skipping\n")
continue
}
//store list of checksums in memory then compare before download
if flags.SkipDownloadedChecksumCheckVar == false {
log.Debug("Checksum check not skipped for:", relativePath+file.Name())
sha2 := helpers.ComputeSha256(relativePath + file.Name())
filesystemChecksums[sha2] = relativePath + file.Name()
}
}
}
//create file
readme := relativePath + "/" + readmeFileName
log.Debug("Trying to create readme file under ", readme)
DetectDetailsFile(readme, masterkey)
log.Debug("size of index", words)
for key := range words {
//check if the index is an invalid option, skip if needed
size := helpers.StringToInt64(words[key])
if size < 1 || size > int64(sortedSize) {
log.Warn("Out of bounds number ", words[key], ", skipping")
continue
}
//fileName := strings.TrimPrefix(sorted[size-1].DownloadURI, creds.URL+"/"+creds.Repository+"/"+path+"/")
fileName := strings.TrimPrefix(sorted[size-1].Path, "/"+path+"/")
log.Debug("fileName trimmed:", fileName, " path:", path, " sorted[size-1].Path:", sorted[size-1].Path)
//check shasum of download against in folder
if filesystemChecksums[sorted[size-1].Checksums.Sha256] != "" {
log.Info("file ", fileName, " exists, skipping download\n")
continue
}
log.Info("downloading ", words[key], " ", sorted[size-1].DownloadURI)
log.Debug("sorted:", sorted)
// do some naive file type detection here
readableFilesExtensions := []string{"txt", "pdf", "json", "yaml", "yml", "json", "xml", "log"}
var readableFile bool
for i := range readableFilesExtensions {
if strings.HasSuffix(fileName, readableFilesExtensions[i]) {
log.Info("do not create folder, is readable without unarchiving:", fileName)
readableFile = true
}
}
oldRelativePath := relativePath
if !readableFile {
log.Info("creating folder due to archive:", relativePath+fileName+"-folder")
err := os.Mkdir(relativePath+fileName+"-folder", 0755)
helpers.Check(err, false, "Archive folder create", helpers.Trace())
relativePath = relativePath + fileName + "-folder/"
}
_, filepath := auth.GetRestAPI(sorted[size-1].DownloadURI, creds.Username, creds.Apikey, relativePath+fileName, sorted[size-1].Checksums.Sha256)
if !readableFile {
log.Debug("creating symlink for file:", fileName)
os.Symlink(relativePath+fileName, oldRelativePath+"."+fileName)
//create symlink post download for checksum checker
}
log.Info("Successfully finished downloading ", sorted[size-1].DownloadURI)
//try to unarchive if true
if flags.UnzipVar {
//file type detection
buff := make([]byte, 512)
file, err := os.Open(filepath)
helpers.Check(err, true, "File testing failed at open:", helpers.Trace())
_, err = file.Read(buff)
helpers.Check(err, true, "File testing failed at read:", helpers.Trace())
filetype := http.DetectContentType(buff)
switch filetype {
case "application/x-gzip", "application/zip":
log.Info("File is compressed with gzip or zip, attempting to unzip")
log.Debug("Unzipping ", filepath, " to ", filepath+"-folder")
err := helpers.Unzip(filepath, filepath+"-folder")
if err != nil {
log.Error(err)
}
default
|
{
log.Info("zero detected, downloading everything")
words = nil
for i := 0; i < sortedSize; i++ {
t := strconv.Itoa(i + 1)
words = append(words, t)
}
}
|
conditional_block
|
rest.go
|
{} //should help with the concurrent map writes issue
var wg sync.WaitGroup //multi threading the GET details request
wg.Add(len(filesList.Children))
for i := 0; i < len(filesList.Children); i++ {
go func(i int) {
defer wg.Done()
var fileDetail helpers.FileStorageJSON
var data2, _ = auth.GetRestAPI(url+"/api/storage/"+repo+"/"+download+filesList.Children[i].URI, username, apiKey, "", "")
json.Unmarshal([]byte(data2), &fileDetail)
log.Debug("Debug before, url details:", fileDetail.DownloadURI, " :", url, " :data:", fileDetail, " download uri:", download+filesList.Children[i].URI)
if strings.Contains(download+filesList.Children[i].URI, "%") {
log.Warn("Encoding charactrer % detected in file URL, ", download+filesList.Children[i].URI, ", skipping")
return
}
if !strings.Contains(fileDetail.DownloadURI, url) {
log.Debug("Debug, url details:", fileDetail.DownloadURI, " :", url, " :data:", fileDetail)
log.Warn("It looks like your URL context has been updated, as the file URL is different. Please reset your download.json")
//os.Exit(1)
}
time, _ := time.Parse(time.RFC3339, fileDetail.LastModified)
mutex.Lock()
unsorted[i+1] = helpers.FileStorageJSON{
LastModified: fileDetail.LastModified,
ConvertedTime: time,
Size: fileDetail.Size,
Path: fileDetail.Path,
DownloadURI: fileDetail.DownloadURI,
Checksums: fileDetail.Checksums,
}
mutex.Unlock()
}(i)
}
wg.Wait()
//get unsorted data and sort it
sorted := make(helpers.TimeSlice, 0, len(unsorted))
for _, d := range unsorted {
sorted = append(sorted, d)
}
sort.Sort(sorted)
helpers.PrintSorted(sorted, url, repo, download)
return sorted
}
//DownloadFilesList download files selected
func DownloadFilesList(sorted helpers.TimeSlice, creds auth.Creds, flags helpers.Flags, masterkey, readmeFileName string)
|
for i := 0; i < len(numbers); i++ {
if strings.Contains(numbers[i], "-") {
log.Info("found number with dash ", numbers[i])
splitNumbers := strings.Split(numbers[i], "-")
first, err := strconv.Atoi(splitNumbers[0])
helpers.Check(err, true, "floor check", helpers.Trace())
second, err := strconv.Atoi(splitNumbers[len(splitNumbers)-1])
helpers.Check(err, true, "ceiling check", helpers.Trace())
for j := first; j <= second; j++ {
log.Debug("adding to download:", j)
words = append(words, strconv.Itoa(j))
}
} else {
words = append(words, numbers[i])
}
}
}
log.Debug("downloading the indexes (raw):", words)
//remove duplicates from list
check := make(map[string]int)
for _, val := range words {
check[val] = 1
}
words = nil
for letter, _ := range check {
words = append(words, letter)
}
log.Info("downloading the indexes (dedup):", words)
//path := strings.TrimPrefix(sorted[0].DownloadURI, creds.URL+"/"+creds.Repository+"/")
path := strings.TrimPrefix(sorted[0].Path, "/")
log.Debug("Path trimmed:" + path)
path = path[:strings.IndexByte(path, '/')]
relativePath := creds.DlLocation + "/" + path + "/"
var filesystemChecksums = make(map[string]string)
if _, err := os.Stat(relativePath); os.IsNotExist(err) {
log.Debug("%s does not exist, creating\n", relativePath)
err2 := os.Mkdir(relativePath, 0700)
helpers.Check(err2, true, "Creating log folder", helpers.Trace())
} else {
log.Info(relativePath, " exists, running checksum validation")
f, err := os.Open(relativePath)
helpers.Check(err, true, "Opening download directory", helpers.Trace())
files, err := f.Readdir(-1)
f.Close()
helpers.Check(err, true, "Reading download directory files", helpers.Trace())
for _, file := range files {
if file.IsDir() {
//I guess we could walk the entire tree if we wanted..
log.Info(file.Name(), " is a directory. skipping\n")
continue
}
//store list of checksums in memory then compare before download
if flags.SkipDownloadedChecksumCheckVar == false {
log.Debug("Checksum check not skipped for:", relativePath+file.Name())
sha2 := helpers.ComputeSha256(relativePath + file.Name())
filesystemChecksums[sha2] = relativePath + file.Name()
}
}
}
//create file
readme := relativePath + "/" + readmeFileName
log.Debug("Trying to create readme file under ", readme)
DetectDetailsFile(readme, masterkey)
log.Debug("size of index", words)
for key := range words {
//check if the index is an invalid option, skip if needed
size := helpers.StringToInt64(words[key])
if size < 1 || size > int64(sortedSize) {
log.Warn("Out of bounds number ", words[key], ", skipping")
continue
}
//fileName := strings.TrimPrefix(sorted[size-1].DownloadURI, creds.URL+"/"+creds.Repository+"/"+path+"/")
fileName := strings.TrimPrefix(sorted[size-1].Path, "/"+path+"/")
log.Debug("fileName trimmed:", fileName, " path:", path, " sorted[size-1].Path:", sorted[size-1].Path)
//check shasum of download against in folder
if filesystemChecksums[sorted[size-1].Checksums.Sha256] != "" {
log.Info("file ", fileName, " exists, skipping download\n")
continue
}
log.Info("downloading ", words[key], " ", sorted[size-1].DownloadURI)
log.Debug("sorted:", sorted)
// do some naive file type detection here
readableFilesExtensions := []string{"txt", "pdf", "json", "yaml", "yml", "json", "xml", "log"}
var readableFile bool
for i := range readableFilesExtensions {
if strings.HasSuffix(fileName, readableFilesExtensions[i]) {
log.Info("do not create folder, is readable without unarchiving:", fileName)
readableFile = true
}
}
oldRelativePath := relativePath
if !readableFile {
log.Info("creating folder due to archive:", relativePath+fileName+"-folder")
err := os.Mkdir(relativePath+fileName+"-folder", 0755)
helpers.Check(err, false, "Archive folder create", helpers.Trace())
relativePath = relativePath + fileName + "-folder/"
}
_, filepath := auth.GetRestAPI(sorted[size-1].DownloadURI, creds.Username, creds.Apikey, relativePath+fileName, sorted[size-1].Checksums.Sha256)
if !readableFile {
log.Debug("creating symlink for file:", fileName)
os.Symlink(relativePath+fileName, oldRelativePath+"."+fileName)
//create symlink post download for checksum checker
}
log.Info("Successfully finished downloading ", sorted[size-1].DownloadURI)
//try to unarchive if true
if flags.UnzipVar {
//file type detection
buff := make([]byte, 512)
file, err := os.Open(filepath)
helpers.Check(err, true, "File testing failed at open:", helpers.Trace())
_, err = file.Read(buff)
helpers.Check(err, true, "File testing failed at read:", helpers.Trace())
filetype := http.DetectContentType(buff)
switch filetype {
case "application/x-gzip", "application/zip":
log.Info("File is compressed with gzip or zip, attempting to unzip")
log.Debug("Unzipping ", filepath, " to ", filepath+"-folder")
err := helpers.Unzip(filepath, filepath+"-folder")
if err != nil {
log.Error(err)
}
default
|
{
sortedSize := len(sorted)
fmt.Println("Which files do you wish to download? Please separate each number by a space. Use a '-' for ranges, like: 1 3-6 11-12:")
reader := bufio.NewReader(os.Stdin)
downloadIn, _ := reader.ReadString('\n')
download := strings.TrimSuffix(downloadIn, "\n")
words := strings.Fields(download)
//download all
if strings.HasPrefix(download, "0 ") || download == "0" || strings.HasSuffix(download, " 0") || strings.Contains(download, " 0 ") {
log.Info("zero detected, downloading everything")
words = nil
for i := 0; i < sortedSize; i++ {
t := strconv.Itoa(i + 1)
words = append(words, t)
}
} else if strings.Contains(download, "-") {
//parse ranges
words = nil
numbers := strings.Split(download, " ")
|
identifier_body
|
rest.go
|
(username, apiKey, url, repo, download string) helpers.TimeSlice {
//create map of all file details from list of files
var unsorted = make(map[int]helpers.FileStorageJSON)
var filesList helpers.StorageJSON
var data, _ = auth.GetRestAPI(url+"/api/storage/"+repo+"/"+download+"/", username, apiKey, "", "")
json.Unmarshal([]byte(data), &filesList)
for len(filesList.Children) == 0 {
fmt.Println("No files found under " + url + "/" + repo + "/" + download + "/. Enter again, or type n to quit:")
reader := bufio.NewReader(os.Stdin)
downloadIn, _ := reader.ReadString('\n')
download = strings.TrimSuffix(downloadIn, "\n")
if download == "n" {
os.Exit(0)
}
data, _ = auth.GetRestAPI(url+"/api/storage/"+repo+"/"+download+"/", username, apiKey, "", "")
json.Unmarshal([]byte(data), &filesList)
}
fmt.Println("Found the following files under " + url + "/" + repo + "/" + download + "/\nNumber\tLast Modified\t\tSize\tPath")
var mutex = &sync.Mutex{} //should help with the concurrent map writes issue
var wg sync.WaitGroup //multi threading the GET details request
wg.Add(len(filesList.Children))
for i := 0; i < len(filesList.Children); i++ {
go func(i int) {
defer wg.Done()
var fileDetail helpers.FileStorageJSON
var data2, _ = auth.GetRestAPI(url+"/api/storage/"+repo+"/"+download+filesList.Children[i].URI, username, apiKey, "", "")
json.Unmarshal([]byte(data2), &fileDetail)
log.Debug("Debug before, url details:", fileDetail.DownloadURI, " :", url, " :data:", fileDetail, " download uri:", download+filesList.Children[i].URI)
if strings.Contains(download+filesList.Children[i].URI, "%") {
log.Warn("Encoding charactrer % detected in file URL, ", download+filesList.Children[i].URI, ", skipping")
return
}
if !strings.Contains(fileDetail.DownloadURI, url) {
log.Debug("Debug, url details:", fileDetail.DownloadURI, " :", url, " :data:", fileDetail)
log.Warn("It looks like your URL context has been updated, as the file URL is different. Please reset your download.json")
//os.Exit(1)
}
time, _ := time.Parse(time.RFC3339, fileDetail.LastModified)
mutex.Lock()
unsorted[i+1] = helpers.FileStorageJSON{
LastModified: fileDetail.LastModified,
ConvertedTime: time,
Size: fileDetail.Size,
Path: fileDetail.Path,
DownloadURI: fileDetail.DownloadURI,
Checksums: fileDetail.Checksums,
}
mutex.Unlock()
}(i)
}
wg.Wait()
//get unsorted data and sort it
sorted := make(helpers.TimeSlice, 0, len(unsorted))
for _, d := range unsorted {
sorted = append(sorted, d)
}
sort.Sort(sorted)
helpers.PrintSorted(sorted, url, repo, download)
return sorted
}
//DownloadFilesList download files selected
func DownloadFilesList(sorted helpers.TimeSlice, creds auth.Creds, flags helpers.Flags, masterkey, readmeFileName string) {
sortedSize := len(sorted)
fmt.Println("Which files do you wish to download? Please separate each number by a space. Use a '-' for ranges, like: 1 3-6 11-12:")
reader := bufio.NewReader(os.Stdin)
downloadIn, _ := reader.ReadString('\n')
download := strings.TrimSuffix(downloadIn, "\n")
words := strings.Fields(download)
//download all
if strings.HasPrefix(download, "0 ") || download == "0" || strings.HasSuffix(download, " 0") || strings.Contains(download, " 0 ") {
log.Info("zero detected, downloading everything")
words = nil
for i := 0; i < sortedSize; i++ {
t := strconv.Itoa(i + 1)
words = append(words, t)
}
} else if strings.Contains(download, "-") {
//parse ranges
words = nil
numbers := strings.Split(download, " ")
for i := 0; i < len(numbers); i++ {
if strings.Contains(numbers[i], "-") {
log.Info("found number with dash ", numbers[i])
splitNumbers := strings.Split(numbers[i], "-")
first, err := strconv.Atoi(splitNumbers[0])
helpers.Check(err, true, "floor check", helpers.Trace())
second, err := strconv.Atoi(splitNumbers[len(splitNumbers)-1])
helpers.Check(err, true, "ceiling check", helpers.Trace())
for j := first; j <= second; j++ {
log.Debug("adding to download:", j)
words = append(words, strconv.Itoa(j))
}
} else {
words = append(words, numbers[i])
}
}
}
log.Debug("downloading the indexes (raw):", words)
//remove duplicates from list
check := make(map[string]int)
for _, val := range words {
check[val] = 1
}
words = nil
for letter, _ := range check {
words = append(words, letter)
}
log.Info("downloading the indexes (dedup):", words)
//path := strings.TrimPrefix(sorted[0].DownloadURI, creds.URL+"/"+creds.Repository+"/")
path := strings.TrimPrefix(sorted[0].Path, "/")
log.Debug("Path trimmed:" + path)
path = path[:strings.IndexByte(path, '/')]
relativePath := creds.DlLocation + "/" + path + "/"
var filesystemChecksums = make(map[string]string)
if _, err := os.Stat(relativePath); os.IsNotExist(err) {
log.Debug("%s does not exist, creating\n", relativePath)
err2 := os.Mkdir(relativePath, 0700)
helpers.Check(err2, true, "Creating log folder", helpers.Trace())
} else {
log.Info(relativePath, " exists, running checksum validation")
f, err := os.Open(relativePath)
helpers.Check(err, true, "Opening download directory", helpers.Trace())
files, err := f.Readdir(-1)
f.Close()
helpers.Check(err, true, "Reading download directory files", helpers.Trace())
for _, file := range files {
if file.IsDir() {
//I guess we could walk the entire tree if we wanted..
log.Info(file.Name(), " is a directory. skipping\n")
continue
}
//store list of checksums in memory then compare before download
if flags.SkipDownloadedChecksumCheckVar == false {
log.Debug("Checksum check not skipped for:", relativePath+file.Name())
sha2 := helpers.ComputeSha256(relativePath + file.Name())
filesystemChecksums[sha2] = relativePath + file.Name()
}
}
}
//create file
readme := relativePath + "/" + readmeFileName
log.Debug("Trying to create readme file under ", readme)
DetectDetailsFile(readme, masterkey)
log.Debug("size of index", words)
for key := range words {
//check if the index is an invalid option, skip if needed
size := helpers.StringToInt64(words[key])
if size < 1 || size > int64(sortedSize) {
log.Warn("Out of bounds number ", words[key], ", skipping")
continue
}
//fileName := strings.TrimPrefix(sorted[size-1].DownloadURI, creds.URL+"/"+creds.Repository+"/"+path+"/")
fileName := strings.TrimPrefix(sorted[size-1].Path, "/"+path+"/")
log.Debug("fileName trimmed:", fileName, " path:", path, " sorted[size-1].Path:", sorted[size-1].Path)
//check shasum of download against in folder
if filesystemChecksums[sorted[size-1].Checksums.Sha256] != "" {
log.Info("file ", fileName, " exists, skipping download\n")
continue
}
log.Info("downloading ", words[key], " ", sorted[size-1].DownloadURI)
log.Debug("sorted:", sorted)
// do some naive file type detection here
readableFilesExtensions := []string{"txt", "pdf", "json", "yaml", "yml", "json", "xml", "log"}
var readableFile bool
for i := range readableFilesExtensions {
if strings.HasSuffix(fileName, readableFilesExtensions[i]) {
log.Info("do not create folder, is readable without unarchiving:", fileName)
readableFile = true
}
}
oldRelativePath := relativePath
if !readableFile {
log.Info("creating folder due to archive:", relativePath+fileName+"-folder")
err := os.Mkdir(relativePath+fileName+"-folder", 0755)
helpers.Check(err, false, "Archive folder create", helpers.Trace())
relativePath = relativePath + fileName + "-folder/"
}
_, filepath := auth.GetRestAPI(sorted[size-1].DownloadURI, creds.Username, creds.Apikey, relativePath+fileName, sorted[size-1
|
GetFilesDetails
|
identifier_name
|
|
common.rs
|
-> bool {
!self.defeated() && self.owned_tiles > 0
}
}
/// Represent an action a player can perform.
#[derive(Copy, Clone, Debug, Deserialize, Serialize)]
#[serde(tag = "type")]
#[serde(rename_all = "lowercase")]
pub enum Action {
/// Resign
Resign,
/// Cancel all the moves already queued for the player
#[serde(rename = "cancel_moves")]
CancelMoves,
/// Make a move from a tile to another
Move(Move),
}
/// Represent a move from one tile to another. During a move, units are transfered from one tile to
/// another adjacent tile.
#[derive(Copy, Clone, Debug, Deserialize, Serialize)]
pub struct Move {
/// Player that is making the move.
#[serde(skip)]
pub player: PlayerId,
/// Index of the tile from which troops are being moved.
pub from: usize,
/// Direction to which the troops are being moved.
pub direction: Direction,
}
#[derive(Copy, Clone, Debug, PartialEq, Deserialize, Serialize)]
#[serde(rename_all = "lowercase")]
pub enum Direction {
|
Up,
Down,
}
#[derive(Copy, Clone, Debug, PartialEq)]
/// Outcome of a move
pub enum MoveOutcome {
/// Outcome when a move resulted in a general being captured. The player ID is the ID of the
/// defeated player.
GeneralCaptured(PlayerId),
/// Outcome when a move resulted in an open tile or a city tile being captured. If the tile
/// was belonging to a different player than the one making the move, the player's ID is
/// specified.
TileCaptured(Option<PlayerId>),
/// Outcome when a move did not result in a tile being captured.
StatuQuo,
}
/// Represent the different types of open (ie non-mountain) tiles
#[derive(Copy, Clone, PartialEq, Debug, Serialize)]
#[serde(rename_all = "lowercase")]
pub enum TileKind {
/// A tile that contains a general
General,
/// A tile that contains a city
City,
/// A regular tile
Open,
/// A tile that contains a mountain
Mountain,
}
/// Represent an open tile. Open tiles are tiles that are not mountains, ie tiles that players can
/// conquer.
#[derive(Clone, PartialEq, Debug, Serialize)]
pub struct Tile {
/// The ID of the player that currenlty owns the tile (a player own a tile if he/she has units
/// occupying the tile).
#[serde(skip_serializing_if = "Option::is_none")]
owner: Option<PlayerId>,
/// Number of units occupying the tile
#[serde(skip_serializing_if = "has_no_unit")]
units: u16,
/// The type of tile (open, city or general)
#[serde(skip_serializing_if = "is_open")]
kind: TileKind,
/// List of players that can see the tile. To be able to see an open tile, a player must own a
/// tile that touches it.
#[serde(skip)]
visible_by: HashSet<PlayerId>,
/// Players that had visibility on this tile when it changed.
#[serde(skip)]
dirty_for: HashSet<PlayerId>,
}
/// Small helper used by serde to avoid serializing the `kind` field if the tile if of type
/// `TileKind::Open`. We try to keep the jsons as small as possible for network efficiency.
fn is_open(kind: &TileKind) -> bool {
*kind == TileKind::Open
}
/// Small helper used by serde to avoid serializing the `units` field if the tile does not have any
/// units. We try to keep the jsons as small as possible for network efficiency.
fn has_no_unit(units: &u16) -> bool {
*units == 0
}
impl Tile {
/// Return a new open tile or the given type, with no owner, and no unit.
pub fn new() -> Self {
Tile {
owner: None,
units: 0,
dirty_for: HashSet::new(),
visible_by: HashSet::new(),
kind: TileKind::Mountain,
}
}
/// Return whether the tile is marked as visible by the given player.
pub fn is_visible_by(&self, player: PlayerId) -> bool {
self.visible_by.contains(&player)
}
/// Mark the tile as invisible for the given player
pub fn hide_from(&mut self, player: PlayerId) {
let was_visible = self.visible_by.remove(&player);
if was_visible {
self.dirty_for.insert(player);
}
}
/// Mark the tile as visible for the given player, updating the source and destination tiles
/// state if necessary (number of units, owner, etc.).
pub fn reveal_to(&mut self, player: PlayerId) {
self.visible_by.insert(player);
self.dirty_for.insert(player);
}
/// Perform a move from a source tile to a destination tile.
pub fn attack(&mut self, dst: &mut Tile) -> Result<MoveOutcome, InvalidMove> {
if self.is_mountain() {
return Err(InvalidMove::FromInvalidTile);
}
if dst.is_mountain() {
return Err(InvalidMove::ToInvalidTile);
}
if self.units() < 2 {
return Err(InvalidMove::NotEnoughUnits);
}
let attacker = self.owner.ok_or(InvalidMove::SourceTileNotOwned)?;
let outcome = match dst.owner {
// The destination tile belongs to someone else
Some(defender) if defender != attacker => {
// The defender has more units.
if dst.units >= self.units - 1 {
dst.units -= self.units - 1;
MoveOutcome::StatuQuo
}
// The attacker has more units. Capture the tile.
else {
dst.units = self.units - 1 - dst.units;
dst.owner = self.owner;
// We're capturing a general
if dst.kind == TileKind::General {
// Turn the general into a regular city
dst.kind = TileKind::City;
MoveOutcome::GeneralCaptured(defender)
}
// We're capturing a regular tile
else {
MoveOutcome::TileCaptured(Some(defender))
}
}
}
// The owner is the same for both tiles, just transfer the unit
Some(_defender) => {
dst.units += self.units - 1;
MoveOutcome::StatuQuo
}
// The destination tile is not owned by anyone.
None => {
// The destination has more units, we can't capture it
if dst.units >= self.units - 1 {
dst.units -= self.units - 1;
MoveOutcome::StatuQuo
} else {
dst.units = self.units - 1 - dst.units;
dst.owner = self.owner;
MoveOutcome::TileCaptured(None)
}
}
};
// In any case, we always only leave 1 unit in the source tile
// TODO: would be nice to support splitting the source tile units before moving.
self.units = 1;
self.set_dirty();
dst.set_dirty();
Ok(outcome)
}
/// Return the owner of the tile, if any
pub fn owner(&self) -> Option<PlayerId> {
self.owner
}
/// Return the number of units occupying the tile
pub fn units(&self) -> u16 {
self.units
}
/// Return whether the tile is open. A tile is open if it's not a city, a general or a
/// mountain.
pub fn is_open(&self) -> bool {
self.kind == TileKind::Open
}
/// Return whether the tile is a general.
pub fn is_general(&self) -> bool {
self.kind == TileKind::General
}
/// Return whether the tile is a city.
pub fn is_city(&self) -> bool {
self.kind == TileKind::City
}
/// Return whether the tile is a mountain
pub fn is_mountain(&self) -> bool {
self.kind == TileKind::Mountain
}
/// Turn the tile into an open tile
pub fn make_open(&mut self) {
self.kind = TileKind::Open;
self.set_dirty();
}
pub fn set_dirty(&mut self) {
for player_id in self.visible_by.iter() {
self.dirty_for.insert(*player_id);
}
}
/// Turn the tile into a general
pub fn make_general(&mut self) {
self.kind = TileKind::General;
self.set_dirty();
}
// // FIXME: unused for now, but that's because we don't have city yet
// /// Turn the tile into a fortess.
// pub fn make_city(&mut self) {
// self.kind = TileKind::City;
// self.set_dirty();
// }
/// Turn the tile into a mountain.
pub fn make_mountain(&mut self) {
self.kind = TileKind::Mountain;
self.set_dirty();
}
/// Set the number of units occupying the tile
pub fn set_units(&mut self, units: u16) {
if self.is_mountain() {
return;
}
self.units = units;
self.set_dirty();
}
/// Increment the number of units
|
Right,
Left,
|
random_line_split
|
common.rs
|
kind: TileKind,
/// List of players that can see the tile. To be able to see an open tile, a player must own a
/// tile that touches it.
#[serde(skip)]
visible_by: HashSet<PlayerId>,
/// Players that had visibility on this tile when it changed.
#[serde(skip)]
dirty_for: HashSet<PlayerId>,
}
/// Small helper used by serde to avoid serializing the `kind` field if the tile if of type
/// `TileKind::Open`. We try to keep the jsons as small as possible for network efficiency.
fn is_open(kind: &TileKind) -> bool {
*kind == TileKind::Open
}
/// Small helper used by serde to avoid serializing the `units` field if the tile does not have any
/// units. We try to keep the jsons as small as possible for network efficiency.
fn has_no_unit(units: &u16) -> bool {
*units == 0
}
impl Tile {
/// Return a new open tile or the given type, with no owner, and no unit.
pub fn new() -> Self {
Tile {
owner: None,
units: 0,
dirty_for: HashSet::new(),
visible_by: HashSet::new(),
kind: TileKind::Mountain,
}
}
/// Return whether the tile is marked as visible by the given player.
pub fn is_visible_by(&self, player: PlayerId) -> bool {
self.visible_by.contains(&player)
}
/// Mark the tile as invisible for the given player
pub fn hide_from(&mut self, player: PlayerId) {
let was_visible = self.visible_by.remove(&player);
if was_visible {
self.dirty_for.insert(player);
}
}
/// Mark the tile as visible for the given player, updating the source and destination tiles
/// state if necessary (number of units, owner, etc.).
pub fn reveal_to(&mut self, player: PlayerId) {
self.visible_by.insert(player);
self.dirty_for.insert(player);
}
/// Perform a move from a source tile to a destination tile.
pub fn attack(&mut self, dst: &mut Tile) -> Result<MoveOutcome, InvalidMove> {
if self.is_mountain() {
return Err(InvalidMove::FromInvalidTile);
}
if dst.is_mountain() {
return Err(InvalidMove::ToInvalidTile);
}
if self.units() < 2 {
return Err(InvalidMove::NotEnoughUnits);
}
let attacker = self.owner.ok_or(InvalidMove::SourceTileNotOwned)?;
let outcome = match dst.owner {
// The destination tile belongs to someone else
Some(defender) if defender != attacker => {
// The defender has more units.
if dst.units >= self.units - 1 {
dst.units -= self.units - 1;
MoveOutcome::StatuQuo
}
// The attacker has more units. Capture the tile.
else {
dst.units = self.units - 1 - dst.units;
dst.owner = self.owner;
// We're capturing a general
if dst.kind == TileKind::General {
// Turn the general into a regular city
dst.kind = TileKind::City;
MoveOutcome::GeneralCaptured(defender)
}
// We're capturing a regular tile
else {
MoveOutcome::TileCaptured(Some(defender))
}
}
}
// The owner is the same for both tiles, just transfer the unit
Some(_defender) => {
dst.units += self.units - 1;
MoveOutcome::StatuQuo
}
// The destination tile is not owned by anyone.
None => {
// The destination has more units, we can't capture it
if dst.units >= self.units - 1 {
dst.units -= self.units - 1;
MoveOutcome::StatuQuo
} else {
dst.units = self.units - 1 - dst.units;
dst.owner = self.owner;
MoveOutcome::TileCaptured(None)
}
}
};
// In any case, we always only leave 1 unit in the source tile
// TODO: would be nice to support splitting the source tile units before moving.
self.units = 1;
self.set_dirty();
dst.set_dirty();
Ok(outcome)
}
/// Return the owner of the tile, if any
pub fn owner(&self) -> Option<PlayerId> {
self.owner
}
/// Return the number of units occupying the tile
pub fn units(&self) -> u16 {
self.units
}
/// Return whether the tile is open. A tile is open if it's not a city, a general or a
/// mountain.
pub fn is_open(&self) -> bool {
self.kind == TileKind::Open
}
/// Return whether the tile is a general.
pub fn is_general(&self) -> bool {
self.kind == TileKind::General
}
/// Return whether the tile is a city.
pub fn is_city(&self) -> bool {
self.kind == TileKind::City
}
/// Return whether the tile is a mountain
pub fn is_mountain(&self) -> bool {
self.kind == TileKind::Mountain
}
/// Turn the tile into an open tile
pub fn make_open(&mut self) {
self.kind = TileKind::Open;
self.set_dirty();
}
pub fn set_dirty(&mut self) {
for player_id in self.visible_by.iter() {
self.dirty_for.insert(*player_id);
}
}
/// Turn the tile into a general
pub fn make_general(&mut self) {
self.kind = TileKind::General;
self.set_dirty();
}
// // FIXME: unused for now, but that's because we don't have city yet
// /// Turn the tile into a fortess.
// pub fn make_city(&mut self) {
// self.kind = TileKind::City;
// self.set_dirty();
// }
/// Turn the tile into a mountain.
pub fn make_mountain(&mut self) {
self.kind = TileKind::Mountain;
self.set_dirty();
}
/// Set the number of units occupying the tile
pub fn set_units(&mut self, units: u16) {
if self.is_mountain() {
return;
}
self.units = units;
self.set_dirty();
}
/// Increment the number of units occupying the tile
pub fn incr_units(&mut self, units: u16) {
if self.is_mountain() {
return;
}
self.units += units;
self.set_dirty();
}
/// Set the owner of the tile. To remove the existing owner, set the owner to `None`.
pub fn set_owner(&mut self, player: Option<PlayerId>) {
if self.is_mountain() {
return;
}
// Mark the tile as dirty for the players that have visibility on the tile
self.set_dirty();
// Mark the tile as dirty for the previous owner. As owner, it should have visibility on
// the tile, so should have been added `dirty_for` already, but let's be safe, it's pretty
// cheap.
if let Some(owner) = self.owner {
self.dirty_for.insert(owner);
}
self.owner = player;
if let Some(owner) = self.owner {
self.reveal_to(owner);
}
}
/// Return whether the tile's state has changed. A tile state changes when its type, its owner,
/// or the number of units occupying it changes.
pub fn is_dirty(&self) -> bool {
!self.dirty_for.is_empty()
}
pub fn is_dirty_for(&self, player_id: PlayerId) -> bool {
self.dirty_for.contains(&player_id)
}
/// Mark the tile a clean. This should be called to acknoledge that the tile has been processed
/// when after is was marked as dirty.
pub fn set_clean(&mut self) {
let _ = self.dirty_for.drain();
}
}
/// Represent an error that occurs when an invalid move is processed.
#[derive(Debug, PartialEq, Eq)]
pub enum InvalidMove {
/// The source tile does not have enough units to perform the move. To be able to move from one
/// tile, the tile must have at least two units.
NotEnoughUnits,
/// The destination tile is invalid (it can be a mountain or an out-of-grid tile. This occurs
/// for instance if the source tile is on the top row, and the move is upward.
ToInvalidTile,
/// The source tile is either a mountain or out of the grid.
FromInvalidTile,
/// The source tile does not belong to the player making the move. A move can only be performed
/// by a player.
SourceTileNotOwned,
}
use std::error::Error;
use std::fmt;
impl Error for InvalidMove {
fn description(&self) -> &str {
match *self {
InvalidMove::NotEnoughUnits => "not enough unit on the source tile",
InvalidMove::ToInvalidTile => {
"the destination tile is either a mountain or not on the map"
}
InvalidMove::FromInvalidTile =>
|
{
"the source tile is either a mountain or not on the map"
}
|
conditional_block
|
|
common.rs
|
-> bool {
!self.defeated() && self.owned_tiles > 0
}
}
/// Represent an action a player can perform.
#[derive(Copy, Clone, Debug, Deserialize, Serialize)]
#[serde(tag = "type")]
#[serde(rename_all = "lowercase")]
pub enum Action {
/// Resign
Resign,
/// Cancel all the moves already queued for the player
#[serde(rename = "cancel_moves")]
CancelMoves,
/// Make a move from a tile to another
Move(Move),
}
/// Represent a move from one tile to another. During a move, units are transfered from one tile to
/// another adjacent tile.
#[derive(Copy, Clone, Debug, Deserialize, Serialize)]
pub struct Move {
/// Player that is making the move.
#[serde(skip)]
pub player: PlayerId,
/// Index of the tile from which troops are being moved.
pub from: usize,
/// Direction to which the troops are being moved.
pub direction: Direction,
}
#[derive(Copy, Clone, Debug, PartialEq, Deserialize, Serialize)]
#[serde(rename_all = "lowercase")]
pub enum Direction {
Right,
Left,
Up,
Down,
}
#[derive(Copy, Clone, Debug, PartialEq)]
/// Outcome of a move
pub enum MoveOutcome {
/// Outcome when a move resulted in a general being captured. The player ID is the ID of the
/// defeated player.
GeneralCaptured(PlayerId),
/// Outcome when a move resulted in an open tile or a city tile being captured. If the tile
/// was belonging to a different player than the one making the move, the player's ID is
/// specified.
TileCaptured(Option<PlayerId>),
/// Outcome when a move did not result in a tile being captured.
StatuQuo,
}
/// Represent the different types of open (ie non-mountain) tiles
#[derive(Copy, Clone, PartialEq, Debug, Serialize)]
#[serde(rename_all = "lowercase")]
pub enum TileKind {
/// A tile that contains a general
General,
/// A tile that contains a city
City,
/// A regular tile
Open,
/// A tile that contains a mountain
Mountain,
}
/// Represent an open tile. Open tiles are tiles that are not mountains, ie tiles that players can
/// conquer.
#[derive(Clone, PartialEq, Debug, Serialize)]
pub struct Tile {
/// The ID of the player that currenlty owns the tile (a player own a tile if he/she has units
/// occupying the tile).
#[serde(skip_serializing_if = "Option::is_none")]
owner: Option<PlayerId>,
/// Number of units occupying the tile
#[serde(skip_serializing_if = "has_no_unit")]
units: u16,
/// The type of tile (open, city or general)
#[serde(skip_serializing_if = "is_open")]
kind: TileKind,
/// List of players that can see the tile. To be able to see an open tile, a player must own a
/// tile that touches it.
#[serde(skip)]
visible_by: HashSet<PlayerId>,
/// Players that had visibility on this tile when it changed.
#[serde(skip)]
dirty_for: HashSet<PlayerId>,
}
/// Small helper used by serde to avoid serializing the `kind` field if the tile if of type
/// `TileKind::Open`. We try to keep the jsons as small as possible for network efficiency.
fn is_open(kind: &TileKind) -> bool {
*kind == TileKind::Open
}
/// Small helper used by serde to avoid serializing the `units` field if the tile does not have any
/// units. We try to keep the jsons as small as possible for network efficiency.
fn has_no_unit(units: &u16) -> bool {
*units == 0
}
impl Tile {
/// Return a new open tile or the given type, with no owner, and no unit.
pub fn new() -> Self {
Tile {
owner: None,
units: 0,
dirty_for: HashSet::new(),
visible_by: HashSet::new(),
kind: TileKind::Mountain,
}
}
/// Return whether the tile is marked as visible by the given player.
pub fn is_visible_by(&self, player: PlayerId) -> bool {
self.visible_by.contains(&player)
}
/// Mark the tile as invisible for the given player
pub fn
|
(&mut self, player: PlayerId) {
let was_visible = self.visible_by.remove(&player);
if was_visible {
self.dirty_for.insert(player);
}
}
/// Mark the tile as visible for the given player, updating the source and destination tiles
/// state if necessary (number of units, owner, etc.).
pub fn reveal_to(&mut self, player: PlayerId) {
self.visible_by.insert(player);
self.dirty_for.insert(player);
}
/// Perform a move from a source tile to a destination tile.
pub fn attack(&mut self, dst: &mut Tile) -> Result<MoveOutcome, InvalidMove> {
if self.is_mountain() {
return Err(InvalidMove::FromInvalidTile);
}
if dst.is_mountain() {
return Err(InvalidMove::ToInvalidTile);
}
if self.units() < 2 {
return Err(InvalidMove::NotEnoughUnits);
}
let attacker = self.owner.ok_or(InvalidMove::SourceTileNotOwned)?;
let outcome = match dst.owner {
// The destination tile belongs to someone else
Some(defender) if defender != attacker => {
// The defender has more units.
if dst.units >= self.units - 1 {
dst.units -= self.units - 1;
MoveOutcome::StatuQuo
}
// The attacker has more units. Capture the tile.
else {
dst.units = self.units - 1 - dst.units;
dst.owner = self.owner;
// We're capturing a general
if dst.kind == TileKind::General {
// Turn the general into a regular city
dst.kind = TileKind::City;
MoveOutcome::GeneralCaptured(defender)
}
// We're capturing a regular tile
else {
MoveOutcome::TileCaptured(Some(defender))
}
}
}
// The owner is the same for both tiles, just transfer the unit
Some(_defender) => {
dst.units += self.units - 1;
MoveOutcome::StatuQuo
}
// The destination tile is not owned by anyone.
None => {
// The destination has more units, we can't capture it
if dst.units >= self.units - 1 {
dst.units -= self.units - 1;
MoveOutcome::StatuQuo
} else {
dst.units = self.units - 1 - dst.units;
dst.owner = self.owner;
MoveOutcome::TileCaptured(None)
}
}
};
// In any case, we always only leave 1 unit in the source tile
// TODO: would be nice to support splitting the source tile units before moving.
self.units = 1;
self.set_dirty();
dst.set_dirty();
Ok(outcome)
}
/// Return the owner of the tile, if any
pub fn owner(&self) -> Option<PlayerId> {
self.owner
}
/// Return the number of units occupying the tile
pub fn units(&self) -> u16 {
self.units
}
/// Return whether the tile is open. A tile is open if it's not a city, a general or a
/// mountain.
pub fn is_open(&self) -> bool {
self.kind == TileKind::Open
}
/// Return whether the tile is a general.
pub fn is_general(&self) -> bool {
self.kind == TileKind::General
}
/// Return whether the tile is a city.
pub fn is_city(&self) -> bool {
self.kind == TileKind::City
}
/// Return whether the tile is a mountain
pub fn is_mountain(&self) -> bool {
self.kind == TileKind::Mountain
}
/// Turn the tile into an open tile
pub fn make_open(&mut self) {
self.kind = TileKind::Open;
self.set_dirty();
}
pub fn set_dirty(&mut self) {
for player_id in self.visible_by.iter() {
self.dirty_for.insert(*player_id);
}
}
/// Turn the tile into a general
pub fn make_general(&mut self) {
self.kind = TileKind::General;
self.set_dirty();
}
// // FIXME: unused for now, but that's because we don't have city yet
// /// Turn the tile into a fortess.
// pub fn make_city(&mut self) {
// self.kind = TileKind::City;
// self.set_dirty();
// }
/// Turn the tile into a mountain.
pub fn make_mountain(&mut self) {
self.kind = TileKind::Mountain;
self.set_dirty();
}
/// Set the number of units occupying the tile
pub fn set_units(&mut self, units: u16) {
if self.is_mountain() {
return;
}
self.units = units;
self.set_dirty();
}
/// Increment the number of
|
hide_from
|
identifier_name
|
common.rs
|
-> bool {
!self.defeated() && self.owned_tiles > 0
}
}
/// Represent an action a player can perform.
#[derive(Copy, Clone, Debug, Deserialize, Serialize)]
#[serde(tag = "type")]
#[serde(rename_all = "lowercase")]
pub enum Action {
/// Resign
Resign,
/// Cancel all the moves already queued for the player
#[serde(rename = "cancel_moves")]
CancelMoves,
/// Make a move from a tile to another
Move(Move),
}
/// Represent a move from one tile to another. During a move, units are transfered from one tile to
/// another adjacent tile.
#[derive(Copy, Clone, Debug, Deserialize, Serialize)]
pub struct Move {
/// Player that is making the move.
#[serde(skip)]
pub player: PlayerId,
/// Index of the tile from which troops are being moved.
pub from: usize,
/// Direction to which the troops are being moved.
pub direction: Direction,
}
#[derive(Copy, Clone, Debug, PartialEq, Deserialize, Serialize)]
#[serde(rename_all = "lowercase")]
pub enum Direction {
Right,
Left,
Up,
Down,
}
#[derive(Copy, Clone, Debug, PartialEq)]
/// Outcome of a move
pub enum MoveOutcome {
/// Outcome when a move resulted in a general being captured. The player ID is the ID of the
/// defeated player.
GeneralCaptured(PlayerId),
/// Outcome when a move resulted in an open tile or a city tile being captured. If the tile
/// was belonging to a different player than the one making the move, the player's ID is
/// specified.
TileCaptured(Option<PlayerId>),
/// Outcome when a move did not result in a tile being captured.
StatuQuo,
}
/// Represent the different types of open (ie non-mountain) tiles
#[derive(Copy, Clone, PartialEq, Debug, Serialize)]
#[serde(rename_all = "lowercase")]
pub enum TileKind {
/// A tile that contains a general
General,
/// A tile that contains a city
City,
/// A regular tile
Open,
/// A tile that contains a mountain
Mountain,
}
/// Represent an open tile. Open tiles are tiles that are not mountains, ie tiles that players can
/// conquer.
#[derive(Clone, PartialEq, Debug, Serialize)]
pub struct Tile {
/// The ID of the player that currenlty owns the tile (a player own a tile if he/she has units
/// occupying the tile).
#[serde(skip_serializing_if = "Option::is_none")]
owner: Option<PlayerId>,
/// Number of units occupying the tile
#[serde(skip_serializing_if = "has_no_unit")]
units: u16,
/// The type of tile (open, city or general)
#[serde(skip_serializing_if = "is_open")]
kind: TileKind,
/// List of players that can see the tile. To be able to see an open tile, a player must own a
/// tile that touches it.
#[serde(skip)]
visible_by: HashSet<PlayerId>,
/// Players that had visibility on this tile when it changed.
#[serde(skip)]
dirty_for: HashSet<PlayerId>,
}
/// Small helper used by serde to avoid serializing the `kind` field if the tile if of type
/// `TileKind::Open`. We try to keep the jsons as small as possible for network efficiency.
fn is_open(kind: &TileKind) -> bool {
*kind == TileKind::Open
}
/// Small helper used by serde to avoid serializing the `units` field if the tile does not have any
/// units. We try to keep the jsons as small as possible for network efficiency.
fn has_no_unit(units: &u16) -> bool {
*units == 0
}
impl Tile {
/// Return a new open tile or the given type, with no owner, and no unit.
pub fn new() -> Self {
Tile {
owner: None,
units: 0,
dirty_for: HashSet::new(),
visible_by: HashSet::new(),
kind: TileKind::Mountain,
}
}
/// Return whether the tile is marked as visible by the given player.
pub fn is_visible_by(&self, player: PlayerId) -> bool {
self.visible_by.contains(&player)
}
/// Mark the tile as invisible for the given player
pub fn hide_from(&mut self, player: PlayerId) {
let was_visible = self.visible_by.remove(&player);
if was_visible {
self.dirty_for.insert(player);
}
}
/// Mark the tile as visible for the given player, updating the source and destination tiles
/// state if necessary (number of units, owner, etc.).
pub fn reveal_to(&mut self, player: PlayerId) {
self.visible_by.insert(player);
self.dirty_for.insert(player);
}
/// Perform a move from a source tile to a destination tile.
pub fn attack(&mut self, dst: &mut Tile) -> Result<MoveOutcome, InvalidMove>
|
// The attacker has more units. Capture the tile.
else {
dst.units = self.units - 1 - dst.units;
dst.owner = self.owner;
// We're capturing a general
if dst.kind == TileKind::General {
// Turn the general into a regular city
dst.kind = TileKind::City;
MoveOutcome::GeneralCaptured(defender)
}
// We're capturing a regular tile
else {
MoveOutcome::TileCaptured(Some(defender))
}
}
}
// The owner is the same for both tiles, just transfer the unit
Some(_defender) => {
dst.units += self.units - 1;
MoveOutcome::StatuQuo
}
// The destination tile is not owned by anyone.
None => {
// The destination has more units, we can't capture it
if dst.units >= self.units - 1 {
dst.units -= self.units - 1;
MoveOutcome::StatuQuo
} else {
dst.units = self.units - 1 - dst.units;
dst.owner = self.owner;
MoveOutcome::TileCaptured(None)
}
}
};
// In any case, we always only leave 1 unit in the source tile
// TODO: would be nice to support splitting the source tile units before moving.
self.units = 1;
self.set_dirty();
dst.set_dirty();
Ok(outcome)
}
/// Return the owner of the tile, if any
pub fn owner(&self) -> Option<PlayerId> {
self.owner
}
/// Return the number of units occupying the tile
pub fn units(&self) -> u16 {
self.units
}
/// Return whether the tile is open. A tile is open if it's not a city, a general or a
/// mountain.
pub fn is_open(&self) -> bool {
self.kind == TileKind::Open
}
/// Return whether the tile is a general.
pub fn is_general(&self) -> bool {
self.kind == TileKind::General
}
/// Return whether the tile is a city.
pub fn is_city(&self) -> bool {
self.kind == TileKind::City
}
/// Return whether the tile is a mountain
pub fn is_mountain(&self) -> bool {
self.kind == TileKind::Mountain
}
/// Turn the tile into an open tile
pub fn make_open(&mut self) {
self.kind = TileKind::Open;
self.set_dirty();
}
pub fn set_dirty(&mut self) {
for player_id in self.visible_by.iter() {
self.dirty_for.insert(*player_id);
}
}
/// Turn the tile into a general
pub fn make_general(&mut self) {
self.kind = TileKind::General;
self.set_dirty();
}
// // FIXME: unused for now, but that's because we don't have city yet
// /// Turn the tile into a fortess.
// pub fn make_city(&mut self) {
// self.kind = TileKind::City;
// self.set_dirty();
// }
/// Turn the tile into a mountain.
pub fn make_mountain(&mut self) {
self.kind = TileKind::Mountain;
self.set_dirty();
}
/// Set the number of units occupying the tile
pub fn set_units(&mut self, units: u16) {
if self.is_mountain() {
return;
}
self.units = units;
self.set_dirty();
}
/// Increment the number of
|
{
if self.is_mountain() {
return Err(InvalidMove::FromInvalidTile);
}
if dst.is_mountain() {
return Err(InvalidMove::ToInvalidTile);
}
if self.units() < 2 {
return Err(InvalidMove::NotEnoughUnits);
}
let attacker = self.owner.ok_or(InvalidMove::SourceTileNotOwned)?;
let outcome = match dst.owner {
// The destination tile belongs to someone else
Some(defender) if defender != attacker => {
// The defender has more units.
if dst.units >= self.units - 1 {
dst.units -= self.units - 1;
MoveOutcome::StatuQuo
}
|
identifier_body
|
from_str.rs
|
checkless.len();
let (two_pos, promotion) = if let Some(equals) = checkless.find('=') {
let (left_over, promote) = checkless.split_at(equals);
let mut iter = promote.chars();
if iter.next() != Some('=') {
return Err(MoveError::Other);
}
let p = iter.next().ok_or(MoveError::Other)?;
if iter.next().is_some() {
return Err(MoveError::Other);
}
(left_over, Some(p))
} else {
(checkless, None)
};
let loc = if let Some(dash) = two_pos.find('-') {
dash
} else if let Some(x) = two_pos.find('x') {
x
} else {
return Err(MoveError::Other);
};
let (left, tmp) = two_pos.split_at(loc);
let (mid, mut right) = tmp.split_at(1); // x and - are both ascii and therefore 1 byte
let from = left.parse::<Position>()?;
let captured = if mid == "x" {
let mut iter = right.chars();
let start = iter.next().ok_or(MoveError::Other)?;
Some(if start.is_ascii_lowercase() {
'P'
} else {
right = iter.as_str();
start
})
} else {
None
};
let to = right.parse::<Position>()?;
Ok(BasicMove {
piece,
from,
captured,
to,
promotion,
checks,
mates,
})
}
}
impl FromStr for Move {
type Err = MoveError;
fn from_str(string: &str) -> Result<Self, Self::Err> {
use Move::*;
Ok(match string {
"#" => Checkmate,
"S" => Stalemate,
"T" => Timeout,
"R" => Resign,
s if s.starts_with("O-O") => {
let mateless = s.trim_end_matches('#');
let mates = s.len() - mateless.len();
match mateless {
"O-O-O" => QueenCastle(mates),
"O-O" => KingCastle(mates),
_ => return Err(MoveError::Castle),
}
}
_ => Normal(string.parse::<BasicMove>()?),
})
}
}
struct MovePair {
main: Move,
modifier: Option<Move>,
stalemate: bool,
}
impl FromStr for MovePair {
type Err = MoveError;
fn from_str(string: &str) -> Result<Self, Self::Err> {
let mut stalemate = false;
let break_index = if string.len() == 2 {
1 // No move is 2 bytes long
} else if string.len() > 2 {
if string.ends_with("RS") && !string.ends_with("=RS")
|| string.ends_with("TS") && !string.ends_with("=TS")
{
stalemate = true;
string.len() - 2
} else if (string.ends_with('R') && !string.ends_with("=R"))
|| (string.ends_with('S') && !string.ends_with("=S"))
|| (string.ends_with('T') && !string.ends_with("=T"))
{
string.len() - 1
} else {
0
}
} else {
0
};
Ok(if break_index == 0 {
Self {
main: string.parse()?,
modifier: None,
stalemate,
}
} else {
Self {
main: string.get(..break_index).ok_or(MoveError::Other)?.parse()?,
modifier: Some(
string
.get(break_index..(break_index + 1))
.ok_or(MoveError::Other)?
.parse()?,
),
stalemate,
}
})
}
}
#[derive(PartialEq, Clone, Debug)]
enum IntermediateError {
Other(usize),
TurnNumber(usize),
TurnNumberParse(usize, String),
TurnTooLong(usize),
MoveErr(MoveError, String, usize),
Description(usize),
}
fn parse_quarter(string: &str) -> Result<(QuarterTurn, &str), IntermediateError> {
/// Generally the move is bounded by whitespace, but supporting pgns that don't
/// have all the neccessary whitespace is good. Notably, whitespace before a new
/// line number is critical.
fn next_move(c: char) -> bool {
c.is_whitespace()
|| match c {
'.' | '{' | '(' | ')' => true,
_ => false,
}
}
use IntermediateError::*;
let trimmed = string.trim_start();
if trimmed == "" {
return Err(Other(trimmed.len()));
}
let split = trimmed.find(next_move).unwrap_or(string.len() - 1);
let (main_str, mut rest) = trimmed.split_at(split);
let move_pair = main_str
.trim()
.parse::<MovePair>()
.map_err(|m| MoveErr(m, main_str.to_owned(), rest.len()))?;
let mut description = None;
let mut alternatives = Vec::new();
rest = rest.trim_start();
if let Some(c) = rest.chars().next() {
if c == '{' {
let desc_end = rest.find('}').ok_or(Description(rest.len()))?;
let (mut desc_str, rest_tmp) = rest.split_at(desc_end + 1);
desc_str = desc_str.strip_prefix("{ ").ok_or(Description(rest.len()))?;
desc_str = desc_str.strip_suffix(" }").ok_or(Description(rest.len()))?;
description = Some(desc_str.to_owned());
rest = rest_tmp;
}
} else {
return Ok((
QuarterTurn {
main: move_pair.main,
modifier: move_pair.modifier,
extra_stalemate: move_pair.stalemate,
description,
alternatives,
},
rest,
));
};
rest = rest.trim_start();
while let Some(rest_tmp) = rest.strip_prefix('(') {
rest = rest_tmp;
let mut turns = Vec::new();
while rest.chars().next() != Some(')') {
let (turn, rest_tmp) = parse_turn(rest)?;
rest = rest_tmp;
turns.push(turn);
}
rest = rest.strip_prefix(')').unwrap().trim_start();
alternatives.push(turns);
}
Ok((
QuarterTurn {
main: move_pair.main,
modifier: move_pair.modifier,
extra_stalemate: move_pair.stalemate,
description,
alternatives,
},
rest,
))
}
fn
|
(string: &str) -> Result<(Turn, &str), IntermediateError> {
use IntermediateError::*;
let trimmed = string.trim_start();
let dot_loc = trimmed.find('.').ok_or(TurnNumber(trimmed.len()))?;
let (number_str, dots) = trimmed.split_at(dot_loc);
let number = if number_str == "" {
0
} else {
number_str
.parse()
.map_err(|_| TurnNumberParse(trimmed.len(), number_str.to_string()))?
};
let dot = dots.strip_prefix('.').unwrap();
let (mut rest, double_dot) = if let Some(dotted) = dot.strip_prefix('.') {
(dotted, true)
} else {
(dot, false)
};
let mut turns = Vec::new();
let for_error = rest.len();
let (qturn, rest_tmp) = parse_quarter(rest)?;
rest = rest_tmp.trim_start();
turns.push(qturn);
while let Some(rest_tmp) = rest.strip_prefix("..") {
if turns.len() >= 4 {
return Err(TurnTooLong(for_error));
}
let (qturn, rest_tmp) = parse_quarter(rest_tmp)?;
rest = rest_tmp.trim_start();
turns.push(qturn);
}
Ok((
Turn {
number,
double_dot,
turns,
},
rest,
))
}
#[derive(Error, PartialEq, Clone, Debug)]
pub enum PGN4Error {
#[error("Some error occured at {0}")]
Other(ErrorLocation),
#[error("Expected a turn number starting at {0}, but there isn't a dot")]
TurnNumber(ErrorLocation),
#[error("Turn number at {0} is malformed \"{1}\" should be a number or \"\"")]
TurnNumberParse(ErrorLocation, String),
#[error("More than 4 quarter turns are present in the turn starting at {0}")]
TurnTooLong(ErrorLocation),
#[error("Tag starting at {0} is malformed")]
BadTagged(ErrorLocation),
#[error("Move \"{1}\" at {2} failed to parse. {0}")]
BadMove(MoveError, String, ErrorLocation),
#[error("Description starting at {0} is malformed")]
BadDescription(ErrorLocation),
}
#[derive(PartialEq, Clone, Debug)]
pub struct ErrorLocation {
pub line: usize,
pub column: usize,
pub raw_offset: usize,
}
impl std::fmt::Display for ErrorLocation {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "line {} column {}", self.line, self.column)
}
}
impl FromStr for PGN4 {
|
parse_turn
|
identifier_name
|
from_str.rs
|
checkless.len();
let (two_pos, promotion) = if let Some(equals) = checkless.find('=') {
let (left_over, promote) = checkless.split_at(equals);
let mut iter = promote.chars();
if iter.next() != Some('=') {
return Err(MoveError::Other);
}
let p = iter.next().ok_or(MoveError::Other)?;
if iter.next().is_some() {
return Err(MoveError::Other);
}
(left_over, Some(p))
} else {
(checkless, None)
};
let loc = if let Some(dash) = two_pos.find('-') {
dash
} else if let Some(x) = two_pos.find('x') {
x
} else {
return Err(MoveError::Other);
};
let (left, tmp) = two_pos.split_at(loc);
let (mid, mut right) = tmp.split_at(1); // x and - are both ascii and therefore 1 byte
let from = left.parse::<Position>()?;
let captured = if mid == "x" {
let mut iter = right.chars();
let start = iter.next().ok_or(MoveError::Other)?;
Some(if start.is_ascii_lowercase() {
'P'
} else {
right = iter.as_str();
start
})
} else {
None
};
let to = right.parse::<Position>()?;
Ok(BasicMove {
piece,
from,
captured,
to,
promotion,
checks,
mates,
})
}
}
impl FromStr for Move {
type Err = MoveError;
fn from_str(string: &str) -> Result<Self, Self::Err> {
use Move::*;
Ok(match string {
"#" => Checkmate,
"S" => Stalemate,
"T" => Timeout,
"R" => Resign,
s if s.starts_with("O-O") =>
|
_ => Normal(string.parse::<BasicMove>()?),
})
}
}
struct MovePair {
main: Move,
modifier: Option<Move>,
stalemate: bool,
}
impl FromStr for MovePair {
type Err = MoveError;
fn from_str(string: &str) -> Result<Self, Self::Err> {
let mut stalemate = false;
let break_index = if string.len() == 2 {
1 // No move is 2 bytes long
} else if string.len() > 2 {
if string.ends_with("RS") && !string.ends_with("=RS")
|| string.ends_with("TS") && !string.ends_with("=TS")
{
stalemate = true;
string.len() - 2
} else if (string.ends_with('R') && !string.ends_with("=R"))
|| (string.ends_with('S') && !string.ends_with("=S"))
|| (string.ends_with('T') && !string.ends_with("=T"))
{
string.len() - 1
} else {
0
}
} else {
0
};
Ok(if break_index == 0 {
Self {
main: string.parse()?,
modifier: None,
stalemate,
}
} else {
Self {
main: string.get(..break_index).ok_or(MoveError::Other)?.parse()?,
modifier: Some(
string
.get(break_index..(break_index + 1))
.ok_or(MoveError::Other)?
.parse()?,
),
stalemate,
}
})
}
}
#[derive(PartialEq, Clone, Debug)]
enum IntermediateError {
Other(usize),
TurnNumber(usize),
TurnNumberParse(usize, String),
TurnTooLong(usize),
MoveErr(MoveError, String, usize),
Description(usize),
}
fn parse_quarter(string: &str) -> Result<(QuarterTurn, &str), IntermediateError> {
/// Generally the move is bounded by whitespace, but supporting pgns that don't
/// have all the neccessary whitespace is good. Notably, whitespace before a new
/// line number is critical.
fn next_move(c: char) -> bool {
c.is_whitespace()
|| match c {
'.' | '{' | '(' | ')' => true,
_ => false,
}
}
use IntermediateError::*;
let trimmed = string.trim_start();
if trimmed == "" {
return Err(Other(trimmed.len()));
}
let split = trimmed.find(next_move).unwrap_or(string.len() - 1);
let (main_str, mut rest) = trimmed.split_at(split);
let move_pair = main_str
.trim()
.parse::<MovePair>()
.map_err(|m| MoveErr(m, main_str.to_owned(), rest.len()))?;
let mut description = None;
let mut alternatives = Vec::new();
rest = rest.trim_start();
if let Some(c) = rest.chars().next() {
if c == '{' {
let desc_end = rest.find('}').ok_or(Description(rest.len()))?;
let (mut desc_str, rest_tmp) = rest.split_at(desc_end + 1);
desc_str = desc_str.strip_prefix("{ ").ok_or(Description(rest.len()))?;
desc_str = desc_str.strip_suffix(" }").ok_or(Description(rest.len()))?;
description = Some(desc_str.to_owned());
rest = rest_tmp;
}
} else {
return Ok((
QuarterTurn {
main: move_pair.main,
modifier: move_pair.modifier,
extra_stalemate: move_pair.stalemate,
description,
alternatives,
},
rest,
));
};
rest = rest.trim_start();
while let Some(rest_tmp) = rest.strip_prefix('(') {
rest = rest_tmp;
let mut turns = Vec::new();
while rest.chars().next() != Some(')') {
let (turn, rest_tmp) = parse_turn(rest)?;
rest = rest_tmp;
turns.push(turn);
}
rest = rest.strip_prefix(')').unwrap().trim_start();
alternatives.push(turns);
}
Ok((
QuarterTurn {
main: move_pair.main,
modifier: move_pair.modifier,
extra_stalemate: move_pair.stalemate,
description,
alternatives,
},
rest,
))
}
fn parse_turn(string: &str) -> Result<(Turn, &str), IntermediateError> {
use IntermediateError::*;
let trimmed = string.trim_start();
let dot_loc = trimmed.find('.').ok_or(TurnNumber(trimmed.len()))?;
let (number_str, dots) = trimmed.split_at(dot_loc);
let number = if number_str == "" {
0
} else {
number_str
.parse()
.map_err(|_| TurnNumberParse(trimmed.len(), number_str.to_string()))?
};
let dot = dots.strip_prefix('.').unwrap();
let (mut rest, double_dot) = if let Some(dotted) = dot.strip_prefix('.') {
(dotted, true)
} else {
(dot, false)
};
let mut turns = Vec::new();
let for_error = rest.len();
let (qturn, rest_tmp) = parse_quarter(rest)?;
rest = rest_tmp.trim_start();
turns.push(qturn);
while let Some(rest_tmp) = rest.strip_prefix("..") {
if turns.len() >= 4 {
return Err(TurnTooLong(for_error));
}
let (qturn, rest_tmp) = parse_quarter(rest_tmp)?;
rest = rest_tmp.trim_start();
turns.push(qturn);
}
Ok((
Turn {
number,
double_dot,
turns,
},
rest,
))
}
#[derive(Error, PartialEq, Clone, Debug)]
pub enum PGN4Error {
#[error("Some error occured at {0}")]
Other(ErrorLocation),
#[error("Expected a turn number starting at {0}, but there isn't a dot")]
TurnNumber(ErrorLocation),
#[error("Turn number at {0} is malformed \"{1}\" should be a number or \"\"")]
TurnNumberParse(ErrorLocation, String),
#[error("More than 4 quarter turns are present in the turn starting at {0}")]
TurnTooLong(ErrorLocation),
#[error("Tag starting at {0} is malformed")]
BadTagged(ErrorLocation),
#[error("Move \"{1}\" at {2} failed to parse. {0}")]
BadMove(MoveError, String, ErrorLocation),
#[error("Description starting at {0} is malformed")]
BadDescription(ErrorLocation),
}
#[derive(PartialEq, Clone, Debug)]
pub struct ErrorLocation {
pub line: usize,
pub column: usize,
pub raw_offset: usize,
}
impl std::fmt::Display for ErrorLocation {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "line {} column {}", self.line, self.column)
}
}
impl FromStr for PGN4
|
{
let mateless = s.trim_end_matches('#');
let mates = s.len() - mateless.len();
match mateless {
"O-O-O" => QueenCastle(mates),
"O-O" => KingCastle(mates),
_ => return Err(MoveError::Castle),
}
}
|
conditional_block
|
from_str.rs
|
- checkless.len();
let (two_pos, promotion) = if let Some(equals) = checkless.find('=') {
let (left_over, promote) = checkless.split_at(equals);
let mut iter = promote.chars();
if iter.next() != Some('=') {
return Err(MoveError::Other);
}
let p = iter.next().ok_or(MoveError::Other)?;
if iter.next().is_some() {
return Err(MoveError::Other);
}
(left_over, Some(p))
} else {
(checkless, None)
};
let loc = if let Some(dash) = two_pos.find('-') {
dash
} else if let Some(x) = two_pos.find('x') {
x
} else {
return Err(MoveError::Other);
};
let (left, tmp) = two_pos.split_at(loc);
let (mid, mut right) = tmp.split_at(1); // x and - are both ascii and therefore 1 byte
let from = left.parse::<Position>()?;
let captured = if mid == "x" {
let mut iter = right.chars();
let start = iter.next().ok_or(MoveError::Other)?;
Some(if start.is_ascii_lowercase() {
'P'
} else {
right = iter.as_str();
start
})
} else {
None
};
let to = right.parse::<Position>()?;
Ok(BasicMove {
piece,
from,
captured,
to,
promotion,
checks,
mates,
})
}
}
impl FromStr for Move {
type Err = MoveError;
fn from_str(string: &str) -> Result<Self, Self::Err> {
use Move::*;
Ok(match string {
"#" => Checkmate,
"S" => Stalemate,
"T" => Timeout,
"R" => Resign,
s if s.starts_with("O-O") => {
let mateless = s.trim_end_matches('#');
let mates = s.len() - mateless.len();
match mateless {
"O-O-O" => QueenCastle(mates),
"O-O" => KingCastle(mates),
_ => return Err(MoveError::Castle),
}
}
_ => Normal(string.parse::<BasicMove>()?),
})
}
}
struct MovePair {
main: Move,
modifier: Option<Move>,
stalemate: bool,
}
impl FromStr for MovePair {
type Err = MoveError;
fn from_str(string: &str) -> Result<Self, Self::Err> {
let mut stalemate = false;
let break_index = if string.len() == 2 {
1 // No move is 2 bytes long
} else if string.len() > 2 {
if string.ends_with("RS") && !string.ends_with("=RS")
|| string.ends_with("TS") && !string.ends_with("=TS")
{
stalemate = true;
string.len() - 2
} else if (string.ends_with('R') && !string.ends_with("=R"))
|| (string.ends_with('S') && !string.ends_with("=S"))
|| (string.ends_with('T') && !string.ends_with("=T"))
{
string.len() - 1
} else {
0
}
} else {
0
};
Ok(if break_index == 0 {
Self {
main: string.parse()?,
modifier: None,
stalemate,
}
} else {
Self {
main: string.get(..break_index).ok_or(MoveError::Other)?.parse()?,
modifier: Some(
string
.get(break_index..(break_index + 1))
.ok_or(MoveError::Other)?
.parse()?,
),
stalemate,
}
})
}
}
#[derive(PartialEq, Clone, Debug)]
enum IntermediateError {
Other(usize),
TurnNumber(usize),
TurnNumberParse(usize, String),
TurnTooLong(usize),
MoveErr(MoveError, String, usize),
Description(usize),
}
fn parse_quarter(string: &str) -> Result<(QuarterTurn, &str), IntermediateError> {
/// Generally the move is bounded by whitespace, but supporting pgns that don't
/// have all the neccessary whitespace is good. Notably, whitespace before a new
/// line number is critical.
fn next_move(c: char) -> bool {
c.is_whitespace()
|| match c {
'.' | '{' | '(' | ')' => true,
_ => false,
}
}
use IntermediateError::*;
let trimmed = string.trim_start();
if trimmed == "" {
return Err(Other(trimmed.len()));
}
let split = trimmed.find(next_move).unwrap_or(string.len() - 1);
let (main_str, mut rest) = trimmed.split_at(split);
let move_pair = main_str
.trim()
.parse::<MovePair>()
.map_err(|m| MoveErr(m, main_str.to_owned(), rest.len()))?;
let mut description = None;
let mut alternatives = Vec::new();
rest = rest.trim_start();
if let Some(c) = rest.chars().next() {
if c == '{' {
let desc_end = rest.find('}').ok_or(Description(rest.len()))?;
let (mut desc_str, rest_tmp) = rest.split_at(desc_end + 1);
desc_str = desc_str.strip_prefix("{ ").ok_or(Description(rest.len()))?;
desc_str = desc_str.strip_suffix(" }").ok_or(Description(rest.len()))?;
description = Some(desc_str.to_owned());
rest = rest_tmp;
}
} else {
return Ok((
QuarterTurn {
main: move_pair.main,
modifier: move_pair.modifier,
extra_stalemate: move_pair.stalemate,
description,
alternatives,
},
rest,
));
};
rest = rest.trim_start();
while let Some(rest_tmp) = rest.strip_prefix('(') {
rest = rest_tmp;
let mut turns = Vec::new();
while rest.chars().next() != Some(')') {
let (turn, rest_tmp) = parse_turn(rest)?;
rest = rest_tmp;
turns.push(turn);
}
rest = rest.strip_prefix(')').unwrap().trim_start();
alternatives.push(turns);
}
Ok((
QuarterTurn {
main: move_pair.main,
modifier: move_pair.modifier,
extra_stalemate: move_pair.stalemate,
description,
|
},
rest,
))
}
fn parse_turn(string: &str) -> Result<(Turn, &str), IntermediateError> {
use IntermediateError::*;
let trimmed = string.trim_start();
let dot_loc = trimmed.find('.').ok_or(TurnNumber(trimmed.len()))?;
let (number_str, dots) = trimmed.split_at(dot_loc);
let number = if number_str == "" {
0
} else {
number_str
.parse()
.map_err(|_| TurnNumberParse(trimmed.len(), number_str.to_string()))?
};
let dot = dots.strip_prefix('.').unwrap();
let (mut rest, double_dot) = if let Some(dotted) = dot.strip_prefix('.') {
(dotted, true)
} else {
(dot, false)
};
let mut turns = Vec::new();
let for_error = rest.len();
let (qturn, rest_tmp) = parse_quarter(rest)?;
rest = rest_tmp.trim_start();
turns.push(qturn);
while let Some(rest_tmp) = rest.strip_prefix("..") {
if turns.len() >= 4 {
return Err(TurnTooLong(for_error));
}
let (qturn, rest_tmp) = parse_quarter(rest_tmp)?;
rest = rest_tmp.trim_start();
turns.push(qturn);
}
Ok((
Turn {
number,
double_dot,
turns,
},
rest,
))
}
#[derive(Error, PartialEq, Clone, Debug)]
pub enum PGN4Error {
#[error("Some error occured at {0}")]
Other(ErrorLocation),
#[error("Expected a turn number starting at {0}, but there isn't a dot")]
TurnNumber(ErrorLocation),
#[error("Turn number at {0} is malformed \"{1}\" should be a number or \"\"")]
TurnNumberParse(ErrorLocation, String),
#[error("More than 4 quarter turns are present in the turn starting at {0}")]
TurnTooLong(ErrorLocation),
#[error("Tag starting at {0} is malformed")]
BadTagged(ErrorLocation),
#[error("Move \"{1}\" at {2} failed to parse. {0}")]
BadMove(MoveError, String, ErrorLocation),
#[error("Description starting at {0} is malformed")]
BadDescription(ErrorLocation),
}
#[derive(PartialEq, Clone, Debug)]
pub struct ErrorLocation {
pub line: usize,
pub column: usize,
pub raw_offset: usize,
}
impl std::fmt::Display for ErrorLocation {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "line {} column {}", self.line, self.column)
}
}
impl FromStr for PGN4 {
|
alternatives,
|
random_line_split
|
base.rs
|
key);
// Compute sizes of consecutive symbols if the size has not been provided by the symbol
// iterator. In the same go, drop all but the first symbols at any given address. We do
// not rely on the size of symbols in this case, since the ranges might still be
// overlapping.
symbols.dedup_by(|next, symbol| {
if symbol.size == 0 {
symbol.size = next.address - symbol.address;
}
symbol.address == next.address
})
}
SymbolMap { symbols }
}
}
impl<'d> FromIterator<Symbol<'d>> for SymbolMap<'d> {
fn from_iter<I>(iter: I) -> Self
where
I: IntoIterator<Item = Symbol<'d>>,
{
Vec::from_iter(iter).into()
}
}
/// File information referred by [`LineInfo`](struct.LineInfo.html) comprising a directory and name.
///
/// The file path is usually relative to a compilation directory. It might contain parent directory
/// segments (`../`).
#[derive(Clone, Default, Eq, PartialEq)]
pub struct FileInfo<'data> {
/// The file's basename.
name: Cow<'data, [u8]>,
/// Path to the file.
dir: Cow<'data, [u8]>,
}
impl<'data> FileInfo<'data> {
/// Creates a `FileInfo` with a given directory and the file name.
#[cfg(feature = "dwarf")]
pub fn new(dir: Cow<'data, [u8]>, name: Cow<'data, [u8]>) -> Self {
FileInfo { name, dir }
}
/// Creates a `FileInfo` from a joined path by trying to split it.
#[cfg(any(feature = "breakpad", feature = "ms", feature = "sourcebundle"))]
pub fn from_path(path: &'data [u8]) -> Self {
let (dir, name) = symbolic_common::split_path_bytes(path);
FileInfo {
name: Cow::Borrowed(name),
dir: match dir {
Some(dir) => Cow::Borrowed(dir),
None => Cow::default(),
},
}
}
/// Creates a `FileInfo` from a joined path by trying to split it.
/// Unlike from_path(), copies the given data instead of referencing it.
#[cfg(feature = "ppdb")]
pub(crate) fn from_path_owned(path: &[u8]) -> Self {
let (dir, name) = symbolic_common::split_path_bytes(path);
FileInfo {
name: Cow::Owned(name.to_vec()),
dir: match dir {
Some(dir) => Cow::Owned(dir.to_vec()),
None => Cow::default(),
},
}
}
/// Creates a `FileInfo` with the file name.
pub fn from_filename(name: &'data [u8]) -> Self {
FileInfo {
name: Cow::Borrowed(name),
dir: Cow::default(),
}
}
/// The file name as UTF-8 string.
pub fn name_str(&self) -> Cow<'data, str> {
from_utf8_cow_lossy(&self.name)
}
/// Path to the file relative to the compilation directory.
pub fn dir_str(&self) -> Cow<'data, str> {
from_utf8_cow_lossy(&self.dir)
}
/// The full path to the file, relative to the compilation directory.
pub fn path_str(&self) -> String {
let joined = join_path(&self.dir_str(), &self.name_str());
clean_path(&joined).into_owned()
}
}
#[allow(clippy::ptr_arg)] // false positive https://github.com/rust-lang/rust-clippy/issues/9218
pub(crate) fn from_utf8_cow_lossy<'data>(input: &Cow<'data, [u8]>) -> Cow<'data, str> {
// See https://github.com/rust-lang/rust/issues/32669
match input {
Cow::Borrowed(bytes) => String::from_utf8_lossy(bytes),
Cow::Owned(bytes) => match String::from_utf8_lossy(bytes) {
Cow::Borrowed(_) => unsafe { String::from_utf8_unchecked(bytes.to_vec()) }.into(),
Cow::Owned(s) => s.into(),
},
}
}
impl fmt::Debug for FileInfo<'_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("FileInfo")
.field("name", &self.name_str())
.field("dir", &self.dir_str())
.finish()
}
}
/// File information comprising a compilation directory, relative path and name.
pub struct FileEntry<'data> {
/// Path to the compilation directory. File paths are relative to this.
compilation_dir: Cow<'data, [u8]>,
/// File name and path.
pub info: FileInfo<'data>,
}
impl<'data> FileEntry<'data> {
/// Path to the compilation directory.
pub fn new(compilation_dir: Cow<'data, [u8]>, info: FileInfo<'data>) -> Self {
FileEntry {
compilation_dir,
info,
}
}
/// Path to the compilation directory.
pub fn compilation_dir_str(&self) -> Cow<'data, str> {
from_utf8_cow_lossy(&self.compilation_dir)
}
/// Absolute path to the file, including the compilation directory.
pub fn abs_path_str(&self) -> String {
let joined_path = join_path(&self.dir_str(), &self.name_str());
let joined = join_path(&self.compilation_dir_str(), &joined_path);
clean_path(&joined).into_owned()
}
}
impl fmt::Debug for FileEntry<'_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("FileInfo")
.field("compilation_dir", &self.compilation_dir_str())
.field("name", &self.name_str())
.field("dir", &self.dir_str())
.finish()
}
}
impl<'data> Deref for FileEntry<'data> {
type Target = FileInfo<'data>;
fn deref(&self) -> &Self::Target {
&self.info
}
}
/// File and line number mapping for an instruction address.
#[derive(Clone, Eq, PartialEq)]
pub struct LineInfo<'data> {
/// The instruction address relative to the image base (load address).
pub address: u64,
/// Total code size covered by this line record.
pub size: Option<u64>,
/// File name and path.
pub file: FileInfo<'data>,
/// Absolute line number starting at 1. Zero means no line number.
pub line: u64,
}
#[cfg(test)]
impl LineInfo<'static> {
pub(crate) fn new(address: u64, size: u64, file: &[u8], line: u64) -> LineInfo {
LineInfo {
address,
size: Some(size),
file: FileInfo::from_filename(file),
line,
}
}
}
impl fmt::Debug for LineInfo<'_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let mut s = f.debug_struct("LineInfo");
s.field("address", &format_args!("{:#x}", self.address));
match self.size {
Some(size) => s.field("size", &format_args!("{size:#x}")),
None => s.field("size", &self.size),
};
s.field("file", &self.file)
.field("line", &self.line)
.finish()
}
}
/// Debug information for a function.
#[derive(Clone)]
pub struct Function<'data> {
/// Relative instruction address of the start of the function.
pub address: u64,
/// Total code size covered by the function body, including inlined functions.
pub size: u64,
/// The name and language of the function symbol.
pub name: Name<'data>,
/// Path to the compilation directory. File paths are relative to this.
pub compilation_dir: &'data [u8],
/// Lines covered by this function, including inlined children.
pub lines: Vec<LineInfo<'data>>,
/// Functions that have been inlined into this function's body.
pub inlinees: Vec<Function<'data>>,
/// Specifies whether this function is inlined.
pub inline: bool,
}
impl Function<'_> {
/// End address of the entire function body, including inlined functions.
///
/// This address points at the first instruction after the function body.
pub fn end_address(&self) -> u64 {
self.address.saturating_add(self.size)
}
}
impl fmt::Debug for Function<'_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result
|
{
f.debug_struct("Function")
.field("address", &format_args!("{:#x}", self.address))
.field("size", &format_args!("{:#x}", self.size))
.field("name", &self.name)
.field(
"compilation_dir",
&String::from_utf8_lossy(self.compilation_dir),
)
.field("lines", &self.lines)
.field("inlinees", &self.inlinees)
.field("inline", &self.inline)
.finish()
}
|
identifier_body
|
|
base.rs
|
FileFormat::Pdb,
"pe" => FileFormat::Pe,
"sourcebundle" => FileFormat::SourceBundle,
"wasm" => FileFormat::Wasm,
"portablepdb" => FileFormat::PortablePdb,
_ => return Err(UnknownFileFormatError),
})
}
}
/// A symbol from a symbol table.
#[derive(Clone, Default, Eq, PartialEq)]
pub struct Symbol<'data> {
/// The name of the symbol.
///
/// This name is generally mangled. It can be demangled by constructing a `Name` instance and
/// calling demangle on it. Certain object files might only store demangled symbol names.
pub name: Option<Cow<'data, str>>,
/// The relative address of this symbol.
pub address: u64,
/// The size of this symbol, if known.
///
/// When loading symbols from an object file, the size will generally not be known. Instead,
/// construct a [`SymbolMap`] from the object, which also fills in sizes.
///
/// [`SymbolMap`]: struct.SymbolMap.html
pub size: u64,
}
impl<'data> Symbol<'data> {
/// Returns the name of this symbol as string.
pub fn name(&self) -> Option<&str> {
self.name.as_ref().map(Cow::as_ref)
}
/// Determines whether the given address is covered by this symbol.
///
/// If the symbol size has not been computed, the address is assumed to be covered if it is
/// greated than the symbol address. Otherwise, the address must be in the half-open interval
/// `[address, address + size)`.
pub fn contains(&self, address: u64) -> bool {
address >= self.address && (self.size == 0 || address < self.address + self.size)
}
}
impl<'d> fmt::Debug for Symbol<'d> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Symbol")
.field("name", &self.name().unwrap_or("<unknown>"))
.field("address", &format_args!("{:#x}", self.address))
.field("size", &format_args!("{:#x}", self.size))
.finish()
}
}
/// IntoIterator type for [`SymbolMap`](struct.SymbolMap.html).
pub type SymbolMapIter<'data> = std::vec::IntoIter<Symbol<'data>>;
/// A sorted list of symbols, suitable for quick lookups.
///
/// This type can either be computed from a list or iterator of symbols, or preferrably directly
/// by calling [`ObjectLike::symbol_map`] on any object. Symbols in the symbol map are guaranteed to
/// have a `size` set, except for the last symbol, which is computed by taking the offset to the
/// subsequent symbol.
///
/// `SymbolMap` also exposes a read-only view on the sorted slice of symbols. It can be converted to
/// and from lists of symbols.
///
/// ## Example
///
/// ```rust
/// # use symbolic_debuginfo::{Symbol, SymbolMap};
/// let map = SymbolMap::from(vec![
/// Symbol { name: Some("A".into()), address: 0x4400, size: 0 },
/// Symbol { name: Some("B".into()), address: 0x4200, size: 0 },
/// Symbol { name: Some("C".into()), address: 0x4000, size: 0 },
/// ]);
///
/// assert_eq!(map[0], Symbol {
/// name: Some("C".into()),
/// address: 0x4000,
/// size: 0x200,
/// });
/// ```
///
/// [`ObjectLike::symbol_map`]: trait.ObjectLike.html#tymethod.symbol_map
#[derive(Clone, Debug, Default)]
pub struct SymbolMap<'data> {
symbols: Vec<Symbol<'data>>,
}
impl<'data> SymbolMap<'data> {
/// Creates a new, empty symbol map.
pub fn new() -> Self {
SymbolMap {
symbols: Vec::new(),
}
}
/// Looks up the symbol covering the given address.
pub fn lookup(&self, address: u64) -> Option<&Symbol<'data>> {
match self.symbols.binary_search_by_key(&address, Self::key) {
Ok(index) => Some(&self.symbols[index]),
Err(0) => None,
Err(next_index) => {
let symbol = &self.symbols[next_index - 1];
if symbol.contains(address) {
Some(symbol)
} else {
None
}
}
}
}
/// Looks up a symbol by its start address.
pub fn lookup_exact(&self, address: u64) -> Option<&Symbol<'data>> {
let idx = self
.symbols
.binary_search_by_key(&address, Self::key)
.ok()?;
self.symbols.get(idx)
}
/// Looks up a symbol covering an entire range.
///
/// This is similar to [`lookup`], but it only returns the symbol result if it _also_ covers the
/// inclusive end address of the range.
///
/// [`lookup`]: struct.SymbolMap.html#method.lookup
pub fn lookup_range<R>(&self, range: R) -> Option<&Symbol<'data>>
where
R: RangeBounds<u64>,
{
let start = match range.start_bound() {
Bound::Included(start) => *start,
Bound::Excluded(start) => *start + 1,
Bound::Unbounded => 0,
};
let symbol = self.lookup(start)?;
let end = match range.end_bound() {
Bound::Included(end) => *end,
Bound::Excluded(end) => *end - 1,
Bound::Unbounded => u64::max_value(),
};
if end <= start || symbol.contains(end) {
Some(symbol)
} else {
None
}
}
/// Returns the lookup key for a symbol, which is the symbol's address.
#[inline(always)]
fn key(symbol: &Symbol<'data>) -> u64 {
symbol.address
}
}
impl<'d> Deref for SymbolMap<'d> {
type Target = [Symbol<'d>];
fn deref(&self) -> &Self::Target {
&self.symbols
}
}
impl<'data> IntoIterator for SymbolMap<'data> {
type Item = Symbol<'data>;
type IntoIter = SymbolMapIter<'data>;
fn into_iter(self) -> Self::IntoIter {
self.symbols.into_iter()
}
}
impl<'data, 'a> IntoIterator for &'a SymbolMap<'data> {
type Item = &'a Symbol<'data>;
type IntoIter = std::slice::Iter<'a, Symbol<'data>>;
fn into_iter(self) -> Self::IntoIter {
self.symbols.iter()
}
}
impl<'d> AsRef<[Symbol<'d>]> for SymbolMap<'d> {
fn as_ref(&self) -> &[Symbol<'d>] {
&self.symbols
}
}
impl<'d> From<Vec<Symbol<'d>>> for SymbolMap<'d> {
fn from(mut symbols: Vec<Symbol<'d>>) -> Self {
if !symbols.is_empty() {
// NB: This might require stable sorting to ensure determinism if multiple symbols point
// at the same location. However, this only seems to happen for equivalent variants of
// the same function.
//
// An example would be destructors where D2 (base object destructor) and D1 (complete
// object destructor) might share the same code. Since those always demangle to the same
// name, we do not care which function to keep in this case.
//
// Inlined functions will generally not appear in this list, unless they _also_ have an
// explicit function body, in which case they will have a unique address, again.
dmsort::sort_by_key(&mut symbols, Self::key);
// Compute sizes of consecutive symbols if the size has not been provided by the symbol
// iterator. In the same go, drop all but the first symbols at any given address. We do
// not rely on the size of symbols in this case, since the ranges might still be
// overlapping.
symbols.dedup_by(|next, symbol| {
if symbol.size == 0 {
symbol.size = next.address - symbol.address;
}
symbol.address == next.address
})
}
SymbolMap { symbols }
}
}
impl<'d> FromIterator<Symbol<'d>> for SymbolMap<'d> {
fn from_iter<I>(iter: I) -> Self
where
I: IntoIterator<Item = Symbol<'d>>,
{
Vec::from_iter(iter).into()
}
}
/// File information referred by [`LineInfo`](struct.LineInfo.html) comprising a directory and name.
///
/// The file path is usually relative to a compilation directory. It might contain parent directory
/// segments (`../`).
#[derive(Clone, Default, Eq, PartialEq)]
pub struct FileInfo<'data> {
/// The file's basename.
name: Cow<'data, [u8]>,
|
/// Path to the file.
dir: Cow<'data, [u8]>,
}
|
random_line_split
|
|
base.rs
|
0) => None,
Err(next_index) => {
let symbol = &self.symbols[next_index - 1];
if symbol.contains(address) {
Some(symbol)
} else {
None
}
}
}
}
/// Looks up a symbol by its start address.
pub fn lookup_exact(&self, address: u64) -> Option<&Symbol<'data>> {
let idx = self
.symbols
.binary_search_by_key(&address, Self::key)
.ok()?;
self.symbols.get(idx)
}
/// Looks up a symbol covering an entire range.
///
/// This is similar to [`lookup`], but it only returns the symbol result if it _also_ covers the
/// inclusive end address of the range.
///
/// [`lookup`]: struct.SymbolMap.html#method.lookup
pub fn lookup_range<R>(&self, range: R) -> Option<&Symbol<'data>>
where
R: RangeBounds<u64>,
{
let start = match range.start_bound() {
Bound::Included(start) => *start,
Bound::Excluded(start) => *start + 1,
Bound::Unbounded => 0,
};
let symbol = self.lookup(start)?;
let end = match range.end_bound() {
Bound::Included(end) => *end,
Bound::Excluded(end) => *end - 1,
Bound::Unbounded => u64::max_value(),
};
if end <= start || symbol.contains(end) {
Some(symbol)
} else {
None
}
}
/// Returns the lookup key for a symbol, which is the symbol's address.
#[inline(always)]
fn key(symbol: &Symbol<'data>) -> u64 {
symbol.address
}
}
impl<'d> Deref for SymbolMap<'d> {
type Target = [Symbol<'d>];
fn deref(&self) -> &Self::Target {
&self.symbols
}
}
impl<'data> IntoIterator for SymbolMap<'data> {
type Item = Symbol<'data>;
type IntoIter = SymbolMapIter<'data>;
fn into_iter(self) -> Self::IntoIter {
self.symbols.into_iter()
}
}
impl<'data, 'a> IntoIterator for &'a SymbolMap<'data> {
type Item = &'a Symbol<'data>;
type IntoIter = std::slice::Iter<'a, Symbol<'data>>;
fn into_iter(self) -> Self::IntoIter {
self.symbols.iter()
}
}
impl<'d> AsRef<[Symbol<'d>]> for SymbolMap<'d> {
fn as_ref(&self) -> &[Symbol<'d>] {
&self.symbols
}
}
impl<'d> From<Vec<Symbol<'d>>> for SymbolMap<'d> {
fn from(mut symbols: Vec<Symbol<'d>>) -> Self {
if !symbols.is_empty() {
// NB: This might require stable sorting to ensure determinism if multiple symbols point
// at the same location. However, this only seems to happen for equivalent variants of
// the same function.
//
// An example would be destructors where D2 (base object destructor) and D1 (complete
// object destructor) might share the same code. Since those always demangle to the same
// name, we do not care which function to keep in this case.
//
// Inlined functions will generally not appear in this list, unless they _also_ have an
// explicit function body, in which case they will have a unique address, again.
dmsort::sort_by_key(&mut symbols, Self::key);
// Compute sizes of consecutive symbols if the size has not been provided by the symbol
// iterator. In the same go, drop all but the first symbols at any given address. We do
// not rely on the size of symbols in this case, since the ranges might still be
// overlapping.
symbols.dedup_by(|next, symbol| {
if symbol.size == 0 {
symbol.size = next.address - symbol.address;
}
symbol.address == next.address
})
}
SymbolMap { symbols }
}
}
impl<'d> FromIterator<Symbol<'d>> for SymbolMap<'d> {
fn from_iter<I>(iter: I) -> Self
where
I: IntoIterator<Item = Symbol<'d>>,
{
Vec::from_iter(iter).into()
}
}
/// File information referred by [`LineInfo`](struct.LineInfo.html) comprising a directory and name.
///
/// The file path is usually relative to a compilation directory. It might contain parent directory
/// segments (`../`).
#[derive(Clone, Default, Eq, PartialEq)]
pub struct FileInfo<'data> {
/// The file's basename.
name: Cow<'data, [u8]>,
/// Path to the file.
dir: Cow<'data, [u8]>,
}
impl<'data> FileInfo<'data> {
/// Creates a `FileInfo` with a given directory and the file name.
#[cfg(feature = "dwarf")]
pub fn new(dir: Cow<'data, [u8]>, name: Cow<'data, [u8]>) -> Self {
FileInfo { name, dir }
}
/// Creates a `FileInfo` from a joined path by trying to split it.
#[cfg(any(feature = "breakpad", feature = "ms", feature = "sourcebundle"))]
pub fn from_path(path: &'data [u8]) -> Self {
let (dir, name) = symbolic_common::split_path_bytes(path);
FileInfo {
name: Cow::Borrowed(name),
dir: match dir {
Some(dir) => Cow::Borrowed(dir),
None => Cow::default(),
},
}
}
/// Creates a `FileInfo` from a joined path by trying to split it.
/// Unlike from_path(), copies the given data instead of referencing it.
#[cfg(feature = "ppdb")]
pub(crate) fn from_path_owned(path: &[u8]) -> Self {
let (dir, name) = symbolic_common::split_path_bytes(path);
FileInfo {
name: Cow::Owned(name.to_vec()),
dir: match dir {
Some(dir) => Cow::Owned(dir.to_vec()),
None => Cow::default(),
},
}
}
/// Creates a `FileInfo` with the file name.
pub fn from_filename(name: &'data [u8]) -> Self {
FileInfo {
name: Cow::Borrowed(name),
dir: Cow::default(),
}
}
/// The file name as UTF-8 string.
pub fn name_str(&self) -> Cow<'data, str> {
from_utf8_cow_lossy(&self.name)
}
/// Path to the file relative to the compilation directory.
pub fn dir_str(&self) -> Cow<'data, str> {
from_utf8_cow_lossy(&self.dir)
}
/// The full path to the file, relative to the compilation directory.
pub fn path_str(&self) -> String {
let joined = join_path(&self.dir_str(), &self.name_str());
clean_path(&joined).into_owned()
}
}
#[allow(clippy::ptr_arg)] // false positive https://github.com/rust-lang/rust-clippy/issues/9218
pub(crate) fn from_utf8_cow_lossy<'data>(input: &Cow<'data, [u8]>) -> Cow<'data, str> {
// See https://github.com/rust-lang/rust/issues/32669
match input {
Cow::Borrowed(bytes) => String::from_utf8_lossy(bytes),
Cow::Owned(bytes) => match String::from_utf8_lossy(bytes) {
Cow::Borrowed(_) => unsafe { String::from_utf8_unchecked(bytes.to_vec()) }.into(),
Cow::Owned(s) => s.into(),
},
}
}
impl fmt::Debug for FileInfo<'_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("FileInfo")
.field("name", &self.name_str())
.field("dir", &self.dir_str())
.finish()
}
}
/// File information comprising a compilation directory, relative path and name.
pub struct FileEntry<'data> {
/// Path to the compilation directory. File paths are relative to this.
compilation_dir: Cow<'data, [u8]>,
/// File name and path.
pub info: FileInfo<'data>,
}
impl<'data> FileEntry<'data> {
/// Path to the compilation directory.
pub fn new(compilation_dir: Cow<'data, [u8]>, info: FileInfo<'data>) -> Self {
FileEntry {
compilation_dir,
info,
}
}
/// Path to the compilation directory.
pub fn compilation_dir_str(&self) -> Cow<'data, str> {
from_utf8_cow_lossy(&self.compilation_dir)
}
/// Absolute path to the file, including the compilation directory.
pub fn abs_path_str(&self) -> String {
let joined_path = join_path(&self.dir_str(), &self.name_str());
let joined = join_path(&self.compilation_dir_str(), &joined_path);
clean_path(&joined).into_owned()
}
}
impl fmt::Debug for FileEntry<'_> {
fn
|
fmt
|
identifier_name
|
|
mediaScan.go
|
go func(haltWalk bool) {
// Wait until signal recieved
<- walkCancelChan
// Halt fs walk
mutex.Lock()
haltWalk = true
mutex.Unlock()
}(haltWalk)
// Track metrics
artCount = 0
artistCount = 0
albumCount = 0
songCount = 0
songUpdate = 0
folderCount = 0
metadataCount = 0
startTime := time.Now()
folderCache = map[string]*db.Folder{}
artistCache = map[string]*db.Artist{}
albumCache = map[string]*db.Album{}
if fs.verbose {
util.Logger.Printf("FS: Scanning: %s", baseFolder)
}
godirwalk.Walk(baseFolder, &godirwalk.Options{
Callback: func(osPathname string, de *godirwalk.Dirent) error {
info := de.ModeType()
util.Logger.Printf("FS: Media Scan: Got new file: %s", osPathname)
folder, err := handleFolder(osPathname, info)
if err != nil {
return fmt.Errorf("FS: Media Scan: Error handling folder: %s", err)
}
if _, ok := folderAttachables[folder.ID]; !ok {
folderAttachables[folder.ID] = attachables{}
}
ext := path.Ext(osPathname)
var isMetadata bool = false
if osPathname[len(osPathname)-len(metadataFile):] == metadataFile {
isMetadata = true
}
if img, audio := imgType[ext], audioType[ext];
!img && !audio && !isMetadata {
return nil
}
if _, ok := imgType[ext]; ok {
art, err := handleImg(osPathname, folder)
if err != nil {
return fmt.Errorf("FS: Media Scan: Error handling image: %s", err)
}
currentVal := folderAttachables[folder.ID]
currentVal.art = art
folderAttachables[folder.ID] = currentVal
return nil
}
if isMetadata {
md, err := handleMetadata(osPathname, folder)
if err != nil {
return fmt.Errorf(
"FS: Media Scan: Error handling Metadata: %s", err)
}
currentVal := folderAttachables[folder.ID]
currentVal.md = md
folderAttachables[folder.ID] = currentVal
return nil
}
if _, ok := audioType[ext]; ok {
err := handleAudio(osPathname, info, folder)
if err != nil {
return fmt.Errorf(
"FS: Media Scan: Error handling audio file: %s", err)
}
}
return nil
},
ErrorCallback: func(osPathname string, err error) godirwalk.ErrorAction {
util.Logger.Printf("%s", err)
return godirwalk.SkipNode
},
Unsorted: true,
})
joinAttachables()
if err := db.DB.TruncateLog(); err != nil {
util.Logger.Printf("FS: Media Scan: Could not truncate WAL File: %v", err)
}
if fs.verbose {
util.Logger.Printf(
"FS: Media Scan Complete [time: %s]", time.Since(startTime).String())
util.Logger.Printf(
"FS: Media Scan: Added: " +
"[art: %d] [artists: %d] [albums: %d] [songs: %d] " +
"[folders: %d] [metadata: %d]",
artCount, artistCount, albumCount,
songCount, folderCount, metadataCount,
)
util.Logger.Printf("FS: Updated: [songs: %d]", songUpdate)
}
sum := artCount + artistCount + albumCount + songCount + folderCount
return sum, nil
}
func handleFolder(cPath string, info os.FileMode) (*db.Folder, error) {
// Check for cached folder
if seenFolder, ok := folderCache[cPath]; ok {
return seenFolder, nil
}
folder := new(db.Folder)
if info.IsDir() {
folder.Path = cPath
} else {
folder.Path = path.Dir(cPath)
}
err := folder.Load()
if err == nil {
folderCache[cPath] = folder
return folder, nil
}
if err == sql.ErrNoRows {
if _, err := os.Stat(path.Join(cPath, metadataFile)); os.IsNotExist(err) {
if _, err := os.Create(path.Join(cPath, metadataFile)); err != nil {
util.Logger.Printf("FS: Media Scan: Error creating Metadata file: %v", err)
} else {
util.Logger.Printf(
"FS: Media Scan: Handle Folder: Created new Metadata File at: %#v",
path.Join(cPath, metadataFile))
}
}
files, err := godirwalk.ReadDirents(folder.Path, nil)
if err != nil {
return nil, err
} else if len(files) == 0 {
return nil, fmt.Errorf(
"FS: Media Scan: Found no files in folder: %v", folder.Path)
}
util.Logger.Printf("FS: Media Scan: Found %v files in %v", len(files), cPath)
folder.Title = path.Base(folder.Path)
parent := new(db.Folder)
if info.IsDir() {
parent.Path = path.Dir(cPath)
} else {
parent.Path = path.Dir(path.Dir(cPath))
}
if err := parent.Load(); err != nil && err != sql.ErrNoRows {
return nil, err
} else if err == nil {
folder.ParentID = parent.ID
}
if err := folder.Save(); err != nil {
return nil, err
}
} else {
return nil, err
}
folderCache[folder.Path] = folder
folderCount++
return folder, nil
}
func handleImg(cPath string, folder *db.Folder) (*db.Art, error) {
art := new(db.Art)
art.Path = cPath
err := art.Load()
if err == nil {
return art, nil
}
if err == sql.ErrNoRows {
data, err := os.Stat(cPath)
if err != nil {
return nil, err
}
art.FolderID = folder.ID
art.FileSize = data.Size()
art.LastModified = data.ModTime().Unix()
if art.FileSize == 0 {
return nil, errors.New("Art File Size is 0")
}
if err := art.Save(); err != nil {
return nil, err
}
artCount++
return art, nil
}
return nil, err
}
func handleMetadata(cPath string, folder *db.Folder) (*db.Metadata, error) {
md := new(db.Metadata)
md.Path = cPath
err := md.Load()
if err == nil {
return md, nil
}
if err == sql.ErrNoRows
|
return nil, err
}
func handleAudio(cPath string, info os.FileMode, folder *db.Folder) error {
song, err := db.SongFromFile(cPath)
if err != nil {
return err
}
data, err := os.Stat(cPath)
if err != nil {
return err
}
if data.Size() == 0 {
return errors.New("Audio File Size is 0")
}
song.Path = cPath
song.FileSize = data.Size()
song.LastModified = data.ModTime().Unix()
song.FolderID = folder.ID
song.FileTypeID = db.FileTypeMap[path.Ext(cPath)]
artist, err := handleArtist(song)
if err != nil {
return err
}
song.ArtistID = artist.ID
fIDHasAttachables[artist.FolderID] = artist
album, err := handleAlbum(song)
if err != nil {
return err
}
song.AlbumID = album.ID
fIDHasAttachables[album.FolderID] = album
err = checkForModification(song)
if err != nil {
return err
}
return nil
}
func handleArtist(song *db.Song) (*db.Artist, error) {
artist := db.GetArtistFromSong(song)
if seenArtist, ok := artistCache[artist.Title]; ok {
return seenArtist, nil
}
err := artist.Load()
if err == nil {
artistCache[artist.Title] = artist
return artist, nil
|
{
data, err := os.Stat(cPath)
if err != nil {
return nil, err
}
md.FolderID = folder.ID
md.FileSize = data.Size()
md.LastModified = data.ModTime().Unix()
if err := md.Save(); err != nil {
return nil, err
}
metadataCount++
return md, nil
}
|
conditional_block
|
mediaScan.go
|
// SetFolders sets the base and sub folders for scanning
func (fs *MediaScan) SetFolders(baseFolder, subFolder string) {
fs.baseFolder = baseFolder
fs.subFolder = subFolder
}
// Verbose is whether scanning has verbose output or not
func (fs *MediaScan) Verbose(v bool) {
fs.verbose = v
}
// WhoAmI returns Media Scan
func (fs *MediaScan) WhoAmI() string {
return "Media Scan"
}
// Scan scans for media files in the filesystem
func (fs *MediaScan) Scan(
baseFolder, subFolder string, walkCancelChan chan struct{}) (int, error) {
// Halt file system walk if needed
var mutex sync.RWMutex
haltWalk := false
go func(haltWalk bool) {
// Wait until signal recieved
<- walkCancelChan
// Halt fs walk
mutex.Lock()
haltWalk = true
mutex.Unlock()
}(haltWalk)
// Track metrics
artCount = 0
artistCount = 0
albumCount = 0
songCount = 0
songUpdate = 0
folderCount = 0
metadataCount = 0
startTime := time.Now()
folderCache = map[string]*db.Folder{}
artistCache = map[string]*db.Artist{}
albumCache = map[string]*db.Album{}
if fs.verbose {
util.Logger.Printf("FS: Scanning: %s", baseFolder)
}
godirwalk.Walk(baseFolder, &godirwalk.Options{
Callback: func(osPathname string, de *godirwalk.Dirent) error {
info := de.ModeType()
util.Logger.Printf("FS: Media Scan: Got new file: %s", osPathname)
folder, err := handleFolder(osPathname, info)
if err != nil {
return fmt.Errorf("FS: Media Scan: Error handling folder: %s", err)
}
if _, ok := folderAttachables[folder.ID]; !ok {
folderAttachables[folder.ID] = attachables{}
}
ext := path.Ext(osPathname)
var isMetadata bool = false
if osPathname[len(osPathname)-len(metadataFile):] == metadataFile {
isMetadata = true
}
if img, audio := imgType[ext], audioType[ext];
!img && !audio && !isMetadata {
return nil
}
if _, ok := imgType[ext]; ok {
art, err := handleImg(osPathname, folder)
if err != nil {
return fmt.Errorf("FS: Media Scan: Error handling image: %s", err)
}
currentVal := folderAttachables[folder.ID]
currentVal.art = art
folderAttachables[folder.ID] = currentVal
return nil
}
if isMetadata {
md, err := handleMetadata(osPathname, folder)
if err != nil {
return fmt.Errorf(
"FS: Media Scan: Error handling Metadata: %s", err)
}
currentVal := folderAttachables[folder.ID]
currentVal.md = md
folderAttachables[folder.ID] = currentVal
return nil
}
if _, ok := audioType[ext]; ok {
err := handleAudio(osPathname, info, folder)
if err != nil {
return fmt.Errorf(
"FS: Media Scan: Error handling audio file: %s", err)
}
}
return nil
},
ErrorCallback: func(osPathname string, err error) godirwalk.ErrorAction {
util.Logger.Printf("%s", err)
return godirwalk.SkipNode
},
Unsorted: true,
})
joinAttachables()
if err := db.DB.TruncateLog(); err != nil {
util.Logger.Printf("FS: Media Scan: Could not truncate WAL File: %v", err)
}
if fs.verbose {
util.Logger.Printf(
"FS: Media Scan Complete [time: %s]", time.Since(startTime).String())
util.Logger.Printf(
"FS: Media Scan: Added: " +
"[art: %d] [artists: %d] [albums: %d] [songs: %d] " +
"[folders: %d] [metadata: %d]",
artCount, artistCount, albumCount,
songCount, folderCount, metadataCount,
)
util.Logger.Printf("FS: Updated: [songs: %d]", songUpdate)
}
sum := artCount + artistCount + albumCount + songCount + folderCount
return sum, nil
}
func handleFolder(cPath string, info os.FileMode) (*db.Folder, error) {
// Check for cached folder
if seenFolder, ok := folderCache[cPath]; ok {
return seenFolder, nil
}
folder := new(db.Folder)
if info.IsDir() {
folder.Path = cPath
} else {
folder.Path = path.Dir(cPath)
}
err := folder.Load()
if err == nil {
folderCache[cPath] = folder
return folder, nil
}
if err == sql.ErrNoRows {
if _, err := os.Stat(path.Join(cPath, metadataFile)); os.IsNotExist(err) {
if _, err := os.Create(path.Join(cPath, metadataFile)); err != nil {
util.Logger.Printf("FS: Media Scan: Error creating Metadata file: %v", err)
} else {
util.Logger.Printf(
"FS: Media Scan: Handle Folder: Created new Metadata File at: %#v",
path.Join(cPath, metadataFile))
}
}
files, err := godirwalk.ReadDirents(folder.Path, nil)
if err != nil {
return nil, err
} else if len(files) == 0 {
return nil, fmt.Errorf(
"FS: Media Scan: Found no files in folder: %v", folder.Path)
}
util.Logger.Printf("FS: Media Scan: Found %v files in %v", len(files), cPath)
folder.Title = path.Base(folder.Path)
parent := new(db.Folder)
if info.IsDir() {
parent.Path = path.Dir(cPath)
} else {
parent.Path = path.Dir(path.Dir(cPath))
}
if err := parent.Load(); err != nil && err != sql.ErrNoRows {
return nil, err
} else if err == nil {
folder.ParentID = parent.ID
}
if err := folder.Save(); err != nil {
return nil, err
}
} else {
return nil, err
}
folderCache[folder.Path] = folder
folderCount++
return folder, nil
}
func handleImg(cPath string, folder *db.Folder) (*db.Art, error) {
art := new(db.Art)
art.Path = cPath
err := art.Load()
if err == nil {
return art, nil
}
if err == sql.ErrNoRows {
data, err := os.Stat(cPath)
if err != nil {
return nil, err
}
art.FolderID = folder.ID
art.FileSize = data.Size()
art.LastModified = data.ModTime().Unix()
if art.FileSize == 0 {
return nil, errors.New("Art File Size is 0")
}
if err := art.Save(); err != nil {
return nil, err
}
artCount++
return art, nil
}
return nil, err
}
func handleMetadata(cPath string, folder *db.Folder) (*db.Metadata, error) {
md := new(db.Metadata)
md.Path = cPath
err := md.Load()
if err == nil {
return md, nil
}
if err == sql.ErrNoRows {
data, err := os.Stat(cPath)
if err != nil {
return nil, err
}
md.FolderID = folder.ID
md.FileSize = data.Size()
md.LastModified = data.ModTime().Unix()
if err := md.Save(); err != nil {
return nil, err
}
metadataCount++
return md, nil
}
return nil, err
}
func handleAudio(cPath string, info os.FileMode, folder *db.Folder) error {
song, err := db.SongFromFile(cPath)
if err != nil {
return err
}
data, err := os.Stat(cPath)
if err != nil {
return err
}
if data.Size() == 0 {
return errors.New("Audio File Size is 0")
}
song.Path = cPath
song.FileSize = data.Size()
song.LastModified = data.ModTime().Unix()
song.FolderID = folder.ID
song.FileTypeID = db.FileTypeMap[path.Ext(cPath)]
artist, err := handle
|
{
return fs.baseFolder, fs.subFolder
}
|
identifier_body
|
|
mediaScan.go
|
false
go func(haltWalk bool) {
// Wait until signal recieved
<- walkCancelChan
// Halt fs walk
mutex.Lock()
haltWalk = true
mutex.Unlock()
}(haltWalk)
// Track metrics
artCount = 0
artistCount = 0
albumCount = 0
songCount = 0
songUpdate = 0
folderCount = 0
metadataCount = 0
startTime := time.Now()
folderCache = map[string]*db.Folder{}
artistCache = map[string]*db.Artist{}
albumCache = map[string]*db.Album{}
if fs.verbose {
util.Logger.Printf("FS: Scanning: %s", baseFolder)
}
godirwalk.Walk(baseFolder, &godirwalk.Options{
Callback: func(osPathname string, de *godirwalk.Dirent) error {
info := de.ModeType()
util.Logger.Printf("FS: Media Scan: Got new file: %s", osPathname)
folder, err := handleFolder(osPathname, info)
if err != nil {
return fmt.Errorf("FS: Media Scan: Error handling folder: %s", err)
}
if _, ok := folderAttachables[folder.ID]; !ok {
folderAttachables[folder.ID] = attachables{}
}
ext := path.Ext(osPathname)
var isMetadata bool = false
if osPathname[len(osPathname)-len(metadataFile):] == metadataFile {
isMetadata = true
}
if img, audio := imgType[ext], audioType[ext];
!img && !audio && !isMetadata {
return nil
}
if _, ok := imgType[ext]; ok {
art, err := handleImg(osPathname, folder)
if err != nil {
return fmt.Errorf("FS: Media Scan: Error handling image: %s", err)
}
currentVal := folderAttachables[folder.ID]
currentVal.art = art
folderAttachables[folder.ID] = currentVal
return nil
}
if isMetadata {
md, err := handleMetadata(osPathname, folder)
if err != nil {
return fmt.Errorf(
"FS: Media Scan: Error handling Metadata: %s", err)
}
currentVal := folderAttachables[folder.ID]
currentVal.md = md
folderAttachables[folder.ID] = currentVal
return nil
}
if _, ok := audioType[ext]; ok {
err := handleAudio(osPathname, info, folder)
if err != nil {
return fmt.Errorf(
"FS: Media Scan: Error handling audio file: %s", err)
}
}
return nil
},
ErrorCallback: func(osPathname string, err error) godirwalk.ErrorAction {
util.Logger.Printf("%s", err)
return godirwalk.SkipNode
},
Unsorted: true,
})
joinAttachables()
if err := db.DB.TruncateLog(); err != nil {
util.Logger.Printf("FS: Media Scan: Could not truncate WAL File: %v", err)
}
if fs.verbose {
util.Logger.Printf(
"FS: Media Scan Complete [time: %s]", time.Since(startTime).String())
util.Logger.Printf(
"FS: Media Scan: Added: " +
"[art: %d] [artists: %d] [albums: %d] [songs: %d] " +
"[folders: %d] [metadata: %d]",
artCount, artistCount, albumCount,
songCount, folderCount, metadataCount,
)
util.Logger.Printf("FS: Updated: [songs: %d]", songUpdate)
}
sum := artCount + artistCount + albumCount + songCount + folderCount
return sum, nil
}
func handleFolder(cPath string, info os.FileMode) (*db.Folder, error) {
// Check for cached folder
if seenFolder, ok := folderCache[cPath]; ok {
return seenFolder, nil
}
folder := new(db.Folder)
if info.IsDir() {
folder.Path = cPath
} else {
folder.Path = path.Dir(cPath)
}
err := folder.Load()
if err == nil {
folderCache[cPath] = folder
return folder, nil
}
if err == sql.ErrNoRows {
if _, err := os.Stat(path.Join(cPath, metadataFile)); os.IsNotExist(err) {
if _, err := os.Create(path.Join(cPath, metadataFile)); err != nil {
util.Logger.Printf("FS: Media Scan: Error creating Metadata file: %v", err)
} else {
util.Logger.Printf(
"FS: Media Scan: Handle Folder: Created new Metadata File at: %#v",
path.Join(cPath, metadataFile))
}
}
files, err := godirwalk.ReadDirents(folder.Path, nil)
if err != nil {
return nil, err
} else if len(files) == 0 {
return nil, fmt.Errorf(
"FS: Media Scan: Found no files in folder: %v", folder.Path)
}
util.Logger.Printf("FS: Media Scan: Found %v files in %v", len(files), cPath)
folder.Title = path.Base(folder.Path)
parent := new(db.Folder)
if info.IsDir() {
parent.Path = path.Dir(cPath)
} else {
parent.Path = path.Dir(path.Dir(cPath))
}
if err := parent.Load(); err != nil && err != sql.ErrNoRows {
return nil, err
} else if err == nil {
folder.ParentID = parent.ID
}
if err := folder.Save(); err != nil {
return nil, err
}
} else {
return nil, err
}
folderCache[folder.Path] = folder
folderCount++
return folder, nil
}
func handleImg(cPath string, folder *db.Folder) (*db.Art, error) {
art := new(db.Art)
art.Path = cPath
err := art.Load()
if err == nil {
return art, nil
}
if err == sql.ErrNoRows {
data, err := os.Stat(cPath)
if err != nil {
return nil, err
}
art.FolderID = folder.ID
art.FileSize = data.Size()
art.LastModified = data.ModTime().Unix()
if art.FileSize == 0 {
return nil, errors.New("Art File Size is 0")
}
if err := art.Save(); err != nil {
return nil, err
}
artCount++
return art, nil
}
return nil, err
}
func handleMetadata(cPath string, folder *db.Folder) (*db.Metadata, error) {
md := new(db.Metadata)
md.Path = cPath
err := md.Load()
if err == nil {
return md, nil
}
if err == sql.ErrNoRows {
data, err := os.Stat(cPath)
if err != nil {
return nil, err
}
md.FolderID = folder.ID
md.FileSize = data.Size()
md.LastModified = data.ModTime().Unix()
if err := md.Save(); err != nil {
return nil, err
}
metadataCount++
return md, nil
}
return nil, err
}
func
|
(cPath string, info os.FileMode, folder *db.Folder) error {
song, err := db.SongFromFile(cPath)
if err != nil {
return err
}
data, err := os.Stat(cPath)
if err != nil {
return err
}
if data.Size() == 0 {
return errors.New("Audio File Size is 0")
}
song.Path = cPath
song.FileSize = data.Size()
song.LastModified = data.ModTime().Unix()
song.FolderID = folder.ID
song.FileTypeID = db.FileTypeMap[path.Ext(cPath)]
artist, err := handleArtist(song)
if err != nil {
return err
}
song.ArtistID = artist.ID
fIDHasAttachables[artist.FolderID] = artist
album, err := handleAlbum(song)
if err != nil {
return err
}
song.AlbumID = album.ID
fIDHasAttachables[album.FolderID] = album
err = checkForModification(song)
if err != nil {
return err
}
return nil
}
func handleArtist(song *db.Song) (*db.Artist, error) {
artist := db.GetArtistFromSong(song)
if seenArtist, ok := artistCache[artist.Title]; ok {
return seenArtist, nil
}
err := artist.Load()
if err == nil {
artistCache[artist.Title] = artist
return artist, nil
|
handleAudio
|
identifier_name
|
mediaScan.go
|
false
go func(haltWalk bool) {
// Wait until signal recieved
<- walkCancelChan
// Halt fs walk
mutex.Lock()
haltWalk = true
mutex.Unlock()
}(haltWalk)
// Track metrics
artCount = 0
artistCount = 0
albumCount = 0
songCount = 0
songUpdate = 0
folderCount = 0
metadataCount = 0
startTime := time.Now()
folderCache = map[string]*db.Folder{}
artistCache = map[string]*db.Artist{}
albumCache = map[string]*db.Album{}
if fs.verbose {
util.Logger.Printf("FS: Scanning: %s", baseFolder)
}
godirwalk.Walk(baseFolder, &godirwalk.Options{
Callback: func(osPathname string, de *godirwalk.Dirent) error {
info := de.ModeType()
util.Logger.Printf("FS: Media Scan: Got new file: %s", osPathname)
folder, err := handleFolder(osPathname, info)
if err != nil {
return fmt.Errorf("FS: Media Scan: Error handling folder: %s", err)
}
if _, ok := folderAttachables[folder.ID]; !ok {
folderAttachables[folder.ID] = attachables{}
}
ext := path.Ext(osPathname)
var isMetadata bool = false
if osPathname[len(osPathname)-len(metadataFile):] == metadataFile {
isMetadata = true
}
if img, audio := imgType[ext], audioType[ext];
!img && !audio && !isMetadata {
return nil
}
if _, ok := imgType[ext]; ok {
art, err := handleImg(osPathname, folder)
if err != nil {
return fmt.Errorf("FS: Media Scan: Error handling image: %s", err)
}
currentVal := folderAttachables[folder.ID]
currentVal.art = art
|
if isMetadata {
md, err := handleMetadata(osPathname, folder)
if err != nil {
return fmt.Errorf(
"FS: Media Scan: Error handling Metadata: %s", err)
}
currentVal := folderAttachables[folder.ID]
currentVal.md = md
folderAttachables[folder.ID] = currentVal
return nil
}
if _, ok := audioType[ext]; ok {
err := handleAudio(osPathname, info, folder)
if err != nil {
return fmt.Errorf(
"FS: Media Scan: Error handling audio file: %s", err)
}
}
return nil
},
ErrorCallback: func(osPathname string, err error) godirwalk.ErrorAction {
util.Logger.Printf("%s", err)
return godirwalk.SkipNode
},
Unsorted: true,
})
joinAttachables()
if err := db.DB.TruncateLog(); err != nil {
util.Logger.Printf("FS: Media Scan: Could not truncate WAL File: %v", err)
}
if fs.verbose {
util.Logger.Printf(
"FS: Media Scan Complete [time: %s]", time.Since(startTime).String())
util.Logger.Printf(
"FS: Media Scan: Added: " +
"[art: %d] [artists: %d] [albums: %d] [songs: %d] " +
"[folders: %d] [metadata: %d]",
artCount, artistCount, albumCount,
songCount, folderCount, metadataCount,
)
util.Logger.Printf("FS: Updated: [songs: %d]", songUpdate)
}
sum := artCount + artistCount + albumCount + songCount + folderCount
return sum, nil
}
func handleFolder(cPath string, info os.FileMode) (*db.Folder, error) {
// Check for cached folder
if seenFolder, ok := folderCache[cPath]; ok {
return seenFolder, nil
}
folder := new(db.Folder)
if info.IsDir() {
folder.Path = cPath
} else {
folder.Path = path.Dir(cPath)
}
err := folder.Load()
if err == nil {
folderCache[cPath] = folder
return folder, nil
}
if err == sql.ErrNoRows {
if _, err := os.Stat(path.Join(cPath, metadataFile)); os.IsNotExist(err) {
if _, err := os.Create(path.Join(cPath, metadataFile)); err != nil {
util.Logger.Printf("FS: Media Scan: Error creating Metadata file: %v", err)
} else {
util.Logger.Printf(
"FS: Media Scan: Handle Folder: Created new Metadata File at: %#v",
path.Join(cPath, metadataFile))
}
}
files, err := godirwalk.ReadDirents(folder.Path, nil)
if err != nil {
return nil, err
} else if len(files) == 0 {
return nil, fmt.Errorf(
"FS: Media Scan: Found no files in folder: %v", folder.Path)
}
util.Logger.Printf("FS: Media Scan: Found %v files in %v", len(files), cPath)
folder.Title = path.Base(folder.Path)
parent := new(db.Folder)
if info.IsDir() {
parent.Path = path.Dir(cPath)
} else {
parent.Path = path.Dir(path.Dir(cPath))
}
if err := parent.Load(); err != nil && err != sql.ErrNoRows {
return nil, err
} else if err == nil {
folder.ParentID = parent.ID
}
if err := folder.Save(); err != nil {
return nil, err
}
} else {
return nil, err
}
folderCache[folder.Path] = folder
folderCount++
return folder, nil
}
func handleImg(cPath string, folder *db.Folder) (*db.Art, error) {
art := new(db.Art)
art.Path = cPath
err := art.Load()
if err == nil {
return art, nil
}
if err == sql.ErrNoRows {
data, err := os.Stat(cPath)
if err != nil {
return nil, err
}
art.FolderID = folder.ID
art.FileSize = data.Size()
art.LastModified = data.ModTime().Unix()
if art.FileSize == 0 {
return nil, errors.New("Art File Size is 0")
}
if err := art.Save(); err != nil {
return nil, err
}
artCount++
return art, nil
}
return nil, err
}
func handleMetadata(cPath string, folder *db.Folder) (*db.Metadata, error) {
md := new(db.Metadata)
md.Path = cPath
err := md.Load()
if err == nil {
return md, nil
}
if err == sql.ErrNoRows {
data, err := os.Stat(cPath)
if err != nil {
return nil, err
}
md.FolderID = folder.ID
md.FileSize = data.Size()
md.LastModified = data.ModTime().Unix()
if err := md.Save(); err != nil {
return nil, err
}
metadataCount++
return md, nil
}
return nil, err
}
func handleAudio(cPath string, info os.FileMode, folder *db.Folder) error {
song, err := db.SongFromFile(cPath)
if err != nil {
return err
}
data, err := os.Stat(cPath)
if err != nil {
return err
}
if data.Size() == 0 {
return errors.New("Audio File Size is 0")
}
song.Path = cPath
song.FileSize = data.Size()
song.LastModified = data.ModTime().Unix()
song.FolderID = folder.ID
song.FileTypeID = db.FileTypeMap[path.Ext(cPath)]
artist, err := handleArtist(song)
if err != nil {
return err
}
song.ArtistID = artist.ID
fIDHasAttachables[artist.FolderID] = artist
album, err := handleAlbum(song)
if err != nil {
return err
}
song.AlbumID = album.ID
fIDHasAttachables[album.FolderID] = album
err = checkForModification(song)
if err != nil {
return err
}
return nil
}
func handleArtist(song *db.Song) (*db.Artist, error) {
artist := db.GetArtistFromSong(song)
if seenArtist, ok := artistCache[artist.Title]; ok {
return seenArtist, nil
}
err := artist.Load()
if err == nil {
artistCache[artist.Title] = artist
return artist, nil
|
folderAttachables[folder.ID] = currentVal
return nil
}
|
random_line_split
|
widgets.js
|
{
type
}), "id");
// we're rarely going to have more than 2
// collaborators, so indexOf is fine
resultsParent.results = _.filter(results, (result) => {
return existingIdsOfType.indexOf(result.id) === -1;
});
// if there are no results, remove the category
// so that we get a "No results" thing
if (resultsParent.results.length === 0) {
delete response.results[resultsAttribute];
}
};
removeExisting("collaborations", "collaboration");
removeExisting("users", "user");
return response;
},
},
type: "category",
onSelect(result, response) {
let collabDescriptors = instance.data.collabDescriptors.get();
// only add if it doesn't already exist
if (_.pluck(collabDescriptors, "id").indexOf(result.id) === -1) {
collabDescriptors.push(result);
instance.data.collabDescriptors.set(collabDescriptors);
}
// clear the search input field and focus it (in case
// they used the mouse to click an option, which
// unfocuses the search input)
Meteor.defer(() => {
let searchInput = $(`${searchJquery} input`)[0];
searchInput.value = "";
searchInput.focus();
});
// clear the cache of searches so that we can remove
// the just-selected item from the results before displaying them
$(searchJquery).search("clear cache");
},
});
} else {
// destroy any possible old search
$(searchJquery).search("destroy");
}
});
});
Template.addCollaboratorSearch.helpers({
randomId() {
return Template.instance().randomId;
},
});
// Template.showErrorMessage
Template.showErrorMessage.helpers({
getError: function () {
return Template.instance().data.get();
},
});
Template.showErrorMessage.events({
"click .close-error-message": function (event, instance) {
instance.data.set(null);
},
});
// Template.contactUsButton
Template.contactUsButton.helpers({
emailSubject() {
return `MedBook%20Patient%20Care:%20${FlowRouter.current().path}`;
},
});
// Template.listSamplesButton
Template.listSamplesButton.onCreated(function () {
let instance = this;
instance.showMore = new ReactiveVar(false);
// set the showMore default value whenever the data changes
instance.autorun(() => {
let { sampleLabels } = Template.currentData();
if (sampleLabels) {
instance.showMore.set(sampleLabels.length <= 6);
}
});
});
Template.listSamplesButton.helpers({
showMore() { return Template.instance().showMore.get(); },
showStudyLabels() {
let { profile } = Meteor.user();
return profile && profile.showStudyLabels;
},
sampleToShow() {
let instance = Template.instance();
let { sampleLabels } = instance.data;
// remove study labels if necessary
let { profile } = Meteor.user();
if (!profile || !profile.showStudyLabels) {
sampleLabels = MedBook.utility.unqualifySampleLabels(sampleLabels);
}
// return either the whole list or the first couple items
if (instance.showMore.get()) {
if (instance.data.sampleLabels.length > 1000) {
return sampleLabels
.slice(0, 1000)
.concat([`... and ${sampleLabels.length - 1000} more samples`]);
}
return sampleLabels;
} else {
return sampleLabels
.slice(0, 3)
.concat([`... and ${sampleLabels.length - 3} more samples`]);
}
},
dropdownOptions() {
return {
action: "nothing"
};
},
alwaysShowAll() {
return this.sampleLabels && this.sampleLabels.length <= 6;
},
not(variable) {
return !variable;
},
tooManyToShowAll() {
return this.sampleLabels.length > 1000;
},
});
Template.listSamplesButton.events({
"click .toggle-list"(event, instance) {
instance.showMore.set(!instance.showMore.get());
},
"click .toggle-study-labels"(event, instance) {
let { profile } = Meteor.user();
let newValue = !profile || !profile.showStudyLabels;
Meteor.users.update(Meteor.userId(), {
$set: {
"profile.showStudyLabels": newValue
}
});
},
"click .download-list"(event, instance) {
let { sampleLabels } = instance.data;
// unqualify sample labels before downloading the list
let { profile } = Meteor.user();
if (!profile || !profile.showStudyLabels) {
sampleLabels = MedBook.utility.unqualifySampleLabels(sampleLabels);
}
saveStringAsFile(sampleLabels.join("\n"), instance.data.filename);
},
});
// Template.listFeaturesButton
let saveStringAsFile = function () {
// run this once and then return a function which knows about this a tag
var a = document.createElement("a");
document.body.appendChild(a);
a.style = "display: none";
return function (data, fileName) {
let blob = new Blob([data], { type: "text/plain" });
let url = window.URL.createObjectURL(blob);
a.href = url;
a.download = fileName;
a.click();
window.URL.revokeObjectURL(url);
};
}();
Template.listFeaturesButton.onCreated(function () {
let instance = this;
instance.showMore = new ReactiveVar(false);
// set the showMore default value whenever the data changes
instance.autorun(() => {
let { featureLabels } = Template.currentData();
if (featureLabels) {
instance.showMore.set(featureLabels.length <= 6);
}
});
});
Template.listFeaturesButton.helpers({
showMore() { return Template.instance().showMore.get(); },
featuresToShow() {
let instance = Template.instance();
let { featureLabels } = instance.data;
if (featureLabels) {
// return either the whole list or the first couple items
if (instance.showMore.get()) {
if (instance.data.featureLabels.length > 1000) {
return featureLabels
.slice(0, 1000)
.concat([`... and ${featureLabels.length - 1000} more features`]);
}
return featureLabels;
} else {
return featureLabels
.slice(0, 3)
.concat([`... and ${featureLabels.length - 3} more features`]);
}
}
},
tooManyToShowAll() {
return this.featureLabels.length > 1000;
},
});
Template.listFeaturesButton.events({
"click .toggle-list"(event, instance) {
instance.showMore.set(!instance.showMore.get());
},
"click .download-list"(event, instance) {
let text = instance.data.featureLabels.join("\n");
saveStringAsFile(text, instance.data.filename);
},
});
// Template.semanticUIDropdown
Template.semanticUIDropdown.onRendered(function () {
this.$(".ui.dropdown").dropdown(this.data.options);
});
// Template.semanticUICheckbox
Template.semanticUICheckbox.onRendered(function () {
this.$(".ui.checkbox").checkbox(this.data.options);
});
// Template.semanticUIAccordion
Template.semanticUIAccordion.onRendered(function () {
this.$(".ui.accordion").accordion(this.data);
});
// Template.semanticUIPopup
// can give:
// selector=".ui.popup.hi.yop"
// options={ option: "hi" }
Template.semanticUIPopup.onRendered(function () {
let { selector, options } = this.data;
if (!selector) {
console.error("Didn't give a selector to the semanticUIPopup");
} else {
this.$(selector).popup(options);
}
});
// Template.viewJobButton
Template.viewJobButton.onCreated(function () {
let instance = this;
instance.deleteClicked = new ReactiveVar(false);
});
Template.viewJobButton.onRendered(function () {
this.$(".ui.dropdown").dropdown({
// don't bold what's clicked
action: "nothing"
});
});
Template.viewJobButton.helpers({
capitalize(str) {
return str.charAt(0).toUpperCase() + str.slice(1);
},
buttonClass() {
if (this.job.status === "done") { return "primary"; }
else if (this.job.status === "error") { return "negative"; }
else if (this.job.status === "running") { return "secondary"; }
// else { return "" }
},
});
Template.viewJobButton.events({
"click .share-job"(event, instance) {
Session.set("editCollaborationsCollection", "Jobs");
Session.set("editCollaborationsMongoIds", [this.job._id]);
$(".edit-collaborations-modal").modal("show");
},
"click .delete-job"(event, instance) {
var deleteClicked = instance.deleteClicked;
if (deleteClicked.get()) {
Meteor.call("removeObjects", "Jobs", [this.job._id]);
} else
|
{
deleteClicked.set(true);
// if they click elsewhere, cancel remove
// wait until propogation finishes before registering event handler
Meteor.defer(() => {
$("html").one("click", () => {
deleteClicked.set(false);
});
});
}
|
conditional_block
|
|
widgets.js
|
one has a random id assigned so the jquery doesn't interfere
instance.randomId = Random.id();
});
Template.addCollaboratorSearch.onRendered(function () {
let instance = this;
const searchJquery = `.${instance.randomId}.collaboration-search`;
// only initialize the collaboration search when the user is logged in
// because the API url depends on it the login token
instance.autorun(() => {
if (Meteor.user()) {
// destroy any possible old search
$(searchJquery).search("destroy");
// set up the collaboration search
$(searchJquery).search({
apiSettings: {
url: `${location.origin}/search/collaborations` +
`?token=${Accounts._storedLoginToken()}&q={query}`,
onResponse(response) {
// remove existing users/collaborations from the response
let allExisting = instance.data.collabDescriptors.get();
const removeExisting = (resultsAttribute, type) => {
// save the parent so we can set .results easily
const resultsParent = response.results[resultsAttribute];
const { results } = resultsParent;
const existingIdsOfType = _.pluck(_.where(allExisting, {
type
}), "id");
// we're rarely going to have more than 2
// collaborators, so indexOf is fine
resultsParent.results = _.filter(results, (result) => {
return existingIdsOfType.indexOf(result.id) === -1;
});
// if there are no results, remove the category
// so that we get a "No results" thing
if (resultsParent.results.length === 0) {
delete response.results[resultsAttribute];
}
};
removeExisting("collaborations", "collaboration");
removeExisting("users", "user");
return response;
},
},
type: "category",
onSelect(result, response) {
let collabDescriptors = instance.data.collabDescriptors.get();
// only add if it doesn't already exist
if (_.pluck(collabDescriptors, "id").indexOf(result.id) === -1) {
collabDescriptors.push(result);
instance.data.collabDescriptors.set(collabDescriptors);
}
// clear the search input field and focus it (in case
// they used the mouse to click an option, which
// unfocuses the search input)
Meteor.defer(() => {
let searchInput = $(`${searchJquery} input`)[0];
searchInput.value = "";
searchInput.focus();
});
// clear the cache of searches so that we can remove
// the just-selected item from the results before displaying them
$(searchJquery).search("clear cache");
},
});
} else {
// destroy any possible old search
$(searchJquery).search("destroy");
}
});
});
Template.addCollaboratorSearch.helpers({
randomId() {
return Template.instance().randomId;
},
});
// Template.showErrorMessage
Template.showErrorMessage.helpers({
getError: function () {
return Template.instance().data.get();
},
});
Template.showErrorMessage.events({
"click .close-error-message": function (event, instance) {
instance.data.set(null);
},
});
// Template.contactUsButton
Template.contactUsButton.helpers({
emailSubject() {
return `MedBook%20Patient%20Care:%20${FlowRouter.current().path}`;
},
});
// Template.listSamplesButton
Template.listSamplesButton.onCreated(function () {
let instance = this;
instance.showMore = new ReactiveVar(false);
// set the showMore default value whenever the data changes
instance.autorun(() => {
let { sampleLabels } = Template.currentData();
if (sampleLabels) {
instance.showMore.set(sampleLabels.length <= 6);
}
});
});
Template.listSamplesButton.helpers({
showMore() { return Template.instance().showMore.get(); },
showStudyLabels() {
let { profile } = Meteor.user();
return profile && profile.showStudyLabels;
},
sampleToShow() {
let instance = Template.instance();
let { sampleLabels } = instance.data;
// remove study labels if necessary
let { profile } = Meteor.user();
if (!profile || !profile.showStudyLabels) {
sampleLabels = MedBook.utility.unqualifySampleLabels(sampleLabels);
}
// return either the whole list or the first couple items
if (instance.showMore.get()) {
if (instance.data.sampleLabels.length > 1000) {
return sampleLabels
.slice(0, 1000)
.concat([`... and ${sampleLabels.length - 1000} more samples`]);
}
return sampleLabels;
} else {
return sampleLabels
.slice(0, 3)
.concat([`... and ${sampleLabels.length - 3} more samples`]);
}
},
dropdownOptions() {
return {
action: "nothing"
};
},
alwaysShowAll() {
return this.sampleLabels && this.sampleLabels.length <= 6;
},
not(variable) {
return !variable;
},
tooManyToShowAll() {
return this.sampleLabels.length > 1000;
},
});
Template.listSamplesButton.events({
"click .toggle-list"(event, instance) {
instance.showMore.set(!instance.showMore.get());
},
"click .toggle-study-labels"(event, instance) {
let { profile } = Meteor.user();
let newValue = !profile || !profile.showStudyLabels;
Meteor.users.update(Meteor.userId(), {
$set: {
"profile.showStudyLabels": newValue
}
});
},
"click .download-list"(event, instance) {
let { sampleLabels } = instance.data;
// unqualify sample labels before downloading the list
let { profile } = Meteor.user();
if (!profile || !profile.showStudyLabels) {
sampleLabels = MedBook.utility.unqualifySampleLabels(sampleLabels);
}
saveStringAsFile(sampleLabels.join("\n"), instance.data.filename);
},
});
// Template.listFeaturesButton
let saveStringAsFile = function () {
// run this once and then return a function which knows about this a tag
var a = document.createElement("a");
document.body.appendChild(a);
a.style = "display: none";
return function (data, fileName) {
let blob = new Blob([data], { type: "text/plain" });
let url = window.URL.createObjectURL(blob);
a.href = url;
a.download = fileName;
a.click();
window.URL.revokeObjectURL(url);
};
}();
Template.listFeaturesButton.onCreated(function () {
let instance = this;
instance.showMore = new ReactiveVar(false);
// set the showMore default value whenever the data changes
instance.autorun(() => {
let { featureLabels } = Template.currentData();
if (featureLabels) {
instance.showMore.set(featureLabels.length <= 6);
}
});
});
Template.listFeaturesButton.helpers({
showMore() { return Template.instance().showMore.get(); },
featuresToShow() {
let instance = Template.instance();
let { featureLabels } = instance.data;
if (featureLabels) {
// return either the whole list or the first couple items
if (instance.showMore.get()) {
if (instance.data.featureLabels.length > 1000) {
return featureLabels
.slice(0, 1000)
.concat([`... and ${featureLabels.length - 1000} more features`]);
}
return featureLabels;
} else {
return featureLabels
.slice(0, 3)
.concat([`... and ${featureLabels.length - 3} more features`]);
}
}
},
tooManyToShowAll() {
return this.featureLabels.length > 1000;
},
});
Template.listFeaturesButton.events({
"click .toggle-list"(event, instance) {
instance.showMore.set(!instance.showMore.get());
},
"click .download-list"(event, instance) {
let text = instance.data.featureLabels.join("\n");
saveStringAsFile(text, instance.data.filename);
},
});
// Template.semanticUIDropdown
Template.semanticUIDropdown.onRendered(function () {
this.$(".ui.dropdown").dropdown(this.data.options);
});
// Template.semanticUICheckbox
Template.semanticUICheckbox.onRendered(function () {
this.$(".ui.checkbox").checkbox(this.data.options);
});
// Template.semanticUIAccordion
Template.semanticUIAccordion.onRendered(function () {
this.$(".ui.accordion").accordion(this.data);
});
// Template.semanticUIPopup
// can give:
// selector=".ui.popup.hi.yop"
// options={ option: "hi" }
Template.semanticUIPopup.onRendered(function () {
let { selector, options } = this.data;
if (!selector) {
console.error("Didn't give a selector to the semanticUIPopup");
} else {
this.$(selector).popup(options);
}
});
// Template.viewJobButton
Template.viewJobButton.onCreated(function () {
let instance = this;
instance.deleteClicked = new ReactiveVar(false);
});
Template.viewJobButton.onRendered(function () {
this.$(".ui.dropdown").dropdown({
// don't bold what's clicked
action: "nothing"
});
});
|
Template.viewJobButton.helpers({
|
random_line_split
|
|
widgets.js
|
} more features`]);
}
}
},
tooManyToShowAll() {
return this.featureLabels.length > 1000;
},
});
Template.listFeaturesButton.events({
"click .toggle-list"(event, instance) {
instance.showMore.set(!instance.showMore.get());
},
"click .download-list"(event, instance) {
let text = instance.data.featureLabels.join("\n");
saveStringAsFile(text, instance.data.filename);
},
});
// Template.semanticUIDropdown
Template.semanticUIDropdown.onRendered(function () {
this.$(".ui.dropdown").dropdown(this.data.options);
});
// Template.semanticUICheckbox
Template.semanticUICheckbox.onRendered(function () {
this.$(".ui.checkbox").checkbox(this.data.options);
});
// Template.semanticUIAccordion
Template.semanticUIAccordion.onRendered(function () {
this.$(".ui.accordion").accordion(this.data);
});
// Template.semanticUIPopup
// can give:
// selector=".ui.popup.hi.yop"
// options={ option: "hi" }
Template.semanticUIPopup.onRendered(function () {
let { selector, options } = this.data;
if (!selector) {
console.error("Didn't give a selector to the semanticUIPopup");
} else {
this.$(selector).popup(options);
}
});
// Template.viewJobButton
Template.viewJobButton.onCreated(function () {
let instance = this;
instance.deleteClicked = new ReactiveVar(false);
});
Template.viewJobButton.onRendered(function () {
this.$(".ui.dropdown").dropdown({
// don't bold what's clicked
action: "nothing"
});
});
Template.viewJobButton.helpers({
capitalize(str) {
return str.charAt(0).toUpperCase() + str.slice(1);
},
buttonClass() {
if (this.job.status === "done") { return "primary"; }
else if (this.job.status === "error") { return "negative"; }
else if (this.job.status === "running") { return "secondary"; }
// else { return "" }
},
});
Template.viewJobButton.events({
"click .share-job"(event, instance) {
Session.set("editCollaborationsCollection", "Jobs");
Session.set("editCollaborationsMongoIds", [this.job._id]);
$(".edit-collaborations-modal").modal("show");
},
"click .delete-job"(event, instance) {
var deleteClicked = instance.deleteClicked;
if (deleteClicked.get()) {
Meteor.call("removeObjects", "Jobs", [this.job._id]);
} else {
deleteClicked.set(true);
// if they click elsewhere, cancel remove
// wait until propogation finishes before registering event handler
Meteor.defer(() => {
$("html").one("click", () => {
deleteClicked.set(false);
});
});
}
},
});
// Template.jobWrapper
Template.jobWrapper.onCreated(function () {
let instance = this;
// subscribe and keep up to date
instance.autorun(function () {
instance.subscribe("specificJob", Template.currentData().job_id);
});
});
Template.jobWrapper.helpers({
getJob() {
return Jobs.findOne(this.job_id);
},
onDeleteJob() {
let { listRoute } = Template.instance().data;
return function () {
FlowRouter.go(listRoute);
};
},
});
// Template.jobErrorBlobs
Template.jobErrorBlobs.onCreated(function () {
let instance = this;
instance.subscribe("blobsAssociatedWithObject", "Jobs", instance.data._id);
});
Template.jobErrorBlobs.helpers({
blobs() {
return Blobs2.find({}, { sort: { file_name: 1 } });
},
blobUrl() {
let userId = Meteor.userId();
let loginToken = Accounts._storedLoginToken();
let jobId = Template.instance().data._id;
return `/download/${userId}/${loginToken}/job-blob/${jobId}/` +
this.file_name;
}
});
Template.gseaJob.events({
"click .iframe-new-tab"(event, instance) {
// open the current iFrame URL in a new tab: magic!
console.log("this._id:", this._id);
window.open($("#" + this._id).contents().get(0).location.href, "_blank");
},
});
// Template.showRecords
Template.showRecords.onCreated(function () {
let instance = this;
let { mongoId, collectionName } = instance.data;
instance.gettingRecordsData = new ReactiveVar(true);
instance.recordsData = [];
Meteor.call("getRecords", collectionName, mongoId, (error, result) => {
if (error) { throw error; }
else {
instance.recordsData = result;
instance.gettingRecordsData.set(false);
}
});
});
Template.showRecords.helpers({
gettingRecordsData() {
return Template.instance().gettingRecordsData.get();
},
recordsData() {
return Template.instance().recordsData;
},
});
// Template.recordsHandsOnTable
Template.recordsHandsOnTable.onRendered(function () {
let instance = this;
let { recordsData, fields, primaryFieldName } = instance.data;
// calculate the spreadsheet columns
// always have the sample label field be first
let columns = [ { data: primaryFieldName } ];
let colHeaders = [ primaryFieldName ];
_.each(fields, (field) => {
if (field.name !== primaryFieldName) {
columns.push({ data: field.name });
colHeaders.push(field.name);
}
});
var container = document.getElementById('recordsHOT');
var hot = new Handsontable(container, {
data: recordsData,
startRows: fields.length,
startCols: recordsData.length,
columns,
colHeaders,
readOnly: true,
columnSorting: true,
});
let { hotPassback } = instance.data;
if (hotPassback) {
hotPassback.hotInstance = hot;
hotPassback.initialized.set(true);
}
});
Template.recordsHandsOnTable.helpers({
height() {
if (this.recordsData.length > 100) {
// make the table as tall as the viewfinder
// http://stackoverflow.com/a/16837667/1092640
return "100vh";
} else {
return "auto";
}
},
});
// Template.gseaFromGeneSetModal
// This modal depends on the geneSetIdForGsea query parameter.
Template.gseaFromGeneSetModal.onCreated(function () {
instance = this;
// if we're waiting for more than 10 seconds they probably don't have
// access to the gene set, so tell them
instance.permissionLikelyDenied = new ReactiveVar(false);
let lastTimeout;
// show the modal when the query param is set
instance.autorun(() => {
let geneSetId = FlowRouter.getQueryParam("geneSetIdForGsea");
// reset permissionLikelyDenied and any previous timeouts
instance.permissionLikelyDenied.set(false);
Meteor.clearTimeout(lastTimeout);
if (geneSetId) {
// start a timer to flip permission likely denied on if it hasn't loaded
lastTimeout = Meteor.setTimeout(() => {
if (!GeneSets.findOne(geneSetId)) {
instance.permissionLikelyDenied.set(true);
}
}, 5000);
}
});
});
Template.gseaFromGeneSetModal.onRendered(function () {
let instance = this;
instance.$(".gsea-from-gene-set.modal").modal({
// remove geneSetIdForGsea from the query parameters when it is closed
onHide() {
// Defer setting the query parameters. When a user navigates away from
// the page with the modal open (viewing a job, for example), the
// query parameter is cleared before the route changes. This means
// that when the user hits the back button, the query parameter won't
// exist and the modal won't open automatically. Deferring waits
// to clear the query param until the route has changed, which solves
// this bug.
Meteor.defer(() => {
FlowRouter.setQueryParams({
geneSetIdForGsea: null
});
});
},
observeChanges: true,
});
// show the modal when the query param is set
instance.autorun(() => {
let geneSetId = FlowRouter.getQueryParam("geneSetIdForGsea");
if (geneSetId) {
$(".gsea-from-gene-set.modal").modal("show");
} else {
$(".gsea-from-gene-set.modal").modal("hide");
}
});
});
Template.gseaFromGeneSetModal.helpers({
previousJobsCols() {
return [
{ title: "Ranking field", field: "args.gene_set_sort_field" },
{
title: "Gene sets",
func: function (job) {
return job.args.gene_set_group_names.join("\n");
},
fields: [ "args.gene_set_group_names" ],
},
];
},
query() {
return {
"args.gene_set_id": FlowRouter.getQueryParam("geneSetIdForGsea"),
};
},
getGeneSet() {
let geneSetId = FlowRouter.getQueryParam("geneSetIdForGsea");
if (geneSetId) {
return GeneSets.findOne(geneSetId);
}
},
|
extraFields
|
identifier_name
|
|
widgets.js
|
.instance().waitingForServer.get(); },
collabsList() { return Template.instance().collabsList; },
collectionName() { return Session.get("editCollaborationsCollection"); },
mongoIds() { return Session.get("editCollaborationsMongoIds"); },
});
// Template.listCollaborators
Template.listCollaborators.onCreated(function () {
let instance = this;
instance.dataLoading = new ReactiveVar(false);
// store the full descriptor objects here and pass the ids to
// instance.data.collabList
instance.collabDescriptors = new ReactiveVar([]);
// pass the collaboration names to the parent template whenever
// collabDescriptors changes
instance.autorun(() => {
let collabNames = _.pluck(instance.collabDescriptors.get(), "id");
if (instance.data.collabsList) {
instance.data.collabsList.set(collabNames);
} else {
console.error("forgot to pass listCollaborators collabsList");
}
});
// cache old values of mongoIds, collectionName so it doesn't rerun a bunch
let oldCollectionName, oldMongoIds;
// who the user can share with
instance.autorun(() => {
let { collectionName, mongoIds, attribute } = Template.currentData();
// get the correct collaborations for possibly many objects...
// Wait until we're logged-in because when the user refreshes there's
// a slight delay before logging in where it'll run this code and fail.
// Sometimes collectionName and mongoIds needs to load with a subscription,
// so wait until they're truthy.
// Don't run again if nothing's changed.
if (Meteor.userId() && collectionName && mongoIds &&
!(collectionName === oldCollectionName &&
_.isEqual(mongoIds, oldMongoIds))) {
oldCollectionName = collectionName;
oldMongoIds = mongoIds;
// for now show "data loading" UI
instance.dataLoading.set(true);
if (!attribute) {
attribute = "collaborations";
}
Meteor.call("getObjsCollabDescriptions", collectionName, mongoIds,
attribute, (error, result) => {
if (error) console.log("error:", error);
instance.collabDescriptors.set(result);
instance.dataLoading.set(false);
});
}
});
});
Template.listCollaborators.helpers({
collabsListFetched() {
return Template.instance().collabDescriptors.get();
},
collabDescriptors() {
return Template.instance().collabDescriptors;
},
not(thing) {
return !thing;
}
});
Template.listCollaborators.events({
"click .remove-collaboration"(event, instance) {
let collabDescriptors = instance.collabDescriptors.get();
collabDescriptors = _.filter(collabDescriptors, (collabDesc) => {
return collabDesc.id !== this.id;
});
instance.collabDescriptors.set(collabDescriptors);
},
});
// Template.addCollaboratorSearch
Template.addCollaboratorSearch.onCreated(function () {
let instance = this;
// each one has a random id assigned so the jquery doesn't interfere
instance.randomId = Random.id();
});
Template.addCollaboratorSearch.onRendered(function () {
let instance = this;
const searchJquery = `.${instance.randomId}.collaboration-search`;
// only initialize the collaboration search when the user is logged in
// because the API url depends on it the login token
instance.autorun(() => {
if (Meteor.user()) {
// destroy any possible old search
$(searchJquery).search("destroy");
// set up the collaboration search
$(searchJquery).search({
apiSettings: {
url: `${location.origin}/search/collaborations` +
`?token=${Accounts._storedLoginToken()}&q={query}`,
onResponse(response) {
// remove existing users/collaborations from the response
let allExisting = instance.data.collabDescriptors.get();
const removeExisting = (resultsAttribute, type) => {
// save the parent so we can set .results easily
const resultsParent = response.results[resultsAttribute];
const { results } = resultsParent;
const existingIdsOfType = _.pluck(_.where(allExisting, {
type
}), "id");
// we're rarely going to have more than 2
// collaborators, so indexOf is fine
resultsParent.results = _.filter(results, (result) => {
return existingIdsOfType.indexOf(result.id) === -1;
});
// if there are no results, remove the category
// so that we get a "No results" thing
if (resultsParent.results.length === 0) {
delete response.results[resultsAttribute];
}
};
removeExisting("collaborations", "collaboration");
removeExisting("users", "user");
return response;
},
},
type: "category",
onSelect(result, response) {
let collabDescriptors = instance.data.collabDescriptors.get();
// only add if it doesn't already exist
if (_.pluck(collabDescriptors, "id").indexOf(result.id) === -1) {
collabDescriptors.push(result);
instance.data.collabDescriptors.set(collabDescriptors);
}
// clear the search input field and focus it (in case
// they used the mouse to click an option, which
// unfocuses the search input)
Meteor.defer(() => {
let searchInput = $(`${searchJquery} input`)[0];
searchInput.value = "";
searchInput.focus();
});
// clear the cache of searches so that we can remove
// the just-selected item from the results before displaying them
$(searchJquery).search("clear cache");
},
});
} else {
// destroy any possible old search
$(searchJquery).search("destroy");
}
});
});
Template.addCollaboratorSearch.helpers({
randomId()
|
,
});
// Template.showErrorMessage
Template.showErrorMessage.helpers({
getError: function () {
return Template.instance().data.get();
},
});
Template.showErrorMessage.events({
"click .close-error-message": function (event, instance) {
instance.data.set(null);
},
});
// Template.contactUsButton
Template.contactUsButton.helpers({
emailSubject() {
return `MedBook%20Patient%20Care:%20${FlowRouter.current().path}`;
},
});
// Template.listSamplesButton
Template.listSamplesButton.onCreated(function () {
let instance = this;
instance.showMore = new ReactiveVar(false);
// set the showMore default value whenever the data changes
instance.autorun(() => {
let { sampleLabels } = Template.currentData();
if (sampleLabels) {
instance.showMore.set(sampleLabels.length <= 6);
}
});
});
Template.listSamplesButton.helpers({
showMore() { return Template.instance().showMore.get(); },
showStudyLabels() {
let { profile } = Meteor.user();
return profile && profile.showStudyLabels;
},
sampleToShow() {
let instance = Template.instance();
let { sampleLabels } = instance.data;
// remove study labels if necessary
let { profile } = Meteor.user();
if (!profile || !profile.showStudyLabels) {
sampleLabels = MedBook.utility.unqualifySampleLabels(sampleLabels);
}
// return either the whole list or the first couple items
if (instance.showMore.get()) {
if (instance.data.sampleLabels.length > 1000) {
return sampleLabels
.slice(0, 1000)
.concat([`... and ${sampleLabels.length - 1000} more samples`]);
}
return sampleLabels;
} else {
return sampleLabels
.slice(0, 3)
.concat([`... and ${sampleLabels.length - 3} more samples`]);
}
},
dropdownOptions() {
return {
action: "nothing"
};
},
alwaysShowAll() {
return this.sampleLabels && this.sampleLabels.length <= 6;
},
not(variable) {
return !variable;
},
tooManyToShowAll() {
return this.sampleLabels.length > 1000;
},
});
Template.listSamplesButton.events({
"click .toggle-list"(event, instance) {
instance.showMore.set(!instance.showMore.get());
},
"click .toggle-study-labels"(event, instance) {
let { profile } = Meteor.user();
let newValue = !profile || !profile.showStudyLabels;
Meteor.users.update(Meteor.userId(), {
$set: {
"profile.showStudyLabels": newValue
}
});
},
"click .download-list"(event, instance) {
let { sampleLabels } = instance.data;
// unqualify sample labels before downloading the list
let { profile } = Meteor.user();
if (!profile || !profile.showStudyLabels) {
sampleLabels = MedBook.utility.unqualifySampleLabels(sampleLabels);
}
saveStringAsFile(sampleLabels.join("\n"), instance.data.filename);
},
});
// Template.listFeaturesButton
let saveStringAsFile = function () {
// run this once and then return a function which knows about this a tag
var a = document.createElement("a");
document.body.appendChild(a);
a.style = "display: none";
return function (data
|
{
return Template.instance().randomId;
}
|
identifier_body
|
binder.go
|
) (types.Type, error) {
models := b.cfg.Models[name].Model
if len(models) == 0 {
return nil, fmt.Errorf(name + " not found in typemap")
}
if models[0] == "map[string]interface{}" {
return MapType, nil
}
if models[0] == "interface{}" {
return InterfaceType, nil
}
pkgName, typeName := code.PkgAndType(models[0])
if pkgName == "" {
return nil, fmt.Errorf("missing package name for %s", name)
}
obj, err := b.FindObject(pkgName, typeName)
if err != nil {
return nil, err
}
return obj.Type(), nil
}
func (b *Binder) FindObject(pkgName string, typeName string) (types.Object, error) {
if pkgName == "" {
return nil, fmt.Errorf("package cannot be nil")
}
pkg := b.pkgs.LoadWithTypes(pkgName)
if pkg == nil {
err := b.pkgs.Errors()
if err != nil {
return nil, fmt.Errorf("package could not be loaded: %s.%s: %w", pkgName, typeName, err)
}
return nil, fmt.Errorf("required package was not loaded: %s.%s", pkgName, typeName)
}
if b.objectCache == nil {
b.objectCache = make(map[string]map[string]types.Object, b.pkgs.Count())
}
defsIndex, ok := b.objectCache[pkgName]
if !ok {
defsIndex = indexDefs(pkg)
b.objectCache[pkgName] = defsIndex
}
// function based marshalers take precedence
if val, ok := defsIndex["Marshal"+typeName]; ok {
return val, nil
}
if val, ok := defsIndex[typeName]; ok {
return val, nil
}
return nil, fmt.Errorf("%w: %s.%s", ErrTypeNotFound, pkgName, typeName)
}
func indexDefs(pkg *packages.Package) map[string]types.Object {
res := make(map[string]types.Object)
scope := pkg.Types.Scope()
for astNode, def := range pkg.TypesInfo.Defs {
// only look at defs in the top scope
if def == nil {
continue
}
parent := def.Parent()
if parent == nil || parent != scope {
continue
}
if _, ok := res[astNode.Name]; !ok {
// The above check may not be really needed, it is only here to have a consistent behavior with
// previous implementation of FindObject() function which only honored the first inclusion of a def.
// If this is still needed, we can consider something like sync.Map.LoadOrStore() to avoid two lookups.
res[astNode.Name] = def
}
}
return res
}
func (b *Binder) PointerTo(ref *TypeReference) *TypeReference {
newRef := *ref
newRef.GO = types.NewPointer(ref.GO)
b.References = append(b.References, &newRef)
return &newRef
}
// TypeReference is used by args and field types. The Definition can refer to both input and output types.
type TypeReference struct {
Definition *ast.Definition
GQL *ast.Type
GO types.Type // Type of the field being bound. Could be a pointer or a value type of Target.
Target types.Type // The actual type that we know how to bind to. May require pointer juggling when traversing to fields.
CastType types.Type // Before calling marshalling functions cast from/to this base type
Marshaler *types.Func // When using external marshalling functions this will point to the Marshal function
Unmarshaler *types.Func // When using external marshalling functions this will point to the Unmarshal function
IsMarshaler bool // Does the type implement graphql.Marshaler and graphql.Unmarshaler
IsOmittable bool // Is the type wrapped with Omittable
IsContext bool // Is the Marshaler/Unmarshaller the context version; applies to either the method or interface variety.
PointersInUmarshalInput bool // Inverse values and pointers in return.
}
func (ref *TypeReference) Elem() *TypeReference {
if p, isPtr := ref.GO.(*types.Pointer); isPtr {
newRef := *ref
newRef.GO = p.Elem()
return &newRef
}
if ref.IsSlice() {
newRef := *ref
newRef.GO = ref.GO.(*types.Slice).Elem()
newRef.GQL = ref.GQL.Elem
return &newRef
}
return nil
}
func (ref *TypeReference) IsPtr() bool {
_, isPtr := ref.GO.(*types.Pointer)
return isPtr
}
// fix for https://github.com/golang/go/issues/31103 may make it possible to remove this (may still be useful)
func (ref *TypeReference) IsPtrToPtr() bool {
if p, isPtr := ref.GO.(*types.Pointer); isPtr {
_, isPtr := p.Elem().(*types.Pointer)
return isPtr
}
return false
}
func (ref *TypeReference) IsNilable() bool {
return IsNilable(ref.GO)
}
func (ref *TypeReference) IsSlice() bool {
_, isSlice := ref.GO.(*types.Slice)
return ref.GQL.Elem != nil && isSlice
}
func (ref *TypeReference) IsPtrToSlice() bool {
if ref.IsPtr() {
_, isPointerToSlice := ref.GO.(*types.Pointer).Elem().(*types.Slice)
return isPointerToSlice
}
return false
}
func (ref *TypeReference) IsPtrToIntf() bool {
if ref.IsPtr() {
_, isPointerToInterface := ref.GO.(*types.Pointer).Elem().(*types.Interface)
return isPointerToInterface
}
return false
}
func (ref *TypeReference) IsNamed() bool {
_, isSlice := ref.GO.(*types.Named)
return isSlice
}
func (ref *TypeReference) IsStruct() bool {
_, isStruct := ref.GO.Underlying().(*types.Struct)
return isStruct
}
func (ref *TypeReference) IsScalar() bool {
return ref.Definition.Kind == ast.Scalar
}
func (ref *TypeReference) UniquenessKey() string {
nullability := "O"
if ref.GQL.NonNull {
nullability = "N"
}
elemNullability := ""
if ref.GQL.Elem != nil && ref.GQL.Elem.NonNull {
// Fix for #896
elemNullability = "ᚄ"
}
return nullability + ref.Definition.Name + "2" + TypeIdentifier(ref.GO) + elemNullability
}
func (ref *TypeReference) MarshalFunc() string {
|
func (ref *TypeReference) UnmarshalFunc() string {
if ref.Definition == nil {
panic(errors.New("Definition missing for " + ref.GQL.Name()))
}
if !ref.Definition.IsInputType() {
return ""
}
return "unmarshal" + ref.UniquenessKey()
}
func (ref *TypeReference) IsTargetNilable() bool {
return IsNilable(ref.Target)
}
func (b *Binder) PushRef(ret *TypeReference) {
b.References = append(b.References, ret)
}
func isMap(t types.Type) bool {
if t == nil {
return true
}
_, ok := t.(*types.Map)
return ok
}
func isIntf(t types.Type) bool {
if t == nil {
return true
}
_, ok := t.(*types.Interface)
return ok
}
func unwrapOmittable(t types.Type) (types.Type, bool) {
if t == nil {
return t, false
}
named, ok := t.(*types.Named)
if !ok {
return t, false
}
if named.Origin().String() != "github.com/99designs/gqlgen/graphql.Omittable[T any]" {
return t, false
}
return named.TypeArgs().At(0), true
}
func (b *Binder) TypeReference(schemaType *ast.Type, bindTarget types.Type) (ret *TypeReference, err error) {
if innerType, ok := unwrapOmittable(bindTarget); ok {
if schemaType.NonNull {
return nil, fmt.Errorf("%s is wrapped with Omittable but non-null", schemaType.Name())
}
ref, err := b.TypeReference(schemaType, innerType)
if err != nil {
return nil, err
}
ref.IsOmittable = true
return ref, err
}
if !isValid(bindTarget) {
b.SawInvalid = true
return nil, fmt.Errorf("%s has an invalid type", schemaType.Name())
}
var pkgName, typeName string
def := b.schema.Types[schemaType.Name()]
defer func() {
if err == nil && ret != nil {
b.PushRef(ret)
}
}()
if len(b.cfg.Models[schemaType.Name()].Model) == 0 {
return nil, fmt.Errorf("%s was not found", schemaType.Name())
}
for
|
if ref.Definition == nil {
panic(errors.New("Definition missing for " + ref.GQL.Name()))
}
if ref.Definition.Kind == ast.InputObject {
return ""
}
return "marshal" + ref.UniquenessKey()
}
|
identifier_body
|
binder.go
|
) (types.Type, error) {
models := b.cfg.Models[name].Model
if len(models) == 0 {
return nil, fmt.Errorf(name + " not found in typemap")
}
if models[0] == "map[string]interface{}" {
return MapType, nil
}
if models[0] == "interface{}" {
return InterfaceType, nil
}
pkgName, typeName := code.PkgAndType(models[0])
if pkgName == "" {
return nil, fmt.Errorf("missing package name for %s", name)
}
obj, err := b.FindObject(pkgName, typeName)
if err != nil {
return nil, err
}
return obj.Type(), nil
}
func (b *Binder) FindObject(pkgName string, typeName string) (types.Object, error) {
if pkgName == "" {
return nil, fmt.Errorf("package cannot be nil")
}
pkg := b.pkgs.LoadWithTypes(pkgName)
if pkg == nil {
err := b.pkgs.Errors()
if err != nil {
return nil, fmt.Errorf("package could not be loaded: %s.%s: %w", pkgName, typeName, err)
}
return nil, fmt.Errorf("required package was not loaded: %s.%s", pkgName, typeName)
}
if b.objectCache == nil {
b.objectCache = make(map[string]map[string]types.Object, b.pkgs.Count())
}
defsIndex, ok := b.objectCache[pkgName]
if !ok {
defsIndex = indexDefs(pkg)
b.objectCache[pkgName] = defsIndex
}
// function based marshalers take precedence
if val, ok := defsIndex["Marshal"+typeName]; ok {
return val, nil
}
if val, ok := defsIndex[typeName]; ok {
return val, nil
}
return nil, fmt.Errorf("%w: %s.%s", ErrTypeNotFound, pkgName, typeName)
}
func indexDefs(pkg *packages.Package) map[string]types.Object {
res := make(map[string]types.Object)
scope := pkg.Types.Scope()
for astNode, def := range pkg.TypesInfo.Defs {
// only look at defs in the top scope
if def == nil {
continue
}
parent := def.Parent()
if parent == nil || parent != scope {
continue
}
if _, ok := res[astNode.Name]; !ok {
// The above check may not be really needed, it is only here to have a consistent behavior with
// previous implementation of FindObject() function which only honored the first inclusion of a def.
// If this is still needed, we can consider something like sync.Map.LoadOrStore() to avoid two lookups.
res[astNode.Name] = def
}
}
return res
}
func (b *Binder) PointerTo(ref *TypeReference) *TypeReference {
newRef := *ref
newRef.GO = types.NewPointer(ref.GO)
b.References = append(b.References, &newRef)
return &newRef
}
// TypeReference is used by args and field types. The Definition can refer to both input and output types.
type TypeReference struct {
Definition *ast.Definition
GQL *ast.Type
GO types.Type // Type of the field being bound. Could be a pointer or a value type of Target.
Target types.Type // The actual type that we know how to bind to. May require pointer juggling when traversing to fields.
CastType types.Type // Before calling marshalling functions cast from/to this base type
Marshaler *types.Func // When using external marshalling functions this will point to the Marshal function
Unmarshaler *types.Func // When using external marshalling functions this will point to the Unmarshal function
IsMarshaler bool // Does the type implement graphql.Marshaler and graphql.Unmarshaler
IsOmittable bool // Is the type wrapped with Omittable
IsContext bool // Is the Marshaler/Unmarshaller the context version; applies to either the method or interface variety.
PointersInUmarshalInput bool // Inverse values and pointers in return.
}
func (ref *TypeReference) Elem() *TypeReference {
if p, isPtr := ref.GO.(*types.Pointer); isPtr {
newRef := *ref
newRef.GO = p.Elem()
return &newRef
}
if ref.IsSlice() {
newRef := *ref
newRef.GO = ref.GO.(*types.Slice).Elem()
newRef.GQL = ref.GQL.Elem
return &newRef
}
return nil
}
func (ref *TypeReference) IsPtr() bool {
_, isPtr := ref.GO.(*types.Pointer)
return isPtr
}
// fix for https://github.com/golang/go/issues/31103 may make it possible to remove this (may still be useful)
func (ref *TypeReference) IsPtrToPtr() bool {
if p, isPtr := ref.GO.(*types.Pointer); isPtr {
_, isPtr := p.Elem().(*types.Pointer)
return isPtr
}
return false
}
func (ref *TypeReference) IsNilable() bool {
return IsNilable(ref.GO)
}
func (ref *TypeReference) IsSlice() bool {
_, isSlice := ref.GO.(*types.Slice)
return ref.GQL.Elem != nil && isSlice
}
func (ref *TypeReference) IsPtrToSlice() bool {
if ref.IsPtr() {
_, isPointerToSlice := ref.GO.(*types.Pointer).Elem().(*types.Slice)
return isPointerToSlice
}
return false
}
func (ref *TypeReference) IsPtrToIntf() bool {
if ref.IsPtr() {
_, isPointerToInterface := ref.GO.(*types.Pointer).Elem().(*types.Interface)
return isPointerToInterface
}
return false
}
func (ref *TypeReference) IsNamed() bool {
_, isSlice := ref.GO.(*types.Named)
return isSlice
}
func (ref *TypeReference) IsStruct() bool {
_, isStruct := ref.GO.Underlying().(*types.Struct)
return isStruct
}
func (ref *TypeReference) IsScalar() bool {
return ref.Definition.Kind == ast.Scalar
}
func (ref *TypeReference) UniquenessKey() string {
nullability := "O"
if ref.GQL.NonNull {
nullability = "N"
}
elemNullability := ""
if ref.GQL.Elem != nil && ref.GQL.Elem.NonNull {
// Fix for #896
elemNullability = "ᚄ"
}
return nullability + ref.Definition.Name + "2" + TypeIdentifier(ref.GO) + elemNullability
}
func (ref *TypeReference) MarshalFunc() string {
if ref.Definition == nil {
panic(errors.New("Definition missing for " + ref.GQL.Name()))
}
if ref.Definition.Kind == ast.InputObject {
return ""
}
return "marshal" + ref.UniquenessKey()
}
func (ref *TypeReference) UnmarshalFunc() string {
if ref.Definition == nil {
panic(errors.New("Definition missing for " + ref.GQL.Name()))
}
if !ref.Definition.IsInputType() {
return ""
}
return "unmarshal" + ref.UniquenessKey()
}
func (ref *TypeReference) IsTargetNilable() bool {
return IsNilable(ref.Target)
}
func (b *Binder) PushRef(ret *TypeReference) {
b.References = append(b.References, ret)
}
func isMap(t types.Type) bool {
if t == nil {
return true
}
_, ok := t.(*types.Map)
return ok
}
func isIntf(t types.Type) bool {
if t == nil {
return true
}
_, ok := t.(*types.Interface)
return ok
}
func un
|
types.Type) (types.Type, bool) {
if t == nil {
return t, false
}
named, ok := t.(*types.Named)
if !ok {
return t, false
}
if named.Origin().String() != "github.com/99designs/gqlgen/graphql.Omittable[T any]" {
return t, false
}
return named.TypeArgs().At(0), true
}
func (b *Binder) TypeReference(schemaType *ast.Type, bindTarget types.Type) (ret *TypeReference, err error) {
if innerType, ok := unwrapOmittable(bindTarget); ok {
if schemaType.NonNull {
return nil, fmt.Errorf("%s is wrapped with Omittable but non-null", schemaType.Name())
}
ref, err := b.TypeReference(schemaType, innerType)
if err != nil {
return nil, err
}
ref.IsOmittable = true
return ref, err
}
if !isValid(bindTarget) {
b.SawInvalid = true
return nil, fmt.Errorf("%s has an invalid type", schemaType.Name())
}
var pkgName, typeName string
def := b.schema.Types[schemaType.Name()]
defer func() {
if err == nil && ret != nil {
b.PushRef(ret)
}
}()
if len(b.cfg.Models[schemaType.Name()].Model) == 0 {
return nil, fmt.Errorf("%s was not found", schemaType.Name())
}
|
wrapOmittable(t
|
identifier_name
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.