id
int64 0
843k
| repository_name
stringlengths 7
55
| file_path
stringlengths 9
332
| class_name
stringlengths 3
290
| human_written_code
stringlengths 12
4.36M
| class_skeleton
stringlengths 19
2.2M
| total_program_units
int64 1
9.57k
| total_doc_str
int64 0
4.2k
| AvgCountLine
float64 0
7.89k
| AvgCountLineBlank
float64 0
300
| AvgCountLineCode
float64 0
7.89k
| AvgCountLineComment
float64 0
7.89k
| AvgCyclomatic
float64 0
130
| CommentToCodeRatio
float64 0
176
| CountClassBase
float64 0
48
| CountClassCoupled
float64 0
589
| CountClassCoupledModified
float64 0
581
| CountClassDerived
float64 0
5.37k
| CountDeclInstanceMethod
float64 0
4.2k
| CountDeclInstanceVariable
float64 0
299
| CountDeclMethod
float64 0
4.2k
| CountDeclMethodAll
float64 0
4.2k
| CountLine
float64 1
115k
| CountLineBlank
float64 0
9.01k
| CountLineCode
float64 0
94.4k
| CountLineCodeDecl
float64 0
46.1k
| CountLineCodeExe
float64 0
91.3k
| CountLineComment
float64 0
27k
| CountStmt
float64 1
93.2k
| CountStmtDecl
float64 0
46.1k
| CountStmtExe
float64 0
90.2k
| MaxCyclomatic
float64 0
759
| MaxInheritanceTree
float64 0
16
| MaxNesting
float64 0
34
| SumCyclomatic
float64 0
6k
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3,800 |
AlexandreDecan/python-intervals
|
AlexandreDecan_python-intervals/portion/const.py
|
portion.const._PInf
|
class _PInf(_Singleton):
"""
Represent positive infinity.
"""
def __neg__(self):
return _NInf()
def __lt__(self, o):
return False
def __le__(self, o):
return isinstance(o, _PInf)
def __gt__(self, o):
return not isinstance(o, _PInf)
def __ge__(self, o):
return True
def __eq__(self, o):
return isinstance(o, _PInf)
def __repr__(self):
return "+inf"
def __hash__(self):
return hash(float("+inf"))
|
class _PInf(_Singleton):
'''
Represent positive infinity.
'''
def __neg__(self):
pass
def __lt__(self, o):
pass
def __le__(self, o):
pass
def __gt__(self, o):
pass
def __ge__(self, o):
pass
def __eq__(self, o):
pass
def __repr__(self):
pass
def __hash__(self):
pass
| 9 | 1 | 2 | 0 | 2 | 0 | 1 | 0.18 | 1 | 2 | 1 | 0 | 8 | 0 | 8 | 9 | 28 | 8 | 17 | 9 | 8 | 3 | 17 | 9 | 8 | 1 | 1 | 0 | 8 |
3,801 |
AlexandreDecan/python-intervals
|
AlexandreDecan_python-intervals/portion/const.py
|
portion.const._NInf
|
class _NInf(_Singleton):
"""
Represent negative infinity.
"""
def __neg__(self):
return _PInf()
def __lt__(self, o):
return not isinstance(o, _NInf)
def __le__(self, o):
return True
def __gt__(self, o):
return False
def __ge__(self, o):
return isinstance(o, _NInf)
def __eq__(self, o):
return isinstance(o, _NInf)
def __repr__(self):
return "-inf"
def __hash__(self):
return hash(float("-inf"))
|
class _NInf(_Singleton):
'''
Represent negative infinity.
'''
def __neg__(self):
pass
def __lt__(self, o):
pass
def __le__(self, o):
pass
def __gt__(self, o):
pass
def __ge__(self, o):
pass
def __eq__(self, o):
pass
def __repr__(self):
pass
def __hash__(self):
pass
| 9 | 1 | 2 | 0 | 2 | 0 | 1 | 0.18 | 1 | 2 | 1 | 0 | 8 | 0 | 8 | 9 | 28 | 8 | 17 | 9 | 8 | 3 | 17 | 9 | 8 | 1 | 1 | 0 | 8 |
3,802 |
AlexandreDecan/python-intervals
|
AlexandreDecan_python-intervals/portion/const.py
|
portion.const.Bound
|
class Bound(enum.Enum):
"""
Bound types, either CLOSED for inclusive, or OPEN for exclusive.
"""
CLOSED = True
OPEN = False
def __bool__(self):
raise ValueError("The truth value of a bound is ambiguous.")
def __invert__(self):
return Bound.CLOSED if self is Bound.OPEN else Bound.OPEN
def __str__(self):
return self.name
def __repr__(self):
return self.name
|
class Bound(enum.Enum):
'''
Bound types, either CLOSED for inclusive, or OPEN for exclusive.
'''
def __bool__(self):
pass
def __invert__(self):
pass
def __str__(self):
pass
def __repr__(self):
pass
| 5 | 1 | 2 | 0 | 2 | 0 | 1 | 0.27 | 1 | 1 | 0 | 0 | 4 | 0 | 4 | 53 | 19 | 5 | 11 | 7 | 6 | 3 | 11 | 7 | 6 | 2 | 4 | 0 | 5 |
3,803 |
AlexandreDecan/python-intervals
|
AlexandreDecan_python-intervals/tests/test_func.py
|
tests.test_func.TestIterate
|
class TestIterate:
def test_default_parameters(self):
assert list(P.iterate(P.closed(0, 2), step=1)) == [0, 1, 2]
assert list(P.iterate(P.closedopen(0, 2), step=1)) == [0, 1]
assert list(P.iterate(P.openclosed(0, 2), step=1)) == [1, 2]
assert list(P.iterate(P.open(0, 2), step=1)) == [1]
assert list(P.iterate(P.open(0, 2.5), step=1)) == [1, 2]
def test_empty_intervals(self):
assert list(P.iterate(P.empty(), step=1)) == []
assert list(P.iterate(P.open(0, 1), step=1)) == []
def test_open_intervals(self):
with pytest.raises(ValueError):
list(P.iterate(P.openclosed(-P.inf, 2), step=1))
gen = P.iterate(P.closedopen(0, P.inf), step=1)
assert next(gen) == 0
assert next(gen) == 1
assert next(gen) == 2 # and so on
def test_with_gaps(self):
assert list(P.iterate(P.closed(0, 1) | P.closed(5, 6), step=1)) == [0, 1, 5, 6]
assert list(P.iterate(P.closed(0, 1) | P.closed(2.5, 4), step=1)) == [0, 1, 2.5, 3.5]
assert list(P.iterate(P.open(0, 1) | P.open(1, 2), step=1)) == []
assert list(P.iterate(P.open(0.5, 1) | P.open(1, 3), step=1)) == [2]
def test_with_step(self):
assert list(P.iterate(P.closed(0, 6), step=2)) == [0, 2, 4, 6]
assert list(P.iterate(P.closed(0, 6), step=4)) == [0, 4]
assert list(P.iterate(P.closed(0, 6), step=lambda x: x + 2)) == [0, 2, 4, 6]
def test_with_base(self):
assert list(P.iterate(P.closed(0.4, 2), step=1, base=lambda x: round(x))) == [1, 2]
assert list(P.iterate(P.closed(0.6, 2), step=1, base=lambda x: round(x))) == [1, 2]
def test_reversed_iteration(self):
assert list(P.iterate(P.closed(0, 1), step=-1, reverse=True)) == [1, 0]
assert list(P.iterate(P.open(0, 3), step=-1, reverse=True)) == [2, 1]
assert list(P.iterate(P.closed(0, 1), step=-0.5, reverse=True)) == [1, 0.5, 0]
assert list(P.iterate(P.closed(0, 2), step=-1, base=lambda x: x-1, reverse=True)) == [1, 0]
assert list(P.iterate(P.closed(0, 2) | P.closed(4, 5), step=-1, reverse=True)) == [5, 4, 2, 1, 0]
def test_reversed_iteration_with_open_intervals(self):
with pytest.raises(ValueError):
list(P.iterate(P.closedopen(0, P.inf), step=-1, reverse=True))
gen = P.iterate(P.openclosed(-P.inf, 0), step=-1, reverse=True)
assert next(gen) == 0
assert next(gen) == -1
assert next(gen) == -2
|
class TestIterate:
def test_default_parameters(self):
pass
def test_empty_intervals(self):
pass
def test_open_intervals(self):
pass
def test_with_gaps(self):
pass
def test_with_step(self):
pass
def test_with_base(self):
pass
def test_reversed_iteration(self):
pass
def test_reversed_iteration_with_open_intervals(self):
pass
| 9 | 0 | 5 | 0 | 5 | 0 | 1 | 0.05 | 0 | 2 | 0 | 0 | 8 | 0 | 8 | 8 | 51 | 9 | 42 | 11 | 33 | 2 | 42 | 11 | 33 | 1 | 0 | 1 | 8 |
3,804 |
AlexandreDecan/python-intervals
|
AlexandreDecan_python-intervals/tests/test_interval.py
|
tests.test_interval.TestRepr
|
class TestRepr:
def test_simple(self):
assert repr(P.closed(0, 1)) == '[0,1]'
assert repr(P.openclosed(0, 1)) == '(0,1]'
assert repr(P.closedopen(0, 1)) == '[0,1)'
assert repr(P.open(0, 1)) == '(0,1)'
def test_infinities(self):
assert repr(P.closed(-P.inf, P.inf)) == '(-inf,+inf)'
def test_empty(self):
assert repr(P.empty()) == '()'
def test_singleton(self):
assert repr(P.singleton(4)) == '[4]'
def test_union(self):
assert repr(P.closed(0, 1) | P.open(3, 4)) == '[0,1] | (3,4)'
# https://github.com/AlexandreDecan/portion/issues/22
assert repr(P.singleton(1) | P.singleton(2)) == '[1] | [2]'
|
class TestRepr:
def test_simple(self):
pass
def test_infinities(self):
pass
def test_empty(self):
pass
def test_singleton(self):
pass
def test_union(self):
pass
| 6 | 0 | 3 | 0 | 3 | 0 | 1 | 0.07 | 0 | 0 | 0 | 0 | 5 | 0 | 5 | 5 | 20 | 4 | 15 | 6 | 9 | 1 | 15 | 6 | 9 | 1 | 0 | 0 | 5 |
3,805 |
AlexandreDecan/python-intervals
|
AlexandreDecan_python-intervals/tests/test_interval.py
|
tests.test_interval.TestIntervalReplace
|
class TestIntervalReplace:
def test_replace_bounds(self):
i = P.open(-P.inf, P.inf)
assert i.replace(lower=lambda v: 1, upper=lambda v: 1) == P.open(-P.inf, P.inf)
assert i.replace(lower=lambda v: 1, upper=lambda v: 2, ignore_inf=False) == P.open(1, 2)
def test_replace_values(self):
i = P.open(0, 1)
assert i.replace(left=P.CLOSED, right=P.CLOSED) == P.closed(0, 1)
assert i.replace(lower=1, upper=2) == P.open(1, 2)
assert i.replace(lower=lambda v: 1, upper=lambda v: 2) == P.open(1, 2)
def test_replace_values_on_infinities(self):
i = P.open(-P.inf, P.inf)
assert i.replace(lower=lambda v: 1, upper=lambda v: 2) == i
assert i.replace(lower=lambda v: 1, upper=lambda v: 2, ignore_inf=False) == P.open(1, 2)
def test_replace_with_union(self):
i = P.closed(0, 1) | P.open(2, 3)
assert i.replace() == i
assert i.replace(P.OPEN, -1, 4, P.OPEN) == P.openclosed(-1, 1) | P.open(2, 4)
assert i.replace(lower=2) == P.closedopen(2, 3)
assert i.replace(upper=1) == P.closedopen(0, 1)
assert i.replace(lower=5) == P.empty()
assert i.replace(upper=-5) == P.empty()
assert i.replace(left=lambda v: ~v, lower=lambda v: v - 1, upper=lambda v: v + 1, right=lambda v: ~v) == P.openclosed(-1, 1) | P.openclosed(2, 4)
def test_replace_with_empty(self):
assert P.empty().replace(left=P.CLOSED, right=P.CLOSED) == P.empty()
assert P.empty().replace(lower=1, upper=2) == P.open(1, 2)
assert P.empty().replace(lower=lambda v: 1, upper=lambda v: 2) == P.empty()
assert P.empty().replace(lower=lambda v: 1, upper=lambda v: 2, ignore_inf=False) == P.open(1, 2)
|
class TestIntervalReplace:
def test_replace_bounds(self):
pass
def test_replace_values(self):
pass
def test_replace_values_on_infinities(self):
pass
def test_replace_with_union(self):
pass
def test_replace_with_empty(self):
pass
| 6 | 0 | 5 | 0 | 5 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 5 | 0 | 5 | 5 | 32 | 4 | 28 | 10 | 22 | 0 | 28 | 10 | 22 | 1 | 0 | 0 | 5 |
3,806 |
AlexandreDecan/python-intervals
|
AlexandreDecan_python-intervals/tests/test_interval.py
|
tests.test_interval.TestIntervalOverlaps
|
class TestIntervalOverlaps():
def test_overlaps(self):
assert P.closed(1, 2).overlaps(P.closed(2, 3))
assert P.closed(1, 2).overlaps(P.closedopen(2, 3))
assert P.openclosed(1, 2).overlaps(P.closed(2, 3))
assert P.openclosed(1, 2).overlaps(P.closedopen(2, 3))
def test_overlaps_with_nonoverlaping(self):
assert not P.closed(0, 1).overlaps(P.closed(3, 4))
assert not P.closed(3, 4).overlaps(P.closed(0, 1))
def test_overlaps_with_edge_cases(self):
assert not P.closed(0, 1).overlaps(P.open(1, 2))
assert not P.closed(0, 1).overlaps(P.openclosed(1, 2))
assert not P.closedopen(0, 1).overlaps(P.closed(1, 2))
assert not P.closedopen(0, 1).overlaps(P.closedopen(1, 2))
assert not P.closedopen(0, 1).overlaps(P.openclosed(1, 2))
assert not P.closedopen(0, 1).overlaps(P.open(1, 2))
assert not P.open(0, 1).overlaps(P.open(1, 2))
assert P.open(0, 2).overlaps(P.open(0, 1))
assert P.open(0, 1).overlaps(P.open(0, 2))
def test_overlaps_with_empty(self):
assert not P.empty().overlaps(P.open(-P.inf, P.inf))
assert not P.open(-P.inf, P.inf).overlaps(P.empty())
def test_overlaps_with_itself(self):
assert P.closed(0, 1).overlaps(P.closed(0, 1))
assert P.closed(0, 1).overlaps(P.open(0, 1))
assert P.open(0, 1).overlaps(P.closed(0, 1))
assert P.closed(0, 1).overlaps(P.openclosed(0, 1))
assert P.closed(0, 1).overlaps(P.closedopen(0, 1))
def test_overlaps_with_incompatible_types(self):
with pytest.raises(TypeError):
P.closed(0, 1).overlaps(1)
|
class TestIntervalOverlaps():
def test_overlaps(self):
pass
def test_overlaps_with_nonoverlaping(self):
pass
def test_overlaps_with_edge_cases(self):
pass
def test_overlaps_with_empty(self):
pass
def test_overlaps_with_itself(self):
pass
def test_overlaps_with_incompatible_types(self):
pass
| 7 | 0 | 5 | 0 | 5 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 6 | 0 | 6 | 6 | 36 | 5 | 31 | 7 | 24 | 0 | 31 | 7 | 24 | 1 | 0 | 1 | 6 |
3,807 |
AlexandreDecan/python-intervals
|
AlexandreDecan_python-intervals/tests/test_interval.py
|
tests.test_interval.TestIntervalIteration
|
class TestIntervalIteration:
def test_length(self):
i1 = P.closed(10, 10) | P.closed(5, 6) | P.closed(7, 8) | P.closed(8, 9)
assert len(i1) == 3
def test_containment(self):
i1 = P.closed(10, 10) | P.closed(5, 6) | P.closed(7, 8) | P.closed(8, 9)
for i in i1:
assert i in i1
def test_order(self):
i1 = P.closed(10, 10) | P.closed(5, 6) | P.closed(7, 8) | P.closed(8, 9)
assert sorted(i1, key=lambda i: i.lower) == list(i1)
assert sorted(i1, key=lambda i: i.upper) == list(i1)
def test_indexes(self):
i1 = P.closed(10, 10) | P.closed(5, 6) | P.closed(7, 8) | P.closed(8, 9)
assert i1[0] == P.closed(5, 6)
assert i1[1] == P.closed(7, 9)
assert i1[2] == P.closed(10, 10)
assert i1[-1] == P.closed(10, 10)
def test_slices(self):
items = [P.closed(5, 6), P.closed(7, 9), P.singleton(10)]
interval = P.Interval(*items)
assert interval[:] == P.Interval(*items)
assert interval[:2] == P.Interval(*items[:2])
assert interval[::-1] == P.Interval(*items[::-1])
assert interval[::2] == P.Interval(*items[::2])
def test_missing_index(self):
i1 = P.closed(10, 10) | P.closed(5, 6) | P.closed(7, 8) | P.closed(8, 9)
with pytest.raises(IndexError):
i1[3]
def test_empty(self):
assert len(P.empty()) == 0
assert list(P.empty()) == []
with pytest.raises(IndexError):
P.empty()[0]
|
class TestIntervalIteration:
def test_length(self):
pass
def test_containment(self):
pass
def test_order(self):
pass
def test_indexes(self):
pass
def test_slices(self):
pass
def test_missing_index(self):
pass
def test_empty(self):
pass
| 8 | 0 | 5 | 0 | 5 | 0 | 1 | 0 | 0 | 3 | 1 | 0 | 7 | 0 | 7 | 7 | 40 | 6 | 34 | 16 | 26 | 0 | 34 | 16 | 26 | 2 | 0 | 1 | 8 |
3,808 |
AlexandreDecan/python-intervals
|
AlexandreDecan_python-intervals/tests/test_interval.py
|
tests.test_interval.TestIntervalIntersection
|
class TestIntervalIntersection:
def test_with_itself(self):
assert P.closed(0, 1) & P.closed(0, 1) == P.closed(0, 1)
assert P.closed(0, 1) & P.open(0, 1) == P.open(0, 1)
assert P.openclosed(0, 1) & P.closedopen(0, 1) == P.open(0, 1)
def test_with_adjacent(self):
assert P.closed(0, 2) & P.closed(2, 4) == P.singleton(2)
assert P.open(0, 2) & P.open(2, 4) == P.empty()
def test_with_containment(self):
assert P.closed(0, 4) & P.closed(2, 3) == P.closed(2, 3)
assert P.closed(0, 4) & P.closed(0, 3) == P.closed(0, 3)
assert P.closed(0, 4) & P.open(2, 3) == P.open(2, 3)
assert P.open(0, 4) & P.closed(2, 3) == P.closed(2, 3)
assert P.open(0, 4) & P.closed(3, 4) == P.closedopen(3, 4)
def test_with_overlap(self):
assert P.closed(0, 3) & P.closed(2, 4) == P.closed(2, 3)
assert P.open(0, 3) & P.closed(2, 4) == P.closedopen(2, 3)
def test_with_union(self):
assert (P.closed(0, 2) | P.closed(4, 6)) & (P.closed(0, 1) | P.closed(4, 5)) == P.closed(0, 1) | P.closed(4, 5)
assert (P.closed(0, 2) | P.closed(4, 6)) & (P.closed(-1, 1) | P.closed(3, 6)) == P.closed(0, 1) | P.closed(4, 6)
assert (P.closed(0, 2) | P.closed(4, 6)) & (P.closed(1, 4) | P.singleton(5)) == P.closed(1, 2) | P.singleton(4) | P.singleton(5)
def test_empty(self):
assert (P.closed(0, 1) & P.closed(2, 3)).empty
assert P.closed(0, 1) & P.empty() == P.empty()
def test_proxy_method(self):
i1, i2 = P.closed(0, 1), P.closed(2, 3)
assert i1 & i2 == i1.intersection(i2)
def test_with_invalid_type(self):
with pytest.raises(TypeError):
P.closed(0, 1) & 1
|
class TestIntervalIntersection:
def test_with_itself(self):
pass
def test_with_adjacent(self):
pass
def test_with_containment(self):
pass
def test_with_overlap(self):
pass
def test_with_union(self):
pass
def test_empty(self):
pass
def test_proxy_method(self):
pass
def test_with_invalid_type(self):
pass
| 9 | 0 | 4 | 0 | 4 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 8 | 0 | 8 | 8 | 37 | 7 | 30 | 10 | 21 | 0 | 30 | 10 | 21 | 1 | 0 | 1 | 8 |
3,809 |
AlexandreDecan/python-intervals
|
AlexandreDecan_python-intervals/tests/test_interval.py
|
tests.test_interval.TestIntervalContainment
|
class TestIntervalContainment:
def test_with_values(self):
assert 1 in P.closed(0, 2)
assert 1 in P.closed(1, 2)
assert 1 in P.closed(0, 1)
assert 1 in P.open(0, 2)
assert 1 not in P.open(0, 1)
assert 1 not in P.open(1, 2)
assert 1 in P.closed(0, 2) | P.closed(4, 6) | P.closed(8, 10)
assert 5 in P.closed(0, 2) | P.closed(4, 6) | P.closed(8, 10)
assert 10 in P.closed(0, 2) | P.closed(4, 6) | P.closed(8, 10)
assert -1 not in P.closed(0, 2) | P.closed(4, 6) | P.closed(8, 10)
assert 3 not in P.closed(0, 2) | P.closed(4, 6) | P.closed(8, 10)
assert 7 not in P.closed(0, 2) | P.closed(4, 6) | P.closed(8, 10)
assert 11 not in P.closed(0, 2) | P.closed(4, 6) | P.closed(8, 10)
def test_with_infinities(self):
assert 1 in P.closed(-P.inf, P.inf)
assert 1 in P.closed(-P.inf, 1)
assert 1 in P.closed(1, P.inf)
assert 1 not in P.closed(-P.inf, 0)
assert 1 not in P.closed(2, P.inf)
assert P.inf not in P.closed(-P.inf, P.inf)
assert -P.inf not in P.closed(-P.inf, P.inf)
assert P.inf not in P.closed(0, 1)
def test_with_empty(self):
assert 1 not in P.empty()
assert P.inf not in P.empty()
assert -P.inf not in P.empty()
def test_with_intervals(self):
assert P.closed(1, 2) in P.closed(0, 3)
assert P.closed(1, 2) in P.closed(1, 2)
assert P.open(1, 2) in P.closed(1, 2)
assert P.closed(1, 2) not in P.open(1, 2)
assert P.closed(0, 1) not in P.closed(1, 2)
assert P.closed(0, 2) not in P.closed(1, 3)
assert P.closed(-P.inf, P.inf) in P.closed(-P.inf, P.inf)
assert P.closed(0, 1) in P.closed(-P.inf, P.inf)
assert P.closed(-P.inf, P.inf) not in P.closed(0, 1)
assert P.singleton(0) | P.singleton(5) in P.closed(0, 5)
assert P.singleton(0) | P.singleton(5) in P.closed(0, 1) | P.closed(4, 5)
# https://github.com/AlexandreDecan/portion/issues/28
assert P.closed(5, 6) not in P.closed(1, 2) | P.closed(3, 4)
assert P.singleton(0) | P.singleton(6) not in P.closed(0, 1) | P.closed(4, 5)
def test_with_unions(self):
assert P.closed(0, 1) | P.closed(2, 3) in P.closed(0, 4)
assert P.closed(0, 1) | P.closed(2, 3) in P.closed(0, 1) | P.closed(2, 3)
assert P.closed(0, 1) | P.closed(2, 3) in P.closed(0, 0) | P.closed(0, 1) | P.closed(2, 3)
assert P.closed(0, 1) | P.closed(2, 3) not in P.closed(0, 2)
assert P.closed(0, 1) | P.closed(2, 3) not in P.closed(0, 1) | P.closedopen(2, 3)
assert P.closed(0, 1) | P.closed(2, 3) not in P.closed(0, 1) | P.closedopen(2, 3) | P.openclosed(3, 4)
def test_with_empty_intervals(self):
assert P.empty() in P.closed(0, 3)
assert P.empty() in P.empty()
assert P.closed(0, 0) not in P.empty()
assert P.singleton(0) | P.singleton(1) not in P.empty()
def test_proxy_method(self):
i1, i2 = P.closed(0, 1), P.closed(2, 3)
assert (1 in i1) == i1.contains(1)
assert (i1 in i2) == i2.contains(i1)
def test_issue_41(self):
# https://github.com/AlexandreDecan/portion/issues/41
assert P.empty() in P.closed(0, 1)
assert P.empty() in P.closed(0, 1) | P.closed(2, 3)
|
class TestIntervalContainment:
def test_with_values(self):
pass
def test_with_infinities(self):
pass
def test_with_empty(self):
pass
def test_with_intervals(self):
pass
def test_with_unions(self):
pass
def test_with_empty_intervals(self):
pass
def test_proxy_method(self):
pass
def test_issue_41(self):
pass
| 9 | 0 | 9 | 1 | 8 | 0 | 1 | 0.03 | 0 | 0 | 0 | 0 | 8 | 0 | 8 | 8 | 77 | 14 | 61 | 10 | 52 | 2 | 61 | 10 | 52 | 1 | 0 | 0 | 8 |
3,810 |
AlexandreDecan/python-intervals
|
AlexandreDecan_python-intervals/tests/test_interval.py
|
tests.test_interval.TestIntervalApply
|
class TestIntervalApply:
def test_apply(self):
i = P.closed(0, 1)
assert i.apply(lambda s: s) == i
assert i.apply(lambda s: (P.OPEN, -1, 2, P.OPEN)) == P.open(-1, 2)
assert i.apply(lambda s: P.Interval.from_atomic(P.OPEN, -1, 2, P.OPEN)) == P.open(-1, 2)
assert i.apply(lambda s: P.open(-1, 2)) == P.open(-1, 2)
def test_apply_on_unions(self):
i = P.closed(0, 1) | P.closed(2, 3)
assert i.apply(lambda s: s) == i
assert i.apply(lambda s: (P.OPEN, -1, 2, P.OPEN)) == P.open(-1, 2)
assert i.apply(lambda s: (~s.left, s.lower - 1, s.upper - 1, ~s.right)) == P.open(-1, 0) | P.open(1, 2)
assert i.apply(lambda s: P.Interval.from_atomic(P.OPEN, -1, 2, P.OPEN)) == P.open(-1, 2)
assert i.apply(lambda s: P.open(-1, 2)) == P.open(-1, 2)
assert i.apply(lambda s: (s.left, s.lower, s.upper * 2, s.right)) == P.closed(0, 6)
def test_apply_on_empty(self):
assert P.empty().apply(lambda s: (P.CLOSED, 1, 2, P.CLOSED)) == P.empty()
def test_apply_with_incorrect_types(self):
i = P.closed(0, 1)
with pytest.raises(TypeError):
i.apply(lambda s: None)
with pytest.raises(TypeError):
i.apply(lambda s: 'unsupported')
|
class TestIntervalApply:
def test_apply(self):
pass
def test_apply_on_unions(self):
pass
def test_apply_on_empty(self):
pass
def test_apply_with_incorrect_types(self):
pass
| 5 | 0 | 6 | 1 | 6 | 0 | 1 | 0 | 0 | 2 | 1 | 0 | 4 | 0 | 4 | 4 | 28 | 5 | 23 | 8 | 18 | 0 | 23 | 8 | 18 | 1 | 0 | 1 | 4 |
3,811 |
AlexandreDecan/python-intervals
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/AlexandreDecan_python-intervals/tests/test_interval.py
|
tests.test_interval.TestIntervalComparison
|
class TestIntervalComparison:
@pytest.mark.parametrize('i1,i2,i3', [
(P.closed(0, 1), P.closed(1, 2), P.closed(2, 3)),
(P.open(0, 2), P.open(1, 3), P.open(2, 4)),
])
def test_equalities(self, i1, i2, i3):
assert i1 == i1
assert i1 != i2 and i2 != i1
assert i1 != i3 and i3 != i1
assert i2 != i3 and i3 != i2
assert not i1 == 1
@pytest.mark.parametrize('i1,i2,i3', [
(P.closed(0, 1), P.closed(1, 2), P.closed(2, 3)),
(P.open(0, 2), P.open(1, 3), P.open(2, 4)),
])
def test_inequalities(self, i1, i2, i3):
assert i1 < i3 and i3 > i1
assert i1 <= i2 and i2 >= i1
assert i1 <= i3 and i3 >= i1
assert not i1 < i2 and not i2 > i1
def test_with_intervals(self):
i1, i2, i3 = P.closed(0, 1), P.closed(1, 2), P.closed(2, 3)
i4 = i1 | i3
assert i4 != i2 and i2 != i4
assert not i4 < i2 and not i2 > i4
assert not i4 > i2 and not i2 < i4
assert not i4 <= i2
assert not i4 >= i2
assert i2 <= i4
assert i2 >= i4
i5 = P.closed(5, 6) | P.closed(7, 8)
assert i4 != i5 and i5 != i4
assert i4 < i5 and i5 > i4
assert not i4 > i5 and not i5 < i4
assert i4 <= i5
assert i5 >= i4
assert not i4 >= i5
assert not i5 <= i4
def test_with_empty(self):
assert not (P.empty() < P.empty())
assert not (P.empty() <= P.empty())
assert not (P.empty() > P.empty())
assert not (P.empty() >= P.empty())
assert not (P.empty() < P.closed(2, 3))
assert not (P.empty() <= P.closed(2, 3))
assert not (P.empty() > P.closed(2, 3))
assert not (P.empty() >= P.closed(2, 3))
assert not (P.closed(2, 3) < P.empty())
assert not (P.closed(2, 3) > P.empty())
assert not (P.closed(2, 3) <= P.empty())
assert not (P.closed(2, 3) >= P.empty())
def test_with_empty_and_infinities(self):
assert not (P.empty() < P.closedopen(0, P.inf))
assert not (P.empty() <= P.closedopen(0, P.inf))
assert not (P.empty() > P.closedopen(0, P.inf))
assert not (P.empty() >= P.closedopen(0, P.inf))
assert not (P.closedopen(0, P.inf) < P.empty())
assert not (P.closedopen(0, P.inf) > P.empty())
assert not (P.closedopen(0, P.inf) <= P.empty())
assert not (P.closedopen(0, P.inf) >= P.empty())
def test_edge_cases(self):
assert not (P.closed(0, 2) >= P.open(0, 1))
assert not (P.closed(0, 2) >= P.openclosed(0, 1))
def test_with_values(self):
with pytest.deprecated_call():
assert 0 < P.closed(1, 2)
with pytest.deprecated_call():
assert not (0 < P.closed(-1, 1))
with pytest.deprecated_call():
assert not (0 < P.closed(0, 1))
with pytest.deprecated_call():
assert 0 < P.open(0, 1)
# assert 0 <= P.closed(1, 2)
# assert 0 <= P.open(0, 1)
# assert 0 <= P.closed(-1, 1)
# assert not (0 <= P.closed(-2, -1))
# assert not (0 <= P.open(-1, 0))
with pytest.deprecated_call():
assert P.closed(1, 2) > 0
with pytest.deprecated_call():
assert not (P.closed(-1, 1) > 0)
with pytest.deprecated_call():
assert not (P.closed(0, 1) > 0)
with pytest.deprecated_call():
assert P.open(0, 1) > 0
with pytest.deprecated_call():
assert P.closed(1, 2) >= 0
with pytest.deprecated_call():
assert P.open(0, 1) >= 0
with pytest.deprecated_call():
assert not (P.closed(-1, 1) >= 0)
with pytest.deprecated_call():
assert not (P.closed(-2, -1) >= 0)
with pytest.deprecated_call():
assert not (P.open(-1, 0) >= 0)
with pytest.deprecated_call():
assert not (0 < P.empty())
with pytest.deprecated_call():
assert not (0 <= P.empty())
with pytest.deprecated_call():
assert not (0 > P.empty())
with pytest.deprecated_call():
assert not (0 >= P.empty())
with pytest.deprecated_call():
assert not (P.empty() < 0)
with pytest.deprecated_call():
assert not (P.empty() <= 0)
with pytest.deprecated_call():
assert not (P.empty() > 0)
with pytest.deprecated_call():
assert not (P.empty() >= 0)
|
class TestIntervalComparison:
@pytest.mark.parametrize('i1,i2,i3', [
(P.closed(0, 1), P.closed(1, 2), P.closed(2, 3)),
(P.open(0, 2), P.open(1, 3), P.open(2, 4)),
])
def test_equalities(self, i1, i2, i3):
pass
@pytest.mark.parametrize('i1,i2,i3', [
(P.closed(0, 1), P.closed(1, 2), P.closed(2, 3)),
(P.open(0, 2), P.open(1, 3), P.open(2, 4)),
])
def test_inequalities(self, i1, i2, i3):
pass
def test_with_intervals(self):
pass
def test_with_empty(self):
pass
def test_with_empty_and_infinities(self):
pass
def test_edge_cases(self):
pass
def test_with_values(self):
pass
| 10 | 0 | 19 | 4 | 14 | 1 | 1 | 0.05 | 0 | 0 | 0 | 0 | 7 | 0 | 7 | 7 | 145 | 34 | 106 | 19 | 90 | 5 | 98 | 11 | 90 | 1 | 0 | 1 | 7 |
3,812 |
AlexandreDecan/python-intervals
|
AlexandreDecan_python-intervals/tests/test_interval.py
|
tests.test_interval.TestInterval
|
class TestInterval:
def test_creation(self):
assert P.Interval() == P.empty()
assert P.Interval(P.closed(0, 1)) == P.closed(0, 1)
assert P.Interval(P.closed(0, 1)) == P.closed(0, 1)
assert P.Interval(P.closed(0, 1), P.closed(2, 3)) == P.closed(0, 1) | P.closed(2, 3)
assert P.Interval(P.closed(0, 1) | P.closed(2, 3)) == P.closed(0, 1) | P.closed(2, 3)
with pytest.raises(TypeError):
P.Interval(1)
def test_creation_issue_19(self):
# https://github.com/AlexandreDecan/python-intervals/issues/19
assert P.Interval(P.empty(), P.empty()) == P.empty()
def test_bounds(self):
i = P.openclosed(1, 2)
assert i.left == P.OPEN
assert i.right == P.CLOSED
assert i.lower == 1
assert i.upper == 2
def test_bounds_on_empty(self):
i = P.empty()
assert i.left == P.OPEN
assert i.right == P.OPEN
assert i.lower == P.inf
assert i.upper == -P.inf
i = P.openclosed(10, -10)
assert i.left == P.OPEN
assert i.right == P.OPEN
assert i.lower == P.inf
assert i.upper == -P.inf
i = P.open(0, 1) | P.closed(3, 4)
assert i.left == P.OPEN
assert i.right == P.CLOSED
assert i.lower == 0
assert i.upper == 4
def test_bounds_on_union(self):
i = P.closedopen(0, 1) | P.openclosed(3, 4)
assert i.left == P.CLOSED
assert i.right == P.CLOSED
assert i.lower == 0
assert i.upper == 4
def test_is_empty(self):
assert P.openclosed(1, 1).empty
assert P.closedopen(1, 1).empty
assert P.open(1, 1).empty
assert not P.closed(1, 1).empty
assert P.Interval().empty
assert P.empty().empty
def test_hash_with_hashable(self):
assert hash(P.closed(0, 1)) is not None
assert hash(P.closed(0, 1)) != hash(P.closed(1, 2))
assert hash(P.openclosed(-P.inf, 0)) is not None
assert hash(P.closedopen(0, P.inf)) is not None
assert hash(P.empty()) is not None
assert hash(P.closed(0, 1) | P.closed(3, 4)) is not None
assert hash(P.closed(0, 1) | P.closed(3, 4)) != hash(P.closed(0, 1))
assert hash(P.closed(0, 1) | P.closed(3, 4)) != hash(P.closed(3, 4))
def test_hash_with_unhashable(self):
# Let's create a comparable but no hashable object
class T(int):
def __hash__(self):
raise TypeError()
x = P.closed(T(1), T(2))
with pytest.raises(TypeError):
hash(x)
with pytest.raises(TypeError):
hash(x | P.closed(3, 4))
with pytest.raises(TypeError):
hash(P.closed(-1, 0) | x)
# Not guaranteed to work
assert hash(P.closed(-1, 0) | x | P.closed(3, 4)) is not None
def test_enclosure(self):
assert P.closed(0, 1) == P.closed(0, 1).enclosure
assert P.open(0, 1) == P.open(0, 1).enclosure
assert P.closed(0, 4) == (P.closed(0, 1) | P.closed(3, 4)).enclosure
assert P.openclosed(0, 4) == (P.open(0, 1) | P.closed(3, 4)).enclosure
|
class TestInterval:
def test_creation(self):
pass
def test_creation_issue_19(self):
pass
def test_bounds(self):
pass
def test_bounds_on_empty(self):
pass
def test_bounds_on_union(self):
pass
def test_is_empty(self):
pass
def test_hash_with_hashable(self):
pass
def test_hash_with_unhashable(self):
pass
class TestInterval:
def __hash__(self):
pass
def test_enclosure(self):
pass
| 12 | 0 | 9 | 1 | 7 | 0 | 1 | 0.04 | 0 | 3 | 2 | 0 | 9 | 0 | 9 | 9 | 92 | 17 | 72 | 16 | 60 | 3 | 72 | 16 | 60 | 1 | 0 | 1 | 10 |
3,813 |
AlexandreDecan/python-intervals
|
AlexandreDecan_python-intervals/tests/test_interval.py
|
tests.test_interval.TestIntervalUnion
|
class TestIntervalUnion:
def test_atomic(self):
assert P.closed(1, 2) | P.closed(1, 2) == P.closed(1, 2)
assert P.closed(1, 4) | P.closed(2, 3) == P.closed(1, 4)
assert P.closed(1, 2) | P.closed(2, 3) == P.closed(2, 3) | P.closed(1, 2)
assert P.closed(1, 2) | P.closed(3, 4) == P.closed(1, 2) | P.closed(3, 4)
def test_with_itself(self):
assert P.closed(1, 2) | P.closed(1, 2) == P.closed(1, 2)
assert P.open(1, 2) | P.closed(1, 2) == P.closed(1, 2)
assert P.closedopen(1, 2) | P.openclosed(1, 2) == P.closed(1, 2)
def test_with_containment(self):
assert P.closed(1, 4) | P.closed(2, 3) == P.closed(1, 4)
assert P.open(1, 4) | P.closed(2, 3) == P.open(1, 4)
def test_with_overlap(self):
assert P.closed(1, 3) | P.closed(2, 4) == P.closed(1, 4)
def test_with_adjacent(self):
assert P.closed(1, 2) | P.closed(2, 3) == P.closed(1, 3)
assert P.closed(1, 2) | P.open(2, 3) == P.closedopen(1, 3)
assert P.open(1, 2) | P.closed(2, 3) == P.openclosed(1, 3)
assert P.open(1, 3) | P.open(2, 4) == P.open(1, 4)
assert P.closedopen(1, 2) | P.closed(2, 3) == P.closed(1, 3)
assert P.open(1, 2) | P.closed(2, 4) == P.openclosed(1, 4)
def test_with_disjoint(self):
assert P.closed(1, 2) | P.closed(3, 4) != P.closed(1, 4)
assert (P.closed(1, 2) | P.closed(3, 4) | P.closed(2, 3)).atomic
assert P.closed(1, 2) | P.closed(3, 4) | P.closed(2, 3) == P.closed(1, 4)
assert P.closed(1, 2) | P.closed(0, 4) == P.closed(0, 4)
assert (P.closed(0, 1) | P.closed(2, 3) | P.closed(1, 2)).atomic
assert P.closed(0, 1) | P.closed(2, 3) | P.closed(1, 2) == P.closed(0, 3)
def test_with_empty(self):
assert P.closed(0, 1) | P.empty() == P.closed(0, 1)
def test_issue_12(self):
# https://github.com/AlexandreDecan/python-intervals/issues/12
assert P.open(0, 2) | P.closed(0, 2) == P.closed(0, 2)
assert P.open(0, 2) | P.closed(1, 2) == P.openclosed(0, 2)
assert P.open(0, 2) | P.closed(0, 1) == P.closedopen(0, 2)
assert P.closed(0, 2) | P.open(0, 2) == P.closed(0, 2)
assert P.closed(1, 2) | P.open(0, 2) == P.openclosed(0, 2)
assert P.closed(0, 1) | P.open(0, 2) == P.closedopen(0, 2)
assert P.closed(0, 2) | P.singleton(2) == P.closed(0, 2)
assert P.closedopen(0, 2) | P.singleton(2) == P.closed(0, 2)
assert P.openclosed(0, 2) | P.singleton(2) == P.openclosed(0, 2)
assert P.openclosed(0, 2) | P.singleton(0) == P.closed(0, 2)
assert P.singleton(2) | P.closed(0, 2) == P.closed(0, 2)
assert P.singleton(2) | P.closedopen(0, 2) == P.closed(0, 2)
assert P.singleton(2) | P.openclosed(0, 2) == P.openclosed(0, 2)
assert P.singleton(0) | P.openclosed(0, 2) == P.closed(0, 2)
def test_issue_13(self):
# https://github.com/AlexandreDecan/python-intervals/issues/13
assert P.closed(1, 1) | P.openclosed(1, 2) == P.closed(1, 2)
assert P.openclosed(1, 2) | P.closed(1, 1) == P.closed(1, 2)
assert P.closed(0, 1) | P.openclosed(1, 2) == P.closed(0, 2)
assert P.openclosed(1, 2) | P.closed(0, 1) == P.closed(0, 2)
assert P.openclosed(1, 2) | P.closed(1, 1) == P.closed(1, 2)
assert P.closed(1, 1) | P.openclosed(1, 2) == P.closed(1, 2)
assert P.openclosed(1, 2) | P.closed(0, 1) == P.closed(0, 2)
assert P.closed(0, 1) | P.openclosed(1, 2) == P.closed(0, 2)
def test_issue_38(self):
# https://github.com/AlexandreDecan/portion/issues/38
assert P.open(1, 2) | P.open(2, 3) | P.singleton(2) == P.open(1, 3)
assert P.open(2, 3) | P.open(1, 2) | P.singleton(2) == P.open(1, 3)
assert P.open(1, 2) | P.singleton(2) | P.open(2, 3) == P.open(1, 3)
assert P.open(2, 3) | P.singleton(2) | P.open(1, 2) == P.open(1, 3)
assert P.singleton(2) | P.open(2, 3) | P.open(1, 2) == P.open(1, 3)
assert P.singleton(2) | P.open(1, 2) | P.open(2, 3) == P.open(1, 3)
def test_proxy_method(self):
i1, i2 = P.closed(0, 1), P.closed(2, 3)
assert i1 | i2 == i1.union(i2)
def test_with_invalid_type(self):
with pytest.raises(TypeError):
P.closed(0, 1) | 1
|
class TestIntervalUnion:
def test_atomic(self):
pass
def test_with_itself(self):
pass
def test_with_containment(self):
pass
def test_with_overlap(self):
pass
def test_with_adjacent(self):
pass
def test_with_disjoint(self):
pass
def test_with_empty(self):
pass
def test_issue_12(self):
pass
def test_issue_13(self):
pass
def test_issue_38(self):
pass
def test_proxy_method(self):
pass
def test_with_invalid_type(self):
pass
| 13 | 0 | 6 | 1 | 6 | 0 | 1 | 0.04 | 0 | 1 | 0 | 0 | 12 | 0 | 12 | 12 | 89 | 18 | 68 | 14 | 55 | 3 | 68 | 14 | 55 | 1 | 0 | 1 | 12 |
3,814 |
AlexandreDecan/python-intervals
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/AlexandreDecan_python-intervals/tests/test_interval.py
|
tests.test_interval.TestInterval.test_hash_with_unhashable.T
|
class T(int):
def __hash__(self):
raise TypeError()
|
class T(int):
def __hash__(self):
pass
| 2 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 1 | 0 | 1 | 56 | 3 | 0 | 3 | 2 | 1 | 0 | 3 | 2 | 1 | 1 | 2 | 0 | 1 |
3,815 |
AlexandreDecan/python-intervals
|
AlexandreDecan_python-intervals/tests/test_const.py
|
tests.test_const.TestInfinities
|
class TestInfinities:
def test_pinf_is_greater(self):
assert not (inf > inf)
assert inf > -inf
assert inf > 0
assert inf > 'a'
assert inf > []
assert inf >= inf
assert inf >= -inf
assert inf >= 0
assert inf >= 'a'
assert inf >= []
def test_ninf_is_lower(self):
assert not (-inf < -inf)
assert -inf < inf
assert -inf < 0
assert -inf < 'a'
assert -inf < []
assert -inf <= -inf
assert -inf <= inf
assert -inf <= 0
assert -inf <= 'a'
assert -inf <= []
def test_equalities(self):
assert inf != -inf
assert inf == inf
assert -inf == -inf
def test_infinities_are_singletons(self):
assert _PInf() is _PInf()
assert inf is _PInf()
assert _NInf() is _NInf()
assert -inf is _NInf()
assert -(-inf) is inf
def test_infinities_are_hashable(self):
assert hash(inf) is not None
assert hash(-inf) is not None
assert hash(inf) != hash(-inf)
|
class TestInfinities:
def test_pinf_is_greater(self):
pass
def test_ninf_is_lower(self):
pass
def test_equalities(self):
pass
def test_infinities_are_singletons(self):
pass
def test_infinities_are_hashable(self):
pass
| 6 | 0 | 8 | 1 | 7 | 0 | 1 | 0 | 0 | 2 | 2 | 0 | 5 | 0 | 5 | 5 | 45 | 8 | 37 | 6 | 31 | 0 | 37 | 6 | 31 | 1 | 0 | 0 | 5 |
3,816 |
Alexis-benoist/eralchemy
|
Alexis-benoist_eralchemy/eralchemy/parser.py
|
eralchemy.parser.RelationNoColException
|
class RelationNoColException(ParsingException):
hint = "Try to declare the tables before the relationships."
|
class RelationNoColException(ParsingException):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 11 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 4 | 0 | 0 |
3,817 |
Alexis-benoist/eralchemy
|
Alexis-benoist_eralchemy/eralchemy/parser.py
|
eralchemy.parser.DuplicateTableException
|
class DuplicateTableException(ParsingException):
pass
|
class DuplicateTableException(ParsingException):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 11 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 4 | 0 | 0 |
3,818 |
Alexis-benoist/eralchemy
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Alexis-benoist_eralchemy/eralchemy/sqla.py
|
eralchemy.sqla.DeclarativeBase
|
class DeclarativeBase(Protocol):
metadata: sa.MetaData
|
class DeclarativeBase(Protocol):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 22 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 4 | 0 | 0 |
3,819 |
Alexis-benoist/eralchemy
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Alexis-benoist_eralchemy/tests/create_db.py
|
tests.create_db.create_db.Department
|
class Department(Base):
__tablename__ = "department"
id = Column(Integer, primary_key=True)
name = Column(String)
|
class Department(Base):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0 | 4 | 3 | 3 | 0 | 4 | 3 | 3 | 0 | 1 | 0 | 0 |
3,820 |
Alexis-benoist/eralchemy
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Alexis-benoist_eralchemy/tests/create_db.py
|
tests.create_db.create_db.Employee
|
class Employee(Base):
__tablename__ = "employee"
id = Column(Integer, primary_key=True)
name = Column(String)
# Use default=func.now() to set the default hiring time
# of an Employee to be the current time when an
# Employee record was created
hired_on = Column(DateTime, default=func.now())
department_id = Column(Integer, ForeignKey("department.id"))
# Use cascade='delete,all' to propagate the deletion of a Department onto its Employees
department = relationship(
Department,
backref=backref("employees", uselist=True, cascade="delete,all"),
)
|
class Employee(Base):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0.4 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 14 | 0 | 10 | 6 | 9 | 4 | 7 | 6 | 6 | 0 | 1 | 0 | 0 |
3,821 |
Alexis-benoist/eralchemy
|
Alexis-benoist_eralchemy/tests/common.py
|
tests.common.ChildWithSchema
|
class ChildWithSchema(Base):
__tablename__ = "child"
__table_args__ = {"schema": "eralchemy_test"}
id = Column(Integer, primary_key=True)
parent_id = Column(ForeignKey("eralchemy_test.parent.id"))
parent = relationship("ParentWithSchema", backref="eralchemy_test.children")
|
class ChildWithSchema(Base):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 | 0 | 6 | 5 | 5 | 0 | 6 | 5 | 5 | 0 | 1 | 0 | 0 |
3,822 |
Alexis-benoist/eralchemy
|
Alexis-benoist_eralchemy/tests/common.py
|
tests.common.Child
|
class Child(Base):
__tablename__ = "child"
id = Column(Integer, primary_key=True)
parent_id = Column(ForeignKey("parent.id"))
parent = relationship("Parent", backref="children")
|
class Child(Base):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5 | 0 | 5 | 4 | 4 | 0 | 5 | 4 | 4 | 0 | 1 | 0 | 0 |
3,823 |
Alexis-benoist/eralchemy
|
Alexis-benoist_eralchemy/example/graph.py
|
graph.Node
|
class Node(Base):
__tablename__ = "node"
node_id: Mapped[int] = mapped_column(Integer, primary_key=True)
lower_edges: Mapped[list[Edge]] = relationship(back_populates="lower_node")
higher_edges: Mapped[list[Edge]] = relationship(back_populates="higher_node")
def add_neighbors(self, *nodes):
for node in nodes:
Edge(self, node)
return self
def higher_neighbors(self) -> list[Node]:
return [x.higher_node for x in self.lower_edges]
def lower_neighbors(self) -> list[Node]:
return [x.lower_node for x in self.higher_edges]
|
class Node(Base):
def add_neighbors(self, *nodes):
pass
def higher_neighbors(self) -> list[Node]:
pass
def lower_neighbors(self) -> list[Node]:
pass
| 4 | 0 | 3 | 0 | 3 | 0 | 1 | 0 | 1 | 2 | 1 | 0 | 3 | 0 | 3 | 3 | 17 | 4 | 13 | 9 | 9 | 0 | 13 | 9 | 9 | 2 | 1 | 1 | 4 |
3,824 |
Alexis-benoist/eralchemy
|
Alexis-benoist_eralchemy/example/graph.py
|
graph.Edge
|
class Edge(Base):
__tablename__ = "edge"
lower_id: Mapped[int] = mapped_column(Integer, ForeignKey("node.node_id"), primary_key=True)
higher_id: Mapped[int] = mapped_column(Integer, ForeignKey("node.node_id"), primary_key=True)
lower_node: Mapped[Node] = relationship(back_populates="lower_edges")
higher_node: Mapped[Node] = relationship(back_populates="higher_edges")
def __init__(self, n1: Node, n2: Node) -> None:
if n1.node_id < n2.node_id:
self.lower_node = n1
self.higher_node = n2
else:
self.lower_node = n2
self.higher_node = n1
|
class Edge(Base):
def __init__(self, n1: Node, n2: Node) -> None:
pass
| 2 | 0 | 7 | 0 | 7 | 0 | 2 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 1 | 1 | 15 | 2 | 13 | 7 | 11 | 0 | 12 | 7 | 10 | 2 | 1 | 1 | 2 |
3,825 |
Alexis-benoist/eralchemy
|
Alexis-benoist_eralchemy/example/forum.py
|
forum.User
|
class User(Base):
__tablename__ = "users"
# user roles
MEMBER = 100
MODERATOR = 200
ADMIN = 300
id = Column(Integer, primary_key=True)
username = Column(Unicode(60), unique=True, nullable=False)
email = Column(String(150), unique=True, nullable=False)
karma = Column(Integer, default=0)
date_joined = Column(DateTime, default=datetime.utcnow)
activation_key = Column(String(80), unique=True)
role = Column(Integer, default=MEMBER)
receive_email = Column(Boolean, default=False)
email_alerts = Column(Boolean, default=False)
followers = Column(Text)
following = Column(Text)
_password = Column("password", String(80))
_openid = Column("openid", String(80), unique=True)
|
class User(Base):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0.06 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 22 | 3 | 18 | 17 | 17 | 1 | 18 | 17 | 17 | 0 | 1 | 0 | 0 |
3,826 |
Alexis-benoist/eralchemy
|
Alexis-benoist_eralchemy/example/forum.py
|
forum.Tag
|
class Tag(Base):
__tablename__ = "tags"
id = Column(Integer, primary_key=True)
slug = Column(Unicode(80), unique=True)
_name = Column("name", Unicode(80), unique=True)
|
class Tag(Base):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 | 2 | 5 | 4 | 4 | 0 | 5 | 4 | 4 | 0 | 1 | 0 | 0 |
3,827 |
Alexis-benoist/eralchemy
|
Alexis-benoist_eralchemy/eralchemy/parser.py
|
eralchemy.parser.DuplicateColumnException
|
class DuplicateColumnException(ParsingException):
pass
|
class DuplicateColumnException(ParsingException):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 11 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 4 | 0 | 0 |
3,828 |
Alexis-benoist/eralchemy
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Alexis-benoist_eralchemy/tests/test_sqla_multi_key.py
|
tests.test_sqla_multi_key.test_columns_one_to_many_parent.Child
|
class Child(Base):
__tablename__ = "child"
id = Column(String(), primary_key=True)
parent = Column(String(), ForeignKey(Parent.id))
|
class Child(Base):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0 | 4 | 3 | 3 | 0 | 4 | 3 | 3 | 0 | 1 | 0 | 0 |
3,829 |
Alexis-benoist/eralchemy
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Alexis-benoist_eralchemy/tests/test_sqla_multi_key.py
|
tests.test_sqla_multi_key.test_columns_one_to_one_parent.Child
|
class Child(Base):
__tablename__ = "child"
id = Column(String(), ForeignKey(Parent.id), primary_key=True)
|
class Child(Base):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0 | 3 | 2 | 2 | 0 | 3 | 2 | 2 | 0 | 1 | 0 | 0 |
3,830 |
Alexis-benoist/eralchemy
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Alexis-benoist_eralchemy/tests/test_sqla_multi_key.py
|
tests.test_sqla_multi_key.test_columns_parent.Child
|
class Child(Base):
__tablename__ = "child"
id = Column(String(), primary_key=True)
parent = Column(String(), ForeignKey(Parent.id), primary_key=True)
|
class Child(Base):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0 | 4 | 3 | 3 | 0 | 4 | 3 | 3 | 0 | 1 | 0 | 0 |
3,831 |
Alexis-benoist/eralchemy
|
Alexis-benoist_eralchemy/eralchemy/models.py
|
eralchemy.models.Table
|
class Table(Drawable):
"""Represents a Table in the intermediaty syntax."""
RE = re.compile(r"\[(?P<name>[^]]+)\]")
def __init__(self, name: str, columns: list[Column]) -> None:
self.name = name
self.columns = columns
@staticmethod
def make_from_match(match: re.Match) -> Table:
return Table(name=match.group("name"), columns=[])
@property
def header_markdown(self) -> str:
return f"[{self.name}]"
def to_markdown(self) -> str:
return self.header_markdown + "\n" + "\n".join(c.to_markdown() for c in self.columns)
def to_mermaid(self) -> str:
columns = [c.to_mermaid() for c in self.columns]
name = sanitize_mermaid(self.name)
return f" class {name}{{\n " + "\n ".join(columns) + "\n }"
def to_mermaid_er(self) -> str:
columns = [c.to_mermaid_er() for c in self.columns]
name = sanitize_mermaid(self.name, is_er=True)
return f"{name} {{\n" + "\n ".join(columns) + "\n}"
@property
def columns_sorted(self):
return sorted(self.columns, key=operator.attrgetter("name"))
@property
def header_dot(self) -> str:
return ROW_TAGS.format("", f'<B><FONT POINT-SIZE="16">{self.name}</FONT></B>')
def to_dot(self) -> str:
body = "".join(c.to_dot() for c in self.columns)
return TABLE.format(self.name, self.header_dot, body)
def __str__(self) -> str:
return self.header_markdown
def __eq__(self, other: object) -> bool:
if not isinstance(other, Table):
return False
if other.name != self.name:
return False
if self.columns_sorted != other.columns_sorted:
return False
return True
|
class Table(Drawable):
'''Represents a Table in the intermediaty syntax.'''
def __init__(self, name: str, columns: list[Column]) -> None:
pass
@staticmethod
def make_from_match(match: re.Match) -> Table:
pass
@property
def header_markdown(self) -> str:
pass
def to_markdown(self) -> str:
pass
def to_mermaid(self) -> str:
pass
def to_mermaid_er(self) -> str:
pass
@property
def columns_sorted(self):
pass
@property
def header_dot(self) -> str:
pass
def to_dot(self) -> str:
pass
def __str__(self) -> str:
pass
def __eq__(self, other: object) -> bool:
pass
| 16 | 1 | 3 | 0 | 3 | 0 | 1 | 0.03 | 1 | 5 | 1 | 0 | 10 | 2 | 11 | 36 | 54 | 13 | 40 | 24 | 24 | 1 | 36 | 20 | 24 | 4 | 5 | 1 | 14 |
3,832 |
Alexis-benoist/eralchemy
|
Alexis-benoist_eralchemy/eralchemy/parser.py
|
eralchemy.parser.NoCurrentTableException
|
class NoCurrentTableException(ParsingException):
hint = "Try to declare the tables before the relationships and columns."
|
class NoCurrentTableException(ParsingException):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 11 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 4 | 0 | 0 |
3,833 |
Alexis-benoist/eralchemy
|
Alexis-benoist_eralchemy/eralchemy/models.py
|
eralchemy.models.Relation
|
class Relation(Drawable):
"""Represents a Relation in the intermediaty syntax."""
RE = re.compile(
r"""
(?P<left_table>[^\s]+?)
(?:\.\"(?P<left_column>.+)\")?
\s*
(?P<left_cardinality>[*?+1])
--
(?P<right_cardinality>[*?+1])
\s*
(?P<right_table>[^\s]+?)
(?:\.\"(?P<right_column>.+)\")?
\s*$
""",
re.VERBOSE,
)
cardinalities = {"*": "0..N", "?": "{0,1}", "+": "1..N", "1": "1", "": None}
cardinalities_mermaid = {
"*": "0..n",
"?": "0..1",
"+": "1..n",
}
cardinalities_crowfoot = {
"*": "0+",
"?": "one or zero",
"+": "1+",
}
@staticmethod
def make_from_match(match: re.Match) -> Relation:
return Relation(**match.groupdict())
def __init__(
self,
right_table,
left_table,
right_cardinality=None,
left_cardinality=None,
right_column=None,
left_column=None,
):
if (
right_cardinality not in self.cardinalities.keys()
or left_cardinality not in self.cardinalities.keys()
):
raise ValueError(f"Cardinality should be in {self.cardinalities.keys()}")
self.right_table = right_table
self.right_column = right_column or ""
self.left_table = left_table
self.left_column = left_column or ""
self.right_cardinality = right_cardinality
self.left_cardinality = left_cardinality
def to_markdown(self) -> str:
return "{}{} {}--{} {}{}".format(
self.left_table,
"" if not self.left_column else f'."{self.left_column}"',
self.left_cardinality,
self.right_cardinality,
self.right_table,
"" if not self.right_column else f'."{self.right_column}"',
)
def to_mermaid(self) -> str:
normalized = (
Relation.cardinalities_mermaid.get(k, k)
for k in (
sanitize_mermaid(self.left_table),
self.left_cardinality,
self.right_cardinality,
sanitize_mermaid(self.right_table),
)
)
return ' {} "{}" -- "{}" {}'.format(*normalized)
def to_mermaid_er(self) -> str:
left = Relation.cardinalities_crowfoot.get(self.left_cardinality, self.left_cardinality)
right = Relation.cardinalities_crowfoot.get(self.right_cardinality, self.right_cardinality)
left_col = sanitize_mermaid(self.left_table, is_er=True)
right_col = sanitize_mermaid(self.right_table, is_er=True)
return f"{left_col} {left}--{right} {right_col} : has"
def graphviz_cardinalities(self, card) -> str:
if card == "":
return ""
return f"label=<<FONT>{self.cardinalities[card]}</FONT>>"
def to_dot(self) -> str:
if self.right_cardinality == self.left_cardinality == "":
return ""
cards = []
if self.left_cardinality != "":
cards.append("tail" + self.graphviz_cardinalities(self.left_cardinality))
if self.right_cardinality != "":
cards.append("head" + self.graphviz_cardinalities(self.right_cardinality))
left_col = f':"{self.left_column}"' if self.left_column else ""
right_col = f':"{self.right_column}"' if self.right_column else ""
return (
f'"{self.left_table}"{left_col} -- "{self.right_table}"{right_col} [{",".join(cards)}];'
)
def __eq__(self, other: object) -> bool:
if super().__eq__(other):
return True
if not isinstance(other, Relation):
return False
other_inversed = Relation(
right_table=other.left_table,
right_column=other.left_column,
left_table=other.right_table,
left_column=other.right_column,
right_cardinality=other.left_cardinality,
left_cardinality=other.right_cardinality,
)
return other_inversed.__dict__ == self.__dict__
|
class Relation(Drawable):
'''Represents a Relation in the intermediaty syntax.'''
@staticmethod
def make_from_match(match: re.Match) -> Relation:
pass
def __init__(
self,
right_table,
left_table,
right_cardinality=None,
left_cardinality=None,
right_column=None,
left_column=None,
):
pass
def to_markdown(self) -> str:
pass
def to_mermaid(self) -> str:
pass
def to_mermaid_er(self) -> str:
pass
def graphviz_cardinalities(self, card) -> str:
pass
def to_dot(self) -> str:
pass
def __eq__(self, other: object) -> bool:
pass
| 10 | 1 | 10 | 0 | 10 | 0 | 2 | 0.01 | 1 | 4 | 0 | 0 | 7 | 6 | 8 | 33 | 118 | 10 | 107 | 37 | 89 | 1 | 49 | 28 | 40 | 6 | 5 | 1 | 19 |
3,834 |
Alexis-benoist/eralchemy
|
Alexis-benoist_eralchemy/eralchemy/parser.py
|
eralchemy.parser.ParsingException
|
class ParsingException(Exception):
base_traceback = "Error on line {line_nb}: {line}\n{error}"
hint: ClassVar[str | None] = None
@property
def traceback(self) -> str:
rv = self.base_traceback.format(
line_nb=getattr(self, "line_nb", "?"),
line=getattr(self, "line", ""),
error=self.args[0],
)
if self.hint is not None:
rv += f"\nHINT: {self.hint}"
return rv
|
class ParsingException(Exception):
@property
def traceback(self) -> str:
pass
| 3 | 0 | 9 | 0 | 9 | 0 | 2 | 0 | 1 | 1 | 0 | 4 | 1 | 0 | 1 | 11 | 14 | 1 | 13 | 6 | 10 | 0 | 8 | 5 | 6 | 2 | 3 | 1 | 2 |
3,835 |
Alexis-benoist/eralchemy
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Alexis-benoist_eralchemy/tests/test_sqla_multi_key.py
|
tests.test_sqla_multi_key.test_columns_parent.Parent
|
class Parent(Base):
__tablename__ = "parent"
id = Column(String(), primary_key=True)
|
class Parent(Base):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0 | 3 | 2 | 2 | 0 | 3 | 2 | 2 | 0 | 1 | 0 | 0 |
3,836 |
Alexis-benoist/eralchemy
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Alexis-benoist_eralchemy/tests/test_sqla_multi_key.py
|
tests.test_sqla_multi_key.test_compound_one_to_one_parent.Parent
|
class Parent(Base):
__tablename__ = "parent"
id = Column(String(), primary_key=True)
id2 = Column(String(), primary_key=True)
|
class Parent(Base):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0 | 4 | 3 | 3 | 0 | 4 | 3 | 3 | 0 | 1 | 0 | 0 |
3,837 |
Alexis-benoist/eralchemy
|
Alexis-benoist_eralchemy/example/flask-sqla.py
|
flask-sqla.Posts
|
class Posts(db.Model):
__tablename__ = "posts"
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
title = db.Column(db.String(120), nullable=False)
description = db.Column(db.String(1000), nullable=False)
user_id = db.Column(db.Integer, db.ForeignKey("users.id"), nullable=False)
|
class Posts(db.Model):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 | 0 | 6 | 5 | 5 | 0 | 6 | 5 | 5 | 0 | 1 | 0 | 0 |
3,838 |
Alexis-benoist/eralchemy
|
Alexis-benoist_eralchemy/tests/common.py
|
tests.common.Exclude
|
class Exclude(Base):
__tablename__ = "exclude"
id = Column(Integer, primary_key=True)
parent_id = Column(ForeignKey("parent.id"))
parent = relationship("Parent", backref="excludes")
|
class Exclude(Base):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5 | 0 | 5 | 4 | 4 | 0 | 5 | 4 | 4 | 0 | 1 | 0 | 0 |
3,839 |
Alexis-benoist/eralchemy
|
Alexis-benoist_eralchemy/tests/common.py
|
tests.common.ExcludeWithSchema
|
class ExcludeWithSchema(Base):
__tablename__ = "exclude"
__table_args__ = {"schema": "eralchemy_test"}
id = Column(Integer, primary_key=True)
parent_id = Column(ForeignKey("eralchemy_test.parent.id"))
parent = relationship("ParentWithSchema", backref="eralchemy_test.excludes")
|
class ExcludeWithSchema(Base):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 | 0 | 6 | 5 | 5 | 0 | 6 | 5 | 5 | 0 | 1 | 0 | 0 |
3,840 |
Alexis-benoist/eralchemy
|
Alexis-benoist_eralchemy/example/forum.py
|
forum.Post
|
class Post(Base):
__tablename__ = "posts"
PUBLIC = 100
FRIENDS = 200
PRIVATE = 300
PER_PAGE = 40
id = Column(Integer, primary_key=True)
author_id = Column(Integer, ForeignKey(User.id, ondelete="CASCADE"), nullable=False)
title = Column(Unicode(200))
description = Column(UnicodeText)
link = Column(String(250))
date_created = Column(DateTime, default=datetime.utcnow)
score = Column(Integer, default=1)
num_comments = Column(Integer, default=0)
votes = Column(Text)
access = Column(Integer, default=PUBLIC)
_tags = Column("tags", UnicodeText)
author = Relationship(User, innerjoin=True, lazy="joined")
|
class Post(Base):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 25 | 7 | 18 | 17 | 17 | 0 | 18 | 17 | 17 | 0 | 1 | 0 | 0 |
3,841 |
Alexis-benoist/eralchemy
|
Alexis-benoist_eralchemy/tests/common.py
|
tests.common.ParentWithSchema
|
class ParentWithSchema(Base):
__tablename__ = "parent"
__table_args__ = {"schema": "eralchemy_test"}
id = Column(Integer, primary_key=True)
name = Column(String(255))
|
class ParentWithSchema(Base):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5 | 0 | 5 | 4 | 4 | 0 | 5 | 4 | 4 | 0 | 1 | 0 | 0 |
3,842 |
Alexis-benoist/eralchemy
|
Alexis-benoist_eralchemy/example/forum.py
|
forum.Comment
|
class Comment(Base):
__tablename__ = "comments"
PER_PAGE = 20
id = Column(Integer, primary_key=True)
author_id = Column(Integer, ForeignKey(User.id, ondelete="CASCADE"), nullable=False)
post_id = Column(Integer, ForeignKey(Post.id, ondelete="CASCADE"), nullable=False)
parent_id = Column(Integer, ForeignKey("comments.id", ondelete="CASCADE"))
comment = Column(UnicodeText)
date_created = Column(DateTime, default=datetime.utcnow)
score = Column(Integer, default=1)
votes = Column(Text)
author = Relationship(User, innerjoin=True, lazy="joined")
post = Relationship(Post, innerjoin=True, lazy="joined")
parent = Relationship("Comment", remote_side=[id])
|
class Comment(Base):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 23 | 9 | 14 | 13 | 13 | 0 | 14 | 13 | 13 | 0 | 1 | 0 | 0 |
3,843 |
Alexis-benoist/eralchemy
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Alexis-benoist_eralchemy/tests/test_sqla_multi_key.py
|
tests.test_sqla_multi_key.test_compound_one_to_one_parent.Child
|
class Child(Base):
__tablename__ = "child"
id = Column(String(), ForeignKey(Parent.id), primary_key=True)
id2 = Column(String(), ForeignKey(Parent.id2), primary_key=True)
|
class Child(Base):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0 | 4 | 3 | 3 | 0 | 4 | 3 | 3 | 0 | 1 | 0 | 0 |
3,844 |
Alexis-benoist/eralchemy
|
Alexis-benoist_eralchemy/example/flask-sqla.py
|
flask-sqla.User
|
class User(db.Model):
__tablename__ = "users"
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
email = db.Column(db.String(120), unique=True, nullable=False)
|
class User(db.Model):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0 | 4 | 3 | 3 | 0 | 4 | 3 | 3 | 0 | 1 | 0 | 0 |
3,845 |
Alexis-benoist/eralchemy
|
Alexis-benoist_eralchemy/tests/common.py
|
tests.common.Parent
|
class Parent(Base):
__tablename__ = "parent"
id = Column(Integer, primary_key=True)
name = Column(String(255))
|
class Parent(Base):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0 | 4 | 3 | 3 | 0 | 4 | 3 | 3 | 0 | 1 | 0 | 0 |
3,846 |
Alexis-benoist/eralchemy
|
Alexis-benoist_eralchemy/eralchemy/models.py
|
eralchemy.models.Column
|
class Column(Drawable):
"""Represents a Column in the intermediaty syntax."""
RE = re.compile(
r'(?P<primary>\*?)(?P<name>\w+(\s*\w+)*)\s*(\{label:\s*"(?P<label>[^"]+)"\})?',
)
@staticmethod
def make_from_match(match: re.Match) -> Column:
return Column(
name=match.group("name"),
type=match.group("label"),
is_key="*" in match.group("primary"),
is_null="*" not in match.group("primary"),
)
def __init__(self, name: str, type=None, is_key: bool = False, is_null=None):
"""Initialize the Column class.
:param name: (str) Name of the column
:param type:
:param is_key:
:param is_null:
:return:
"""
self.name = name
self.type = type
self.is_key = is_key
if is_null is None:
self.is_null = not is_key
else:
self.is_null = is_null
def __lt__(self, other: Column) -> bool:
if self.is_key > other.is_key:
return True
elif self.is_key < other.is_key:
return False
else:
return self.name < other.name
@property
def key_symbol(self) -> str:
return "*" if self.is_key else ""
def to_markdown(self) -> str:
name = sanitize_mermaid(self.name)
return f' {self.key_symbol}{name} {{label:"{self.type}"}}'
def to_mermaid(self) -> str:
return " {}{} {}{}".format(
self.key_symbol,
self.type.replace("(", "<").replace(")", ">"),
self.name,
" NOT NULL" if not self.is_null else "",
)
def to_mermaid_er(self) -> str:
type_str = self.type.replace(" ", "_")
name = sanitize_mermaid(self.name, is_er=True)
return f" {type_str} {name} {'PK' if self.is_key else ''}"
def to_dot(self) -> str:
base = ROW_TAGS.format(
' ALIGN="LEFT" {port}',
"{key_opening}{col_name}{key_closing} {type}{null}",
)
return base.format(
port=f'PORT="{self.name}"' if self.name else "",
key_opening="<u>" if self.is_key else "",
key_closing="</u>" if self.is_key else "",
col_name=FONT_TAGS.format(self.name),
type=(FONT_TAGS.format(" [{}]").format(self.type) if self.type is not None else ""),
null=" NOT NULL" if not self.is_null else "",
)
|
class Column(Drawable):
'''Represents a Column in the intermediaty syntax.'''
@staticmethod
def make_from_match(match: re.Match) -> Column:
pass
def __init__(self, name: str, type=None, is_key: bool = False, is_null=None):
'''Initialize the Column class.
:param name: (str) Name of the column
:param type:
:param is_key:
:param is_null:
:return:
'''
pass
def __lt__(self, other: Column) -> bool:
pass
@property
def key_symbol(self) -> str:
pass
def to_markdown(self) -> str:
pass
def to_mermaid(self) -> str:
pass
def to_mermaid_er(self) -> str:
pass
def to_dot(self) -> str:
pass
| 11 | 2 | 7 | 0 | 6 | 1 | 2 | 0.14 | 1 | 2 | 0 | 0 | 7 | 4 | 8 | 33 | 75 | 10 | 57 | 20 | 46 | 8 | 30 | 18 | 21 | 6 | 5 | 1 | 19 |
3,847 |
AliGhahraei/verarandom
|
AliGhahraei_verarandom/verarandom/_random_generator.py
|
verarandom._random_generator.RandomConfig
|
class RandomConfig:
# noinspection PyUnresolvedReferences
""" Configuration used by :py:class:`verarandom.Verarandom`
:param MAX_INTEGER: maximum integer that may be requested
:param MIN_INTEGER: minimum integer that may be requested
:param MAX_NUMBER_OF_INTEGERS: integers limit for a single request
:param MAX_NUMBER_OF_FLOATS: floats limit for a single request
"""
MAX_INTEGER: int
MIN_INTEGER: int
MAX_NUMBER_OF_INTEGERS: int
MAX_NUMBER_OF_FLOATS: int
|
class RandomConfig:
''' Configuration used by :py:class:`verarandom.Verarandom`
:param MAX_INTEGER: maximum integer that may be requested
:param MIN_INTEGER: minimum integer that may be requested
:param MAX_NUMBER_OF_INTEGERS: integers limit for a single request
:param MAX_NUMBER_OF_FLOATS: floats limit for a single request
'''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1.4 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 13 | 1 | 5 | 1 | 4 | 7 | 5 | 1 | 4 | 0 | 0 | 0 | 0 |
3,848 |
AliGhahraei/verarandom
|
AliGhahraei_verarandom/verarandom/errors.py
|
verarandom.errors.BitQuotaExceeded
|
class BitQuotaExceeded(VeraRandomError):
""" IP has exceeded bit quota and is not allowed to make further requests. """
|
class BitQuotaExceeded(VeraRandomError):
''' IP has exceeded bit quota and is not allowed to make further requests. '''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 2 | 0 | 1 | 1 | 0 | 1 | 1 | 1 | 0 | 0 | 4 | 0 | 0 |
3,849 |
AliGhahraei/verarandom
|
AliGhahraei_verarandom/verarandom/errors.py
|
verarandom.errors.RandomNumberLimitTooSmall
|
class RandomNumberLimitTooSmall(RandomRequestFieldError):
""" Min random number requested is too small for the service's API """
|
class RandomNumberLimitTooSmall(RandomRequestFieldError):
''' Min random number requested is too small for the service's API '''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 11 | 2 | 0 | 1 | 1 | 0 | 1 | 1 | 1 | 0 | 0 | 5 | 0 | 0 |
3,850 |
AliGhahraei/verarandom
|
AliGhahraei_verarandom/verarandom/errors.py
|
verarandom.errors.RandomNumberLimitTooLarge
|
class RandomNumberLimitTooLarge(RandomRequestFieldError):
""" Max random number requested is too large for the service's API """
|
class RandomNumberLimitTooLarge(RandomRequestFieldError):
''' Max random number requested is too large for the service's API '''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 11 | 2 | 0 | 1 | 1 | 0 | 1 | 1 | 1 | 0 | 0 | 5 | 0 | 0 |
3,851 |
AliGhahraei/verarandom
|
AliGhahraei_verarandom/verarandom/errors.py
|
verarandom.errors.NoRandomNumbersRequested
|
class NoRandomNumbersRequested(RandomRequestFieldError):
""" Attempted to request 0 numbers """
|
class NoRandomNumbersRequested(RandomRequestFieldError):
''' Attempted to request 0 numbers '''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 11 | 2 | 0 | 1 | 1 | 0 | 1 | 1 | 1 | 0 | 0 | 5 | 0 | 0 |
3,852 |
AliGhahraei/verarandom
|
AliGhahraei_verarandom/verarandom/errors.py
|
verarandom.errors.HTTPError
|
class HTTPError(VeraRandomError):
""" An HTTP error occured """
|
class HTTPError(VeraRandomError):
''' An HTTP error occured '''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 2 | 0 | 1 | 1 | 0 | 1 | 1 | 1 | 0 | 0 | 4 | 0 | 0 |
3,853 |
AliGhahraei/verarandom
|
AliGhahraei_verarandom/verarandom/random_org_v1.py
|
verarandom.random_org_v1._RandintsToFloatOptions
|
class _RandintsToFloatOptions(IntEnum):
RANDINTS_QUANTITY = 3
RANDINTS_NUMBER_OF_DIGITS = 5
|
class _RandintsToFloatOptions(IntEnum):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 55 | 3 | 0 | 3 | 3 | 2 | 0 | 3 | 3 | 2 | 0 | 3 | 0 | 0 |
3,854 |
AliGhahraei/verarandom
|
AliGhahraei_verarandom/verarandom/errors.py
|
verarandom.errors.TooManyRandomNumbersRequested
|
class TooManyRandomNumbersRequested(RandomRequestFieldError):
""" Attempted to request too many numbers for the service's API """
|
class TooManyRandomNumbersRequested(RandomRequestFieldError):
''' Attempted to request too many numbers for the service's API '''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 11 | 2 | 0 | 1 | 1 | 0 | 1 | 1 | 1 | 0 | 0 | 5 | 0 | 0 |
3,855 |
AliGhahraei/verarandom
|
AliGhahraei_verarandom/verarandom/random_org_v1.py
|
verarandom.random_org_v1._RandintRequestFields
|
class _RandintRequestFields(Enum):
RANDOMIZATION = 'rnd'
TRULY_RANDOM = 'new'
BASE = 'base'
BASE_10 = '10'
NUM = 'num'
MIN = 'min'
MAX = 'max'
COL = 'col'
|
class _RandintRequestFields(Enum):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 49 | 9 | 0 | 9 | 9 | 8 | 0 | 9 | 9 | 8 | 0 | 4 | 0 | 0 |
3,856 |
AliGhahraei/verarandom
|
AliGhahraei_verarandom/verarandom/errors.py
|
verarandom.errors.RandomRequestFieldError
|
class RandomRequestFieldError(VeraRandomError, ValueError):
""" At least one of the request's fields is invalid """
|
class RandomRequestFieldError(VeraRandomError, ValueError):
''' At least one of the request's fields is invalid '''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 2 | 0 | 0 | 4 | 0 | 0 | 0 | 11 | 2 | 0 | 1 | 1 | 0 | 1 | 1 | 1 | 0 | 0 | 4 | 0 | 0 |
3,857 |
AliLozano/django-messages-extends
|
AliLozano_django-messages-extends/tests/tests.py
|
tests.tests.MessagesClient
|
class MessagesClient(Client):
""" Baseline Client for Messages Extends. This is needed to hook messages into the client
and assign the user attribute to the request object"""
def __init__(self, **defaults):
"""Adds messages to the request"""
super(MessagesClient, self).__init__(**defaults)
if 'django.contrib.messages' in settings.INSTALLED_APPS:
self._messages = default_storage(self)
def login(self, **credentials):
"""This sets the user attibute for a logged in user"""
if super(MessagesClient, self).login(**credentials):
self.user = authenticate(**credentials)
return True
else:
self.user = AnonymousUser()
return False
def logout(self):
logout = super(MessagesClient, self).logout()
if hasattr(self, 'user'):
self.user = None
return logout
|
class MessagesClient(Client):
''' Baseline Client for Messages Extends. This is needed to hook messages into the client
and assign the user attribute to the request object'''
def __init__(self, **defaults):
'''Adds messages to the request'''
pass
def login(self, **credentials):
'''This sets the user attibute for a logged in user'''
pass
def logout(self):
pass
| 4 | 3 | 6 | 0 | 5 | 1 | 2 | 0.24 | 1 | 1 | 0 | 0 | 3 | 2 | 3 | 3 | 24 | 3 | 17 | 7 | 13 | 4 | 16 | 7 | 12 | 2 | 1 | 1 | 6 |
3,858 |
AliLozano/django-messages-extends
|
AliLozano_django-messages-extends/messages_extends/storages.py
|
messages_extends.storages.StickyStorage
|
class StickyStorage(BaseStorage):
"""
Keep messages that are sticky in memory
"""
def __init__(self, request, *args, **kwargs):
super(StickyStorage, self).__init__(request, *args, **kwargs)
def _get(self, *args, **kwargs):
"""
Retrieves a list of messages from the memory.
"""
return [], False
def _store(self, messages, response, *args, **kwargs):
"""
Delete all messages that are sticky and return the other messages
This storage never save objects
"""
return [message for message in messages if not message.level in STICKY_MESSAGE_LEVELS]
|
class StickyStorage(BaseStorage):
'''
Keep messages that are sticky in memory
'''
def __init__(self, request, *args, **kwargs):
pass
def _get(self, *args, **kwargs):
'''
Retrieves a list of messages from the memory.
'''
pass
def _store(self, messages, response, *args, **kwargs):
'''
Delete all messages that are sticky and return the other messages
This storage never save objects
'''
pass
| 4 | 3 | 4 | 0 | 2 | 2 | 1 | 1.43 | 1 | 1 | 0 | 0 | 3 | 0 | 3 | 3 | 20 | 3 | 7 | 4 | 3 | 10 | 7 | 4 | 3 | 1 | 1 | 0 | 3 |
3,859 |
AliLozano/django-messages-extends
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/AliLozano_django-messages-extends/tests/tests.py
|
tests.tests.MessagesTests
|
class MessagesTests(TestCase):
"""Test out homes app"""
client_class = MessagesClient
def _get_user(self, username="bob"):
"""Give up a user"""
user = User.objects.create(username=username)
user.set_password('password')
user.save()
return user
@override_settings(MESSAGE_LEVEL=1)
def test_persist_message_levels(self):
"""Test the basic messaging"""
user = self._get_user()
self.client.login(username=user.username, password='password')
for level in PERSISTENT_MESSAGE_LEVELS:
msg = 'Test {} - {}'.format(level, datetime.datetime.now())
messages.add_message(self.client, level, msg)
result = Message.objects.get(level=level)
self.assertEqual(result.message, msg)
self.assertEqual(result.user, user)
self.assertEqual(result.extra_tags, u'')
self.assertIsNone(result.expires)
self.assertIsNotNone(result.created)
self.assertFalse(result.read)
def test_mark_as_read(self):
"""Test the basic messaging"""
self.client.login(username=self._get_user().username,
password='password')
messages.add_message(self.client, WARNING_PERSISTENT, "Warning..")
result = Message.objects.all()[0]
self.assertFalse(result.read)
url = reverse('messages:message_mark_read',
kwargs={'message_id': result.id})
self.client.get(url)
result = Message.objects.all()[0]
self.assertTrue(result.read)
def test_for_other_user(self):
"""Test the basic message for another user"""
self.client.login(username=self._get_user().username,
password='password')
user2 = self._get_user(username="john")
messages.add_message(
self.client, WARNING_PERSISTENT, "Warning..", user=user2)
result = Message.objects.all()[0]
self.assertEqual(result.user, user2)
def test_mark_message_read_for_other_user(self):
"""Test the basic message for another user"""
res = self.client.login(
username=self._get_user().username, password='password')
user2 = self._get_user(username="john")
messages.add_message(
self.client, WARNING_PERSISTENT, "Warning..", user=user2)
result = Message.objects.all()[0]
self.assertEqual(result.user, user2)
url = reverse('messages:message_mark_read',
kwargs={'message_id': result.id})
self.client.get(url)
result = Message.objects.all()[0]
self.assertFalse(result.read)
def test_storages__get(self):
"""Unit test for storages.PersistentStorage._get, which gave bugs
with Django 2.0"""
rf = RequestFactory()
req = rf.get("/")
req.user = self._get_user(username="foo")
ps = PersistentStorage(req)
no_called = []
def _patched_queryset(*args, **kw):
no_called.append(1)
ps._message_queryset = _patched_queryset
ps._get()
self.assertEqual(no_called[0], 1)
def test_delete(self):
user = self._get_user()
self.client.login(username=user.username, password='password')
messages.add_message(self.client, WARNING_PERSISTENT, "Warning Test")
self.assertEqual(Message.objects.count(), 1)
Message.objects.filter(user=user).first().delete()
self.assertEqual(Message.objects.count(), 0)
@mock.patch.object(Collector, 'can_fast_delete')
def test_delete_user_complex_model(self, method_mock):
method_mock.return_value = False
user = self._get_user()
self.client.login(username=user.username, password='password')
# Create duplicated messages
messages.add_message(self.client, WARNING_PERSISTENT, "Warning Test")
messages.add_message(self.client, WARNING_PERSISTENT, "Warning Test")
self.assertEqual(Message.objects.count(), 2)
# User cascade deletes Message
user.delete()
self.assertEqual(Message.objects.count(), 0)
|
class MessagesTests(TestCase):
'''Test out homes app'''
def _get_user(self, username="bob"):
'''Give up a user'''
pass
@override_settings(MESSAGE_LEVEL=1)
def test_persist_message_levels(self):
'''Test the basic messaging'''
pass
def test_mark_as_read(self):
'''Test the basic messaging'''
pass
def test_for_other_user(self):
'''Test the basic message for another user'''
pass
def test_mark_message_read_for_other_user(self):
'''Test the basic message for another user'''
pass
def test_storages__get(self):
'''Unit test for storages.PersistentStorage._get, which gave bugs
with Django 2.0'''
pass
def _patched_queryset(*args, **kw):
pass
def test_delete(self):
pass
@mock.patch.object(Collector, 'can_fast_delete')
def test_delete_user_complex_model(self, method_mock):
pass
| 12 | 7 | 10 | 1 | 8 | 1 | 1 | 0.14 | 1 | 3 | 2 | 0 | 8 | 1 | 8 | 8 | 97 | 13 | 74 | 33 | 62 | 10 | 72 | 30 | 62 | 2 | 1 | 1 | 10 |
3,860 |
AliLozano/django-messages-extends
|
AliLozano_django-messages-extends/messages_extends/migrations/0001_initial.py
|
messages_extends.migrations.0001_initial.Migration
|
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('message', models.TextField()),
('level', models.IntegerField(choices=[(11, 'PERSISTENT DEBUG'), (21, 'PERSISTENT INFO'), (26, 'PERSISTENT SUCCESS'), (31, 'PERSISTENT WARNING'), (41, 'PERSISTENT ERROR')])),
('extra_tags', models.CharField(max_length=128)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('read', models.BooleanField(default=False)),
('expires', models.DateTimeField(null=True, blank=True)),
('user', models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE)),
],
),
]
|
class Migration(migrations.Migration):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 22 | 2 | 20 | 3 | 19 | 0 | 3 | 3 | 2 | 0 | 1 | 0 | 0 |
3,861 |
AliLozano/django-messages-extends
|
AliLozano_django-messages-extends/messages_extends/exceptions.py
|
messages_extends.exceptions.LevelOfMessageException
|
class LevelOfMessageException(Exception):
def __init__(self, *args, **kwargs):
super(LevelOfMessageException, self).__init__(*args, **kwargs)
def __str__(self):
return "The level of the message, can't be proccess by this storage"
|
class LevelOfMessageException(Exception):
def __init__(self, *args, **kwargs):
pass
def __str__(self):
pass
| 3 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 2 | 0 | 2 | 12 | 7 | 2 | 5 | 3 | 2 | 0 | 5 | 3 | 2 | 1 | 3 | 0 | 2 |
3,862 |
AliLozano/django-messages-extends
|
AliLozano_django-messages-extends/messages_extends/models.py
|
messages_extends.models.Message
|
class Message(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, null=True,
on_delete=models.CASCADE)
message = models.TextField()
LEVEL_CHOICES = (
(messages_extends.DEBUG_PERSISTENT, 'PERSISTENT DEBUG'),
(messages_extends.INFO_PERSISTENT, 'PERSISTENT INFO'),
(messages_extends.SUCCESS_PERSISTENT, 'PERSISTENT SUCCESS'),
(messages_extends.WARNING_PERSISTENT, 'PERSISTENT WARNING'),
(messages_extends.ERROR_PERSISTENT, 'PERSISTENT ERROR'),
)
level = models.IntegerField(choices=LEVEL_CHOICES)
extra_tags = models.CharField(max_length=128)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
read = models.BooleanField(default=False)
expires = models.DateTimeField(null=True, blank=True)
def __eq__(self, other):
return isinstance(other, Message) and self.level == other.level and\
self.message == other.message
__hash__ = models.Model.__hash__
def __str__(self):
return force_str(self.message)
def _prepare_message(self):
"""
Prepares the message for saving by forcing the ``message``
and ``extra_tags`` and ``subject`` to unicode in case they are lazy translations.
Known "safe" types (None, int, etc.) are not converted (see Django's
``force_str`` implementation for details).
"""
self.message = force_str(self.message, strings_only=True)
self.extra_tags = force_str(self.extra_tags, strings_only=True)
def save(self, *args, **kwargs):
self._prepare_message()
super(Message, self).save(*args, **kwargs)
def _get_tags(self):
label_tag = force_str(LEVEL_TAGS.get(self.level, ''),
strings_only=True)
extra_tags = force_str(self.extra_tags, strings_only=True)
if self.read:
read_tag = "read"
else:
read_tag = "unread"
if extra_tags and label_tag:
return u' '.join([extra_tags, label_tag, read_tag])
elif extra_tags:
return u' '.join([extra_tags, read_tag])
elif label_tag:
return u' '.join([label_tag, read_tag])
return read_tag
tags = property(_get_tags)
|
class Message(models.Model):
def __eq__(self, other):
pass
def __str__(self):
pass
def _prepare_message(self):
'''
Prepares the message for saving by forcing the ``message``
and ``extra_tags`` and ``subject`` to unicode in case they are lazy translations.
Known "safe" types (None, int, etc.) are not converted (see Django's
``force_str`` implementation for details).
'''
pass
def save(self, *args, **kwargs):
pass
def _get_tags(self):
pass
| 6 | 1 | 7 | 1 | 5 | 1 | 2 | 0.13 | 1 | 1 | 0 | 0 | 5 | 0 | 5 | 5 | 61 | 10 | 45 | 20 | 39 | 6 | 33 | 20 | 27 | 5 | 1 | 1 | 9 |
3,863 |
AliLozano/django-messages-extends
|
AliLozano_django-messages-extends/messages_extends/admin.py
|
messages_extends.admin.MessageAdmin
|
class MessageAdmin(admin.ModelAdmin):
list_display = ['level', 'user', 'message', 'created', 'read']
|
class MessageAdmin(admin.ModelAdmin):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 1 | 0 | 0 |
3,864 |
AliLozano/django-messages-extends
|
AliLozano_django-messages-extends/messages_extends/storages.py
|
messages_extends.storages.FallbackStorage
|
class FallbackStorage(BaseStorage):
"""
Tries to store all messages in the first backend, storing any unstored
messages in each subsequent backend backend, by default use
MESSAGES_STORAGES = ('messages_extends.storages.StickyStorage',
'messages_extends.storages.PersistentStorage',
'django.contrib.messages.storage.session.CookieStorage',
'django.contrib.messages.storage.session.SessionStorage'))
if you want change the backends, put your custom storages:
MESSAGES_STORAGES = ('foo.your_storage', 'cookie_storage')
in your settings
"""
storages_names = getattr(settings, 'MESSAGES_STORAGES',
('messages_extends.storages.StickyStorage',
'messages_extends.storages.PersistentStorage',
'django.contrib.messages.storage.cookie.CookieStorage',
'django.contrib.messages.storage.session.SessionStorage'))
def __init__(self, *args, **kwargs):
super(FallbackStorage, self).__init__(*args, **kwargs)
# get instances of classes of storages_names
self.storages = [get_storage(storage)(*args, **kwargs)
for storage in self.storages_names]
self._used_storages = set()
def _get(self, *args, **kwargs):
"""
Gets a single list of messages from all storage backends.
"""
all_messages = []
for storage in self.storages:
messages, all_retrieved = storage._get()
# If the backend hasn't been used, no more retrieval is necessary.
if messages is None:
break
if messages:
self._used_storages.add(storage)
all_messages.extend(messages)
# If this storage class contained all the messages, no further
# retrieval is necessary
if all_retrieved:
break
return all_messages, all_retrieved
def _store(self, messages, response, *args, **kwargs):
"""
Stores the messages, returning any unstored messages after trying all
backends.
For each storage backend, any messages not stored are passed on to the
next backend.
"""
for storage in self.storages:
if messages:
messages = storage._store(messages, response,
remove_oldest=False)
# Even if there are no more messages, continue iterating to ensure
# storages which contained messages are flushed.
elif storage in self._used_storages:
storage._store([], response)
self._used_storages.remove(storage)
return messages
def add(self, level, message, extra_tags='', *args, **kwargs):
"""
Queues a message to be stored.
The message is only queued if it contained something and its level is
not less than the recording level (``self.level``).
"""
if not message:
return
# Check that the message level is not less than the recording level.
level = int(level)
if level < self.level:
return
# Add the message
self.added_new = True
message = Message(level, message, extra_tags=extra_tags)
for storage in self.storages:
if hasattr(storage, 'process_message'):
message = storage.process_message(message, *args, **kwargs)
if not message:
return
self._queued_messages.append(message)
def _prepare_messages(self, messages):
"""
Prepares a list of messages for storage.
"""
for message in messages:
if hasattr(message, '_prepare'):
message._prepare()
|
class FallbackStorage(BaseStorage):
'''
Tries to store all messages in the first backend, storing any unstored
messages in each subsequent backend backend, by default use
MESSAGES_STORAGES = ('messages_extends.storages.StickyStorage',
'messages_extends.storages.PersistentStorage',
'django.contrib.messages.storage.session.CookieStorage',
'django.contrib.messages.storage.session.SessionStorage'))
if you want change the backends, put your custom storages:
MESSAGES_STORAGES = ('foo.your_storage', 'cookie_storage')
in your settings
'''
def __init__(self, *args, **kwargs):
pass
def _get(self, *args, **kwargs):
'''
Gets a single list of messages from all storage backends.
'''
pass
def _store(self, messages, response, *args, **kwargs):
'''
Stores the messages, returning any unstored messages after trying all
backends.
For each storage backend, any messages not stored are passed on to the
next backend.
'''
pass
def add(self, level, message, extra_tags='', *args, **kwargs):
'''
Queues a message to be stored.
The message is only queued if it contained something and its level is
not less than the recording level (``self.level``).
'''
pass
def _prepare_messages(self, messages):
'''
Prepares a list of messages for storage.
'''
pass
| 6 | 5 | 15 | 1 | 9 | 5 | 4 | 0.72 | 1 | 3 | 0 | 0 | 5 | 3 | 5 | 5 | 97 | 11 | 50 | 16 | 44 | 36 | 43 | 16 | 37 | 6 | 1 | 3 | 19 |
3,865 |
Alidron/spyrk
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Alidron_spyrk/spyrk/spark_cloud.py
|
spyrk.spark_cloud.SparkCloud
|
class SparkCloud(object):
"""Provides access to the Spark Cloud.
>>> spark = SparkCloud(USERNAME, PASSWORD)
# Or
>>> spark = SparkCloud(ACCESS_TOKEN)
# List devices
>>> print spark.devices
# Access device
>>> spark.devices['captain_hamster']
# Or, shortcut form
>>> spark.captain_hamster
# List functions and variables of a device
>>> print spark.captain_hamster.functions
>>> print spark.captain_hamster.variables
# Tell if a device is connected
>>> print spark.captain_hamster.connected
# Call a function
>>> spark.captain_hamster.digitalwrite('D7', 'HIGH')
>>> print spark.captain_hamster.analogread('A0')
# (or any of your own custom function)
# Get variable value
>>> spark.captain_hamster.myvariable
"""
def __init__(self, username_or_access_token, password=None, spark_api=Hammock('https://api.particle.io'), timeout=30):
"""Initialise the connection to a Spark Cloud.
If you give a user name and password an access token will be requested.
The list of known devices attached to your account will be requested.
If you have several devices and not all of them are connected it will
take a long time to create the object. The Spark Cloud will take ~30
seconds (per device?) to reply as it waits for an answer from the
disconnected devices.
"""
self.spark_api = spark_api
if password is None:
self.access_token = username_or_access_token
else:
self.access_token = self._login(username_or_access_token, password)
self.spark_api = self.spark_api.v1.devices
self.timeout = timeout
@staticmethod
def _check_error(response):
"""Raises an exception if the Spark Cloud returned an error."""
if (not response.ok) or (response.status_code != 200):
raise Exception(
response.json()['error'] + ': ' +
response.json()['error_description']
)
def _login(self, username, password):
"""Proceed to login to the Spark Cloud and returns an access token."""
data = {
'username': username,
'password': password,
'grant_type': 'password'
}
r = self.spark_api.oauth.token.POST(
auth=('spark', 'spark'), data=data, timeout=self.timeout)
self._check_error(r)
return r.json()['access_token']
@timed_cached_property(ttl=10) # cache the device for 10 seconds.
def devices(self):
"""Create a dictionary of devices known to the user account."""
params = {'access_token': self.access_token}
r = self.spark_api.GET(params=params, timeout=self.timeout)
self._check_error(r)
json_list = r.json()
devices_dict = {}
if json_list:
# it is possible the keys in json responses varies from one device to another: compute the set of all keys
allKeys = {'functions', 'variables', 'api',
'requires_deep_update', 'status'} # added by device_info
for device_json in json_list:
allKeys.update(device_json.keys())
Device = _BaseDevice.make_device_class(
self, allKeys, timeout=self.timeout)
for d in json_list:
if d["connected"]:
info = self._get_device_info(d['id'])
d['functions'] = info.get('functions')
d['variables'] = info.get('variables')
d['api'] = self.spark_api(d['id'])
d['requires_deep_update'] = d.get(
'requires_deep_update', False)
d['status'] = info.get('status')
# ensure the set of all keys is present in the dictionnary (Device constructor requires all keys present)
[d.setdefault(key, None) for key in allKeys]
devices_dict[d['name']] = Device(**d)
return devices_dict
def _get_device_info(self, device_id):
"""Queries the Spark Cloud for detailed information about a device."""
params = {'access_token': self.access_token}
r = self.spark_api(device_id).GET(params=params, timeout=30)
self._check_error(r)
return r.json()
def __getattr__(self, name):
"""Returns a Device object as an attribute of the SparkCloud object."""
if name in self.devices:
return self.devices[name]
else:
raise AttributeError()
|
class SparkCloud(object):
'''Provides access to the Spark Cloud.
>>> spark = SparkCloud(USERNAME, PASSWORD)
# Or
>>> spark = SparkCloud(ACCESS_TOKEN)
# List devices
>>> print spark.devices
# Access device
>>> spark.devices['captain_hamster']
# Or, shortcut form
>>> spark.captain_hamster
# List functions and variables of a device
>>> print spark.captain_hamster.functions
>>> print spark.captain_hamster.variables
# Tell if a device is connected
>>> print spark.captain_hamster.connected
# Call a function
>>> spark.captain_hamster.digitalwrite('D7', 'HIGH')
>>> print spark.captain_hamster.analogread('A0')
# (or any of your own custom function)
# Get variable value
>>> spark.captain_hamster.myvariable
'''
def __init__(self, username_or_access_token, password=None, spark_api=Hammock('https://api.particle.io'), timeout=30):
'''Initialise the connection to a Spark Cloud.
If you give a user name and password an access token will be requested.
The list of known devices attached to your account will be requested.
If you have several devices and not all of them are connected it will
take a long time to create the object. The Spark Cloud will take ~30
seconds (per device?) to reply as it waits for an answer from the
disconnected devices.
'''
pass
@staticmethod
def _check_error(response):
'''Raises an exception if the Spark Cloud returned an error.'''
pass
def _login(self, username, password):
'''Proceed to login to the Spark Cloud and returns an access token.'''
pass
@timed_cached_property(ttl=10)
def devices(self):
'''Create a dictionary of devices known to the user account.'''
pass
def _get_device_info(self, device_id):
'''Queries the Spark Cloud for detailed information about a device.'''
pass
def __getattr__(self, name):
'''Returns a Device object as an attribute of the SparkCloud object.'''
pass
| 9 | 7 | 13 | 2 | 9 | 3 | 2 | 0.67 | 1 | 3 | 1 | 0 | 5 | 3 | 6 | 6 | 119 | 24 | 58 | 25 | 49 | 39 | 47 | 23 | 40 | 5 | 1 | 3 | 13 |
3,866 |
Alidron/spyrk
|
Alidron_spyrk/spyrk/spark_cloud.py
|
spyrk.spark_cloud._BaseDevice
|
class _BaseDevice(object):
"""Parent class for the dynamic Device class.
The Device class being made of whatever fields the Spark Cloud API gives us,
it has to be contructed on the fly once we know those fields.
The generated Device class is subclassing this _BaseDevice as well as a
nametuple.
The namedtuple host all static fields while _BaseDevice host methods
extending how a Device object should behave.
"""
@staticmethod
def make_device_class(spark_cloud, entries, timeout=30):
"""Returns a dynamic Device class based on what a GET device list from
the Spark Cloud returns.
spark_cloud parameter should be the caller instance of SparkCloud.
entries parameter should be the list of fields the Spark Cloud API is
returning.
"""
attrs = list(
set(
list(entries) + [
'requires_deep_update', 'functions', 'variables', 'api', 'status'
]
)
)
return type(
'Device',
(_BaseDevice, namedtuple('Device', attrs)),
{'__slots__': (), 'spark_cloud': spark_cloud, 'timeout' : timeout}
)
def __getattr__(self, name):
"""Returns virtual attributes corresponding to function or variable
names.
"""
params = {'access_token': self.spark_cloud.access_token}
if not self.connected:
raise IOError("{}.{} is not available: the spark device is not connected.".format(self.name, name))
if name in self.functions:
def fcall(*args):
data = {'params': ','.join(args)}
r = self.api(name).POST(params=params, data=data, timeout=self.timeout)
self.spark_cloud._check_error(r)
return r.json()['return_value']
return fcall
elif name in self.variables:
r = self.api(name).GET(params=params, timeout=30)
self.spark_cloud._check_error(r)
return r.json()['result']
else:
raise AttributeError()
|
class _BaseDevice(object):
'''Parent class for the dynamic Device class.
The Device class being made of whatever fields the Spark Cloud API gives us,
it has to be contructed on the fly once we know those fields.
The generated Device class is subclassing this _BaseDevice as well as a
nametuple.
The namedtuple host all static fields while _BaseDevice host methods
extending how a Device object should behave.
'''
@staticmethod
def make_device_class(spark_cloud, entries, timeout=30):
'''Returns a dynamic Device class based on what a GET device list from
the Spark Cloud returns.
spark_cloud parameter should be the caller instance of SparkCloud.
entries parameter should be the list of fields the Spark Cloud API is
returning.
'''
pass
def __getattr__(self, name):
'''Returns virtual attributes corresponding to function or variable
names.
'''
pass
def fcall(*args):
pass
| 5 | 3 | 17 | 3 | 12 | 3 | 2 | 0.53 | 1 | 4 | 0 | 0 | 1 | 0 | 2 | 2 | 63 | 14 | 32 | 10 | 27 | 17 | 19 | 9 | 15 | 4 | 1 | 1 | 6 |
3,867 |
Alidron/spyrk
|
Alidron_spyrk/setup.py
|
setup.PyTest
|
class PyTest(TestCommand):
user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = None
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run(self):
import pytest
errno = pytest.main(self.pytest_args)
sys.exit(errno)
|
class PyTest(TestCommand):
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
pass
| 4 | 0 | 4 | 0 | 4 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 3 | 3 | 3 | 3 | 15 | 2 | 13 | 10 | 8 | 0 | 13 | 10 | 8 | 1 | 1 | 0 | 3 |
3,868 |
Alignak-monitoring-contrib/alignak-backend-client
|
Alignak-monitoring-contrib_alignak-backend-client/test/alignak-backend-cli-tests.py
|
alignak-backend-cli-tests.TestAlignakBackendCli
|
class TestAlignakBackendCli(unittest2.TestCase):
"""Test class for alignak-backend-cli"""
@classmethod
def setUpClass(cls):
"""Prepare the Alignak backend"""
print("Start alignak backend")
cls.backend_address = "http://localhost:5000"
# Set DB name for tests
os.environ['ALIGNAK_BACKEND_MONGO_DBNAME'] = 'alignak-backend-cli-test'
# Delete used mongo DBs
exit_code = subprocess.call(
shlex.split(
'mongo %s --eval "db.dropDatabase()"' % os.environ['ALIGNAK_BACKEND_MONGO_DBNAME'])
)
assert exit_code == 0
cls.pid = subprocess.Popen([
'uwsgi', '--plugin', 'python', '-w', 'alignakbackend:app',
'--socket', '0.0.0.0:5000', '--protocol=http', '--enable-threads', '--pidfile',
'/tmp/uwsgi.pid'
])
time.sleep(3)
@classmethod
def tearDownClass(cls):
"""
Stop the backend at the end of the tests
:param module:
:return: None
"""
print("Stop alignak backend")
cls.pid.kill()
def test_start_00_errors(self):
# pylint: disable=no-self-use
""" Start CLI without parameters or erroneous parameters"""
print('test application default start')
print("Coverage env: %s" % os.environ.get('COV_CORE_SOURCE', 'unknown'))
fnull = open(os.devnull, 'w')
print("Launching application without parameters...")
# from alignak_backend_client.backend_client import main
# print("Main: %s" % main())
exit_code = subprocess.call(
shlex.split('python ../alignak_backend_client/backend_client.py')
)
assert exit_code == 64
print("Launching application with erroneous parameters...")
exit_code = subprocess.call(
shlex.split('python ../alignak_backend_client/backend_client.py -Z')
)
assert exit_code == 64
exit_code = subprocess.call(
shlex.split('python ../alignak_backend_client/backend_client.py -t realm unknown_action')
)
assert exit_code == 64
exit_code = subprocess.call(
shlex.split('python ../alignak_backend_client/backend_client.py -b http://mybackend -t realm list')
)
assert exit_code == 1
exit_code = subprocess.call(
shlex.split('python ../alignak_backend_client/backend_client.py -b http://127.0.0.1:5000 -u fake -p faka -t realm list')
)
assert exit_code == 1
def test_start_00_help(self):
# pylint: disable=no-self-use
""" Start CLI with help parameter"""
print("Launching application with CLI help...")
exit_code = subprocess.call(
shlex.split('python ../alignak_backend_client/backend_client.py -h')
)
assert exit_code == 0
def test_start_00_quiet_verbose(self):
# pylint: disable=no-self-use
""" Start CLI with quiet/verbose mode"""
# Quiet mode
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "/tmp" -t realm -q list'
))
assert exit_code == 0
# Verbosemode
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "/tmp" -t realm -v list'
))
assert exit_code == 0
def test_start_01_get_default(self):
# pylint: disable=no-self-use
""" CLI to get default backend objects"""
# work_dir = os.path.abspath(os.path.dirname(__file__))
work_dir = '/tmp'
files = ['alignak-object-list-realms.json',
'alignak-object-list-commands.json',
'alignak-object-list-timeperiods.json',
'alignak-object-list-usergroups.json',
'alignak-object-list-hostgroups.json',
'alignak-object-list-servicegroups.json',
'alignak-model-list-users.json',
'alignak-model-list-hosts.json',
'alignak-model-list-services.json',
'alignak-object-list-users.json',
'alignak-object-list-hosts.json',
'alignak-object-list-services.json']
for filename in files:
if os.path.exists(os.path.join(work_dir, filename)):
os.remove(os.path.join(work_dir, filename))
print("Getting the backend default elements...")
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -t realm list' % work_dir
))
assert exit_code == 0
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -t command list' % work_dir
))
assert exit_code == 0
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -t timeperiod list' % work_dir
))
assert exit_code == 0
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -t usergroup list' % work_dir
))
assert exit_code == 0
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -t hostgroup list' % work_dir
))
assert exit_code == 0
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -t servicegroup list' % work_dir
))
assert exit_code == 0
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -t user -m list' % work_dir
))
assert exit_code == 0
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -t host -m list' % work_dir
))
assert exit_code == 0
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -t service -m list' % work_dir
))
assert exit_code == 0
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -t user list' % work_dir
))
assert exit_code == 0
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -t host list' % work_dir
))
assert exit_code == 0
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -t service list' % work_dir
))
assert exit_code == 0
for filename in files:
print("Exists %s?" % filename)
assert os.path.exists(os.path.join(work_dir, filename))
def test_start_02_create(self):
# pylint: disable=no-self-use
""" CLI to create backend objects"""
work_dir = os.path.abspath(os.path.dirname(__file__))
work_dir = os.path.join(work_dir, 'json')
print("Creating backend elements...")
# Create commands
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -t command -d checks-pack-commands.json add' % work_dir
))
assert exit_code == 0
# Create templates
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -t user -d checks-pack-users-templates.json add' % work_dir
))
assert exit_code == 0
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -t host -d checks-pack-hosts-templates.json add' % work_dir
))
assert exit_code == 0
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -t service -d checks-pack-services-templates.json add' % work_dir
))
assert exit_code == 0
# Create a realm
# First, dry-run ... it will not create!
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -t realm -c add test_realm'
))
assert exit_code == 0
# Then we create :)
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -t realm add test_realm'
))
assert exit_code == 0
# Already exists!
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -t realm add test_realm'
))
assert exit_code == 2
# Create hosts
# First, dry-run ... it will not create!
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -t host -c add host_without_template' % work_dir
))
assert exit_code == 0
# Then we create :)
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -t host add host_without_template' % work_dir
))
assert exit_code == 0
# Already exists!
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -t host add host_without_template' % work_dir
))
assert exit_code == 2
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -t host -T windows-passive-host add host_test' % work_dir
))
assert exit_code == 0
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -t host -d example_host_data.json add host_test_2' % work_dir
))
assert exit_code == 0
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -t host -d example_host_from_template.json add host_test_3' % work_dir
))
assert exit_code == 0
# Get hosts and services lists
# All the hosts (implicit default value)
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" list' % work_dir
))
assert exit_code == 0
# All the hosts
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -t host list' % work_dir
))
assert exit_code == 0
# Embed the linked resources
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -t host -e list' % work_dir
))
assert exit_code == 0
# A specific host
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -t host get host_test' % work_dir
))
assert exit_code == 0
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -t host -e get host_test' % work_dir
))
assert exit_code == 0
# A specific host and its services in the same output
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -t host get host_test/*' % work_dir
))
assert exit_code == 0
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -t host -e get host_test/*' % work_dir
))
assert exit_code == 0
# A specific host service
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -t service get host_test/nsca_services' % work_dir
))
assert exit_code == 0
# All the services
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -t service list' % work_dir
))
assert exit_code == 0
# The services of the host host_test
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -t service list host_test/*' % work_dir
))
assert exit_code == 0
# The services of an unknown host
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -t service list host_unknown/*' % work_dir
))
assert exit_code == 2
# A specific service of the host host_test
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -t service list host_test/nsca_cpu' % work_dir
))
assert exit_code == 0
def test_start_02_create_nrpe(self):
# pylint: disable=no-self-use
""" CLI to create backend objects - several services with the same name"""
work_dir = os.path.abspath(os.path.dirname(__file__))
work_dir = os.path.join(work_dir, 'json/nrpe')
print("Creating backend elements...")
# Create commands
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -t command -d commands.json add' % work_dir
))
assert exit_code == 0
# Create templates
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -t host -d hosts-templates.json add' % work_dir
))
assert exit_code == 0
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -t service -d services-templates.json add' % work_dir
))
assert exit_code == 0
def test_start_03_get_elements(self):
# pylint: disable=no-self-use
""" CLI to get default backend objects"""
# work_dir = os.path.abspath(os.path.dirname(__file__))
work_dir = '/tmp'
files = ['alignak-object-list-realms.json',
'alignak-object-list-commands.json',
'alignak-object-list-timeperiods.json',
'alignak-object-list-usergroups.json',
'alignak-object-list-hostgroups.json',
'alignak-object-list-servicegroups.json',
'alignak-model-list-users.json',
'alignak-model-list-hosts.json',
'alignak-model-list-services.json',
'alignak-object-list-users.json',
'alignak-object-list-hosts.json',
'alignak-object-list-services.json']
for filename in files:
if os.path.exists(os.path.join(work_dir, filename)):
os.remove(os.path.join(work_dir, filename))
print("Getting the backend default elements...")
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -t realm list' % work_dir
))
assert exit_code == 0
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -t command list' % work_dir
))
assert exit_code == 0
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -t timeperiod list' % work_dir
))
assert exit_code == 0
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -t usergroup list' % work_dir
))
assert exit_code == 0
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -t hostgroup list' % work_dir
))
assert exit_code == 0
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -t servicegroup list' % work_dir
))
assert exit_code == 0
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -t user -m list' % work_dir
))
assert exit_code == 0
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -t host -m list' % work_dir
))
assert exit_code == 0
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -t service -m list' % work_dir
))
assert exit_code == 0
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -t user list' % work_dir
))
assert exit_code == 0
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -t host list' % work_dir
))
assert exit_code == 0
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -t service list' % work_dir
))
assert exit_code == 0
for filename in files:
print("Exists %s?" % filename)
assert os.path.exists(os.path.join(work_dir, filename))
def test_start_04_update(self):
# pylint: disable=no-self-use
""" CLI to create backend objects"""
work_dir = os.path.abspath(os.path.dirname(__file__))
work_dir = os.path.join(work_dir, 'json')
print("Updating backend elements...")
# Unknown data file
# First, dry-run ... it will not update!
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -t host -d unknown.json -c update host_test' % work_dir
))
assert exit_code == 2
# Then we update
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -t host -d unknown.json update host_test' % work_dir
))
assert exit_code == 2
# Update an host
# First, dry-run ... it will not update!
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -t host -d example_host_livestate.json -c update host_test' % work_dir
))
assert exit_code == 0
# Then we update
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -t host -d example_host_livestate.json update host_test' % work_dir
))
assert exit_code == 0
# And again...
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -t host -d example_host_livestate.json update host_test' % work_dir
))
assert exit_code == 0
# And again... re-using read data
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -t host -d example_host_livestate.json update -i host_test' % work_dir
))
assert exit_code == 0
# Update a service
# First, dry-run ... it will not update!
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -t service -d example_service_livestate.json -c update host_test/nsca_cpu' % work_dir
))
assert exit_code == 0
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -t service -d example_service_livestate.json update host_test/nsca_cpu' % work_dir
))
assert exit_code == 0
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -t service -d example_service_livestate.json update -i host_test/nsca_cpu' % work_dir
))
assert exit_code == 0
# Get hosts and services lists
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -c -t host list' % work_dir
))
assert exit_code == 0
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -t host list' % work_dir
))
assert exit_code == 0
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -c -t service list' % work_dir
))
assert exit_code == 0
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -t service list' % work_dir
))
assert exit_code == 0
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -c -t service list host_test/*' % work_dir
))
assert exit_code == 0
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -t service list host_test/*' % work_dir
))
assert exit_code == 0
def test_start_05_delete(self):
# pylint: disable=no-self-use
""" CLI to delete backend objects"""
work_dir = os.path.abspath(os.path.dirname(__file__))
work_dir = os.path.join(work_dir, 'json')
print("Deleting backend elements...")
# Delete all host services
# First, dry-run ... it will not delete!
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -t service delete -c host_test/*'
))
assert exit_code == 0
# Then we delete
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -t service delete host_test/*'
))
assert exit_code == 0
# Delete host
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -t host delete host_test'
))
assert exit_code == 0
# Delete a service of an host
# First, dry-run ... it will not delete!
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -t service delete -c host_test_2/nsca_services'
))
assert exit_code == 0
# Delete an unknown service of an host
# First, dry-run ...
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -t service delete -c host_test_2/unknown_service'
))
assert exit_code == 2
# Then real request
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -t service delete host_test_2/unknown_service'
))
assert exit_code == 2
# Delete all users
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -t user delete'
))
assert exit_code == 0
# Get hosts and services lists
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -t host list' % work_dir
))
assert exit_code == 0
exit_code = subprocess.call(shlex.split(
'python ../alignak_backend_client/backend_client.py -f "%s" -t service list' % work_dir
))
assert exit_code == 0
|
class TestAlignakBackendCli(unittest2.TestCase):
'''Test class for alignak-backend-cli'''
@classmethod
def setUpClass(cls):
'''Prepare the Alignak backend'''
pass
@classmethod
def tearDownClass(cls):
'''
Stop the backend at the end of the tests
:param module:
:return: None
'''
pass
def test_start_00_errors(self):
''' Start CLI without parameters or erroneous parameters'''
pass
def test_start_00_help(self):
''' Start CLI with help parameter'''
pass
def test_start_00_quiet_verbose(self):
''' Start CLI with quiet/verbose mode'''
pass
def test_start_01_get_default(self):
''' CLI to get default backend objects'''
pass
def test_start_02_create(self):
''' CLI to create backend objects'''
pass
def test_start_02_create_nrpe(self):
''' CLI to create backend objects - several services with the same name'''
pass
def test_start_03_get_elements(self):
''' CLI to get default backend objects'''
pass
def test_start_04_update(self):
''' CLI to create backend objects'''
pass
def test_start_05_delete(self):
''' CLI to delete backend objects'''
pass
| 14 | 12 | 49 | 3 | 38 | 7 | 2 | 0.18 | 1 | 1 | 0 | 0 | 9 | 0 | 11 | 11 | 549 | 47 | 424 | 35 | 410 | 78 | 225 | 33 | 213 | 4 | 1 | 2 | 17 |
3,869 |
Alignak-monitoring-contrib/alignak-backend-client
|
Alignak-monitoring-contrib_alignak-backend-client/test/test_delete.py
|
test_delete.TestDeleteClient
|
class TestDeleteClient(unittest2.TestCase):
"""
Test delete items
"""
@classmethod
def setUpClass(cls):
"""
Function used in the beginning of test to prepare the backend
:param module:
:return: None
"""
print("start alignak backend")
cls.backend_address = "http://localhost:5000"
# Set DB name for tests
os.environ['ALIGNAK_BACKEND_MONGO_DBNAME'] = 'alignak-backend-test'
# Delete used mongo DBs
exit_code = subprocess.call(
shlex.split(
'mongo %s --eval "db.dropDatabase()"' % os.environ['ALIGNAK_BACKEND_MONGO_DBNAME'])
)
assert exit_code == 0
cls.pid = subprocess.Popen([
'uwsgi', '--plugin', 'python', '-w', 'alignakbackend:app',
'--socket', '0.0.0.0:5000', '--protocol=http', '--enable-threads', '--pidfile',
'/tmp/uwsgi.pid'
])
time.sleep(3)
headers = {'Content-Type': 'application/json'}
params = {'username': 'admin', 'password': 'admin', 'action': 'generate'}
# get token
response = requests.post(cls.backend_address + '/login', json=params, headers=headers)
resp = response.json()
cls.token = resp['token']
cls.auth = requests.auth.HTTPBasicAuth(cls.token, '')
# get realms
response = requests.get(cls.backend_address + '/realm',
auth=cls.auth)
resp = response.json()
cls.realmAll_id = resp['_items'][0]['_id']
@classmethod
def tearDownClass(cls):
"""
Stop the backend at the end of the tests
:param module:
:return: None
"""
print("stop alignak backend")
cls.pid.kill()
def test_1_delete_successful(self):
"""
Test delete a timeperiod successfully
:return: None
"""
backend = Backend(self.backend_address)
backend.login('admin', 'admin')
# Create a new timeperiod
data = {
"name": "Testing TP",
"alias": "Test TP",
"dateranges": [
{u'monday': u'09:00-17:00'},
{u'tuesday': u'09:00-17:00'},
{u'wednesday': u'09:00-17:00'},
{u'thursday': u'09:00-17:00'},
{u'friday': u'09:00-17:00'}
],
"_realm": self.realmAll_id
}
response = backend.post('timeperiod', data=data)
assert_true(response['_status'] == 'OK')
timeperiod_id = response['_id']
timeperiod_etag = response['_etag']
headers = {'If-Match': timeperiod_etag}
response = backend.delete('/'.join(['timeperiod', timeperiod_id]), headers=headers)
assert_true(response['_status'] == 'OK')
def test_2_delete_exceptions(self):
"""
Test delete a timeperiod with errors (so exceptions)
:return: None
"""
backend = Backend(self.backend_address)
backend.login('admin', 'admin')
# Create a new timeperiod
data = {
"name": "Testing TP",
"alias": "Test TP",
"dateranges": [
{u'monday': u'09:00-17:00'},
{u'tuesday': u'09:00-17:00'},
{u'wednesday': u'09:00-17:00'},
{u'thursday': u'09:00-17:00'},
{u'friday': u'09:00-17:00'}
],
"_realm": self.realmAll_id
}
response = backend.post('timeperiod', data=data)
assert_true(response['_status'] == 'OK')
timeperiod_id = response['_id']
timeperiod_etag = response['_etag']
with assert_raises(BackendException) as cm:
headers = {'If-Match': timeperiod_etag}
response = backend.delete('/'.join(['timeperiod', '5' + timeperiod_id]), headers)
ex = cm.exception
print('exception:', str(ex.code))
assert_true(ex.code == 404, str(ex))
def test_3_delete_connection_error(self):
"""
Backend connection error when deleting an object...
:return: None
"""
print('test connection error when deleting an object')
# Create client API
backend = Backend(self.backend_address)
backend.login('admin', 'admin')
# Create a new timeperiod
data = {
"name": "Testing TP 2",
"alias": "Test TP 2",
"dateranges": [
{u'monday': u'09:00-17:00'},
{u'tuesday': u'09:00-17:00'},
{u'wednesday': u'09:00-17:00'},
{u'thursday': u'09:00-17:00'},
{u'friday': u'09:00-17:00'}
],
"_realm": self.realmAll_id
}
response = backend.post('timeperiod', data=data)
assert_true(response['_status'] == 'OK')
timeperiod_id = response['_id']
timeperiod_etag = response['_etag']
headers = {'If-Match': timeperiod_etag}
response = backend.delete('/'.join(['timeperiod', timeperiod_id]), headers=headers)
assert_true(response['_status'] == 'OK')
print("stop the alignak backend")
self.pid.kill()
with assert_raises(BackendException) as cm:
headers = {'If-Match': timeperiod_etag}
response = backend.delete('/'.join(['timeperiod', timeperiod_id]), headers=headers)
assert_true(response['_status'] == 'OK')
ex = cm.exception
self.assertEqual(ex.code, 1000)
|
class TestDeleteClient(unittest2.TestCase):
'''
Test delete items
'''
@classmethod
def setUpClass(cls):
'''
Function used in the beginning of test to prepare the backend
:param module:
:return: None
'''
pass
@classmethod
def tearDownClass(cls):
'''
Stop the backend at the end of the tests
:param module:
:return: None
'''
pass
def test_1_delete_successful(self):
'''
Test delete a timeperiod successfully
:return: None
'''
pass
def test_2_delete_exceptions(self):
'''
Test delete a timeperiod with errors (so exceptions)
:return: None
'''
pass
def test_3_delete_connection_error(self):
'''
Backend connection error when deleting an object...
:return: None
'''
pass
| 8 | 6 | 31 | 4 | 21 | 6 | 1 | 0.3 | 1 | 4 | 2 | 0 | 3 | 0 | 5 | 5 | 166 | 24 | 109 | 35 | 101 | 33 | 66 | 31 | 60 | 1 | 1 | 1 | 5 |
3,870 |
Alignak-monitoring-contrib/alignak-backend-client
|
Alignak-monitoring-contrib_alignak-backend-client/test/test_get.py
|
test_get.TestGetClient
|
class TestGetClient(unittest2.TestCase):
"""
Test get items
"""
@classmethod
def setUpClass(cls):
"""
Function used in the beginning of test to prepare the backend
:param module:
:return: None
"""
print("start alignak backend")
cls.backend_address = "http://localhost:5000"
# Set DB name for tests
os.environ['ALIGNAK_BACKEND_MONGO_DBNAME'] = 'alignak-backend-test'
# Delete used mongo DBs
exit_code = subprocess.call(
shlex.split(
'mongo %s --eval "db.dropDatabase()"' % os.environ['ALIGNAK_BACKEND_MONGO_DBNAME'])
)
assert exit_code == 0
cls.pid = subprocess.Popen([
'uwsgi', '--plugin', 'python', '-w', 'alignakbackend:app',
'--socket', '0.0.0.0:5000', '--protocol=http', '--enable-threads', '--pidfile',
'/tmp/uwsgi.pid'
])
time.sleep(3)
headers = {'Content-Type': 'application/json'}
params = {'username': 'admin', 'password': 'admin', 'action': 'generate'}
# get token
response = requests.post(cls.backend_address + '/login', json=params, headers=headers)
resp = response.json()
cls.token = resp['token']
cls.auth = requests.auth.HTTPBasicAuth(cls.token, '')
# get realms
response = requests.get(cls.backend_address + '/realm',
auth=cls.auth)
resp = response.json()
cls.realmAll_id = resp['_items'][0]['_id']
# Add many hosts
headers = {'Content-Type': 'application/json'}
params = {'name': 'group', '_realm': cls.realmAll_id}
for num in range(100):
params['name'] = 'group ' + str(num)
response = requests.post(cls.backend_address + '/hostgroup', json=params,
headers=headers, auth=cls.auth)
print(response.__dict__)
assert_equal(response.status_code, 201)
@classmethod
def tearDownClass(cls):
"""
Stop the backend at the end of the tests
:param module:
:return: None
"""
print("stop alignak backend")
cls.pid.kill()
def test_1_domains(self):
"""
Test get domains (= all resource/enpoints available)
:return: None
"""
# Create client API
backend = Backend(self.backend_address)
backend.login('admin', 'admin')
# Get all available endpoints
print('get all domains')
# Filter the templates ...
items = backend.get_domains()
print("Got %d elements: %s" % (len(items), items))
assert_true('_items' not in items)
# assert_true(len(items) == 26)
for item in items:
assert_true('href' in item)
assert_true('title' in item)
print("Domain: ", item)
def test_2_all_pages(self):
"""
Get all items (so all pages) of a resource
:return: None
"""
print('get all elements on an endpoint')
# Create client API
backend = Backend(self.backend_address)
backend.login('admin', 'admin')
# Get all elements
print('get all hostgroups at once')
params = {'max_results': 3}
items = backend.get_all('hostgroup', params=params)
hostgroups = items['_items']
for hostgroup in hostgroups:
print("Group: %s" % hostgroup['name'])
self.assertEqual(len(hostgroups), 101)
def test_3_page_after_page(self):
"""
Get page after page manually
:return: None
"""
print('backend connection with username/password')
# Create client API
backend = Backend(self.backend_address)
backend.login('admin', 'admin')
# Start with first page ... max_results=3
last_page = False
parameters = {'max_results': 3, 'page': 1}
items = []
while not last_page:
resp = backend.get('hostgroup', params=parameters)
assert_true('_items' in resp)
assert_true('_links' in resp)
assert_true('_meta' in resp)
page_number = int(resp['_meta']['page'])
total = int(resp['_meta']['total'])
max_results = int(resp['_meta']['max_results'])
assert_equal(total, 101)
assert_equal(max_results, 3)
if 'next' in resp['_links']:
# It has pagination, so get items of all pages
parameters['page'] = page_number + 1
else:
last_page = True
assert_equal(page_number, 34)
items.extend(resp['_items'])
print("----------")
print("Got %d elements:" % len(items))
assert_equal(len(items), 101)
# Start with first page ... max_results=10
last_page = False
parameters = {'max_results': 10, 'page': 1}
items = []
while not last_page:
resp = backend.get('hostgroup', params=parameters)
assert_true('_items' in resp)
assert_true('_links' in resp)
assert_true('_meta' in resp)
page_number = int(resp['_meta']['page'])
total = int(resp['_meta']['total'])
max_results = int(resp['_meta']['max_results'])
assert_equal(total, 101)
assert_equal(max_results, 10)
if 'next' in resp['_links']:
# It has pagination, so get items of all pages
parameters['page'] = page_number + 1
else:
last_page = True
assert_equal(page_number, 11)
items.extend(resp['_items'])
# Start with first page ... no max_results
last_page = False
parameters = {'page': 1}
items = []
while not last_page:
resp = backend.get('hostgroup', params=parameters)
assert_true('_items' in resp)
assert_true('_links' in resp)
assert_true('_meta' in resp)
page_number = int(resp['_meta']['page'])
total = int(resp['_meta']['total'])
max_results = int(resp['_meta']['max_results'])
assert_equal(total, 101)
assert_equal(max_results, 25)
if 'next' in resp['_links']:
# It has pagination, so get items of all pages
parameters['page'] = page_number + 1
else:
last_page = True
assert_equal(page_number, 5)
items.extend(resp['_items'])
print("----------")
print("Got %d elements:" % len(items))
assert_equal(len(items), 101)
def test_4_connection_error(self):
"""
Backend connection error when getting an object...
:return: None
"""
print('test connection error when getting an object')
# Create client API
backend = Backend(self.backend_address)
backend.login('admin', 'admin')
print("stop the alignak backend")
self.pid.kill()
with assert_raises(BackendException) as cm:
print('get all hostgroups at once')
params = {'max_results': 3}
backend.get_all('hostgroup', params=params)
ex = cm.exception
self.assertEqual(ex.code, 1000)
|
class TestGetClient(unittest2.TestCase):
'''
Test get items
'''
@classmethod
def setUpClass(cls):
'''
Function used in the beginning of test to prepare the backend
:param module:
:return: None
'''
pass
@classmethod
def tearDownClass(cls):
'''
Stop the backend at the end of the tests
:param module:
:return: None
'''
pass
def test_1_domains(self):
'''
Test get domains (= all resource/enpoints available)
:return: None
'''
pass
def test_2_all_pages(self):
'''
Get all items (so all pages) of a resource
:return: None
'''
pass
def test_3_page_after_page(self):
'''
Get page after page manually
:return: None
'''
pass
def test_4_connection_error(self):
'''
Backend connection error when getting an object...
:return: None
'''
pass
| 9 | 7 | 35 | 4 | 23 | 8 | 3 | 0.34 | 1 | 6 | 2 | 0 | 4 | 0 | 6 | 6 | 218 | 30 | 140 | 35 | 131 | 48 | 126 | 32 | 119 | 7 | 1 | 2 | 15 |
3,871 |
Alignak-monitoring-contrib/alignak-backend-client
|
Alignak-monitoring-contrib_alignak-backend-client/alignak_backend_client/client.py
|
alignak_backend_client.client.BackendException
|
class BackendException(Exception):
"""Specific backend exception class.
This specific exception is raised by the module when an error is encountered.
It provides an error code, an error message and the backend response.
Defined error codes:
- 1000: first stage error, exception raising between the client and the backend when connecting
- <1000: second stage error. Connection between client and backend is ok,
but the backend returns errors on
requests
"""
# TODO: create a special Exception for managing problems in the session,
# and another inside the response decoding
def __init__(self, code, message, response=None):
# Call the base class constructor with the parameters it needs
super(BackendException, self).__init__(message)
self.code = code
self.message = message
self.response = response
logger.error(self.__str__())
def __str__(self):
"""Exception to String"""
if self.response and not isinstance(self.response, dict):
return "BackendException raised with code {0} and message:" \
" {1} - {2}".format(self.code, self.message, self.response.content)
return "BackendException raised with code {0} and message:" \
" {1} - {2}".format(self.code, self.message, self.response)
|
class BackendException(Exception):
'''Specific backend exception class.
This specific exception is raised by the module when an error is encountered.
It provides an error code, an error message and the backend response.
Defined error codes:
- 1000: first stage error, exception raising between the client and the backend when connecting
- <1000: second stage error. Connection between client and backend is ok,
but the backend returns errors on
requests
'''
def __init__(self, code, message, response=None):
pass
def __str__(self):
'''Exception to String'''
pass
| 3 | 2 | 8 | 1 | 6 | 1 | 2 | 1 | 1 | 2 | 0 | 0 | 2 | 3 | 2 | 12 | 31 | 5 | 13 | 6 | 10 | 13 | 11 | 6 | 8 | 2 | 3 | 1 | 3 |
3,872 |
Alignak-monitoring-contrib/alignak-backend-client
|
Alignak-monitoring-contrib_alignak-backend-client/test/test_login.py
|
test_login.TestLoginLogout
|
class TestLoginLogout(unittest2.TestCase):
"""
Test login and logout to the backend
"""
@classmethod
def setUpClass(cls):
"""
Function used in the beginning of test to prepare the backend
:param module:
:return: None
"""
print("start alignak backend")
cls.backend_address = "http://localhost:5000"
# Set DB name for tests
os.environ['ALIGNAK_BACKEND_MONGO_DBNAME'] = 'alignak-backend-test'
# Delete used mongo DBs
exit_code = subprocess.call(
shlex.split(
'mongo %s --eval "db.dropDatabase()"' % os.environ['ALIGNAK_BACKEND_MONGO_DBNAME'])
)
assert exit_code == 0
cls.pid = subprocess.Popen([
'uwsgi', '--plugin', 'python', '-w', 'alignakbackend:app',
'--socket', '0.0.0.0:5000', '--protocol=http', '--enable-threads', '--pidfile',
'/tmp/uwsgi.pid'
])
time.sleep(3)
@classmethod
def tearDownClass(cls):
"""
Stop the backend at the end of the tests
:param module:
:return: None
"""
print("")
print("stop alignak backend")
cls.pid.kill()
def test_01_creation(self):
"""
Create connection with the backend
:return: None
"""
print('')
print('test creation')
print('Create client API for URL:', self.backend_address)
backend = Backend(self.backend_address)
print('object:', backend)
print('authenticated:', backend.authenticated)
print('endpoint:', backend.url_endpoint_root)
print('token:', backend.token)
assert_false(backend.authenticated)
assert_true(backend.url_endpoint_root == self.backend_address)
assert_equal(backend.token, None)
print('Create client API (trailing slash is removed)')
backend = Backend(self.backend_address + '/')
print('object:', backend)
print('authenticated:', backend.authenticated)
print('endpoint:', backend.url_endpoint_root)
print('token:', backend.token)
assert_false(backend.authenticated)
assert_true(backend.url_endpoint_root == self.backend_address)
assert_equal(backend.token, None)
def test_02_bad_parameters(self):
"""
Test with bad username/password
:return: None
"""
print('')
print('test refused connection with username/password')
# Create client API
backend = Backend(self.backend_address)
print('Login - missing credentials ...')
with assert_raises(BackendException) as cm:
backend.login(None, None)
ex = cm.exception
print('exception:', str(ex.code))
assert_true(ex.code == 1000, str(ex))
assert_true("Missing mandatory parameters" in str(ex))
print('Login - missing credentials ...')
with assert_raises(BackendException) as cm:
backend.login('', '')
ex = cm.exception
print('exception:', str(ex.code))
assert_true(ex.code == 1000, str(ex))
assert_true("Missing mandatory parameters" in str(ex))
print('Login - missing credentials ...')
with assert_raises(BackendException) as cm:
backend.login('admin', '')
ex = cm.exception
print('exception:', str(ex.code))
assert_true(ex.code == 1000, str(ex))
print("invalid username/password, login refused")
result = backend.login('admin', 'bad_password')
assert_false(result)
assert_false(backend.authenticated)
def test_03_token_generate(self):
"""
Test token generation
:return: None
"""
print('')
print('force authentication token generation')
# Create client API
backend = Backend(self.backend_address)
print('request new token generation')
backend.login('admin', 'admin', 'force')
assert_true(backend.authenticated)
token1 = backend.token
print('token1:', token1)
print('request new token generation')
backend.login('admin', 'admin', 'force')
print('authenticated:', backend.authenticated)
assert_true(backend.authenticated)
token2 = backend.token
print('token2:', token2)
assert_true(token1 != token2)
def test_04_login(self):
"""
Test with right username / password
:return: None
"""
print('')
print('test accepted connection with username/password')
# Create client API
backend = Backend(self.backend_address)
print('Login ...')
assert backend.login('admin', 'admin')
print('authenticated:', backend.authenticated)
print('token:', backend.token)
assert_true(backend.authenticated)
print('Logout ...')
backend.logout()
print('authenticated:', backend.authenticated)
print('token:', backend.token)
assert_false(backend.authenticated)
print('Login ...')
print('authenticated:', backend.authenticated)
assert backend.login('admin', 'admin')
print('authenticated:', backend.authenticated)
print('token:', backend.token)
assert_true(backend.authenticated)
print('Logout ...')
backend.logout()
print('authenticated:', backend.authenticated)
print('token:', backend.token)
assert_false(backend.authenticated)
print('Logout ...')
backend.logout()
print('authenticated:', backend.authenticated)
print('token:', backend.token)
assert_false(backend.authenticated)
print('get object ... must be refused!')
with assert_raises(BackendException) as cm:
backend.get('host')
ex = cm.exception
print('exception:', str(ex.code))
assert_true(ex.code == 401, str(ex))
print('get_all object ... must be refused!')
with assert_raises(BackendException) as cm:
backend.get_all('host')
ex = cm.exception
print('exception:', str(ex.code))
assert_true(ex.code == 401, str(ex))
print('get all domains ... must be refused!')
with assert_raises(BackendException) as cm:
backend.get_domains()
ex = cm.exception
print('exception:', str(ex.code))
assert_true(ex.code == 401, str(ex))
print('post data ... must be refused!')
with assert_raises(BackendException) as cm:
data = {'fake': 'fake'}
backend.post('user', data=data)
ex = cm.exception
print('exception:', str(ex.code))
assert_true(ex.code == 401, str(ex))
print('patch data ... must be refused!')
with assert_raises(BackendException) as cm:
data = {'fake': 'fake'}
headers = {'If-Match': ''}
backend.patch('user', data=data, headers=headers)
ex = cm.exception
print('exception:', str(ex.code))
assert_true(ex.code == 405, str(ex))
print('delete data ... must be refused!')
with assert_raises(BackendException) as cm:
headers = {'If-Match': ''}
backend.delete('user', headers=headers)
ex = cm.exception
print('exception:', str(ex.code))
assert_true(ex.code == 401, str(ex))
|
class TestLoginLogout(unittest2.TestCase):
'''
Test login and logout to the backend
'''
@classmethod
def setUpClass(cls):
'''
Function used in the beginning of test to prepare the backend
:param module:
:return: None
'''
pass
@classmethod
def tearDownClass(cls):
'''
Stop the backend at the end of the tests
:param module:
:return: None
'''
pass
def test_01_creation(self):
'''
Create connection with the backend
:return: None
'''
pass
def test_02_bad_parameters(self):
'''
Test with bad username/password
:return: None
'''
pass
def test_03_token_generate(self):
'''
Test token generation
:return: None
'''
pass
def test_04_login(self):
'''
Test with right username / password
:return: None
'''
pass
| 9 | 7 | 36 | 6 | 26 | 5 | 1 | 0.22 | 1 | 4 | 2 | 0 | 4 | 0 | 6 | 6 | 230 | 39 | 157 | 23 | 148 | 34 | 148 | 19 | 141 | 1 | 1 | 1 | 6 |
3,873 |
Alignak-monitoring-contrib/alignak-backend-client
|
Alignak-monitoring-contrib_alignak-backend-client/test/test_login.py
|
test_login.TestLoginLogoutConnection
|
class TestLoginLogoutConnection(unittest2.TestCase):
"""
Test login and logout to the backend - backend is not available
"""
@classmethod
def setUpClass(cls):
"""
Function used in the beginning of test to prepare the backend
:param module:
:return: None
"""
print("Do not start alignak backend")
cls.backend_address = "http://localhost:5000"
def test_05_login(self):
"""
Test with right username / password
:return: None
"""
print('test connection error when login')
# Create client API
backend = Backend(self.backend_address)
print('Login ... must be refused!')
with assert_raises(BackendException) as cm:
backend.login('admin', 'admin')
ex = cm.exception
self.assertEqual(ex.code, 1000)
|
class TestLoginLogoutConnection(unittest2.TestCase):
'''
Test login and logout to the backend - backend is not available
'''
@classmethod
def setUpClass(cls):
'''
Function used in the beginning of test to prepare the backend
:param module:
:return: None
'''
pass
def test_05_login(self):
'''
Test with right username / password
:return: None
'''
pass
| 4 | 3 | 13 | 3 | 6 | 5 | 1 | 1 | 1 | 2 | 2 | 0 | 1 | 0 | 2 | 2 | 33 | 7 | 13 | 7 | 9 | 13 | 12 | 5 | 9 | 1 | 1 | 1 | 2 |
3,874 |
Alignak-monitoring-contrib/alignak-backend-client
|
Alignak-monitoring-contrib_alignak-backend-client/test/test_multiprocess.py
|
test_multiprocess.test_multiprocess
|
class test_multiprocess(unittest2.TestCase):
"""
test multiprocess to get items in backend
"""
@classmethod
def setUpClass(cls):
"""
Function used in the beginning of test to prepare the backend
:param module:
:return: None
"""
print("start alignak backend")
cls.backend_address = "http://localhost:5000"
# Set DB name for tests
os.environ['ALIGNAK_BACKEND_MONGO_DBNAME'] = 'alignak-backend-test'
# Delete used mongo DBs
exit_code = subprocess.call(
shlex.split(
'mongo %s --eval "db.dropDatabase()"' % os.environ['ALIGNAK_BACKEND_MONGO_DBNAME'])
)
assert exit_code == 0
cls.pid = subprocess.Popen([
'uwsgi', '--plugin', 'python', '-w', 'alignakbackend:app',
'--socket', '0.0.0.0:5000', '--protocol=http', '--enable-threads', '--pidfile',
'/tmp/uwsgi.pid'
])
time.sleep(3)
@classmethod
def tearDownClass(cls):
"""
Stop the backend at the end of the tests
:return: None
"""
print("")
print("stop alignak backend")
cls.pid.kill()
time.sleep(1)
def test_multiprocess(self):
"""
Test multiprocess get right all elements
:return: None
"""
print('')
print('test creation')
print('Create client API for URL:', self.backend_address)
backend = Backend(self.backend_address, 8)
backend.login('admin', 'admin')
items = backend.get('realm')
realm_id = items['_items'][0]['_id']
# add 2000 commands
backend.delete("command", {})
data = {'command_line': 'check_ping', '_realm': realm_id}
for i in range(1, 2001):
data['name'] = "cmd %d" % i
backend.post('command', data)
# get without multiprocess
backend_yannsolo = Backend(self.backend_address)
backend_yannsolo.login('admin', 'admin')
start_time = time.time()
resp = backend_yannsolo.get_all('command', {'max_results': 20})
threads_1 = time.time() - start_time
self.assertEqual(len(resp['_items']), 2002, "Number of commands in non multiprocess mode")
# get with multiprocess (8 processes)
start_time = time.time()
resp = backend.get_all('command', {'max_results': 20})
threads_8 = time.time() - start_time
self.assertEqual(len(resp['_items']), 2002, "Number of commands in multiprocess mode")
ids = []
for dat in resp['_items']:
ids.append(dat['_id'])
self.assertEqual(len(ids), 2002, "Number of id")
# remove duplicates
ids_final = set(ids)
self.assertEqual(len(ids_final), 2002, "Number of id unique")
print(threads_1)
print(threads_8)
|
class test_multiprocess(unittest2.TestCase):
'''
test multiprocess to get items in backend
'''
@classmethod
def setUpClass(cls):
'''
Function used in the beginning of test to prepare the backend
:param module:
:return: None
'''
pass
@classmethod
def tearDownClass(cls):
'''
Stop the backend at the end of the tests
:return: None
'''
pass
def test_multiprocess(self):
'''
Test multiprocess get right all elements
:return: None
'''
pass
| 6 | 4 | 28 | 5 | 17 | 6 | 2 | 0.41 | 1 | 4 | 1 | 0 | 1 | 0 | 3 | 3 | 93 | 17 | 54 | 20 | 48 | 22 | 45 | 18 | 41 | 3 | 1 | 1 | 5 |
3,875 |
Alignak-monitoring-contrib/alignak-backend-client
|
Alignak-monitoring-contrib_alignak-backend-client/test/test_patch.py
|
test_patch.TestPatchClient
|
class TestPatchClient(unittest2.TestCase):
"""
Test patch (update) items
"""
@classmethod
def setUpClass(cls):
"""
Function used in the beginning of test to prepare the backend
:param module:
:return: None
"""
print("start alignak backend")
cls.backend_address = "http://localhost:5000"
# Set DB name for tests
os.environ['ALIGNAK_BACKEND_MONGO_DBNAME'] = 'alignak-backend-test'
# Delete used mongo DBs
exit_code = subprocess.call(
shlex.split(
'mongo %s --eval "db.dropDatabase()"' % os.environ['ALIGNAK_BACKEND_MONGO_DBNAME'])
)
assert exit_code == 0
cls.pid = subprocess.Popen([
'uwsgi', '--plugin', 'python', '-w', 'alignakbackend:app',
'--socket', '0.0.0.0:5000', '--protocol=http', '--enable-threads', '--pidfile',
'/tmp/uwsgi.pid'
])
time.sleep(3)
headers = {'Content-Type': 'application/json'}
params = {'username': 'admin', 'password': 'admin', 'action': 'generate'}
# get token
response = requests.post(cls.backend_address + '/login', json=params, headers=headers)
resp = response.json()
cls.token = resp['token']
cls.auth = requests.auth.HTTPBasicAuth(cls.token, '')
# get realms
response = requests.get(cls.backend_address + '/realm',
auth=cls.auth)
resp = response.json()
cls.realmAll_id = resp['_items'][0]['_id']
@classmethod
def tearDownClass(cls):
"""
Stop the backend at the end of the tests
:param module:
:return: None
"""
print("stop alignak backend")
cls.pid.kill()
def test_1_patch_successful(self):
"""
Test patch a user successfully
:return: None
"""
backend = Backend(self.backend_address)
backend.login('admin', 'admin')
# get user admin
params = {"where": {"name": "admin"}}
response = requests.get(self.backend_address + '/user', json=params, auth=self.auth)
resp = response.json()
user_id = resp['_items'][0]['_id']
user_etag = resp['_items'][0]['_etag']
data = {'alias': 'modified test'}
headers = {'If-Match': user_etag}
response = backend.patch('/'.join(['user', user_id]), data=data, headers=headers)
assert_true(response['_status'] == 'OK')
def test_1_patch_successful_inception(self):
"""
Test patch a user successfully with inception
:return: None
"""
backend = Backend(self.backend_address)
backend.login('admin', 'admin')
# get user admin
params = {"where": {"name": "admin"}}
response = requests.get(self.backend_address + '/user', json=params, auth=self.auth)
resp = response.json()
user_id = resp['_items'][0]['_id']
# user_etag = resp['_items'][0]['_etag']
data = {'alias': 'modified test'}
headers = {'If-Match': 'foo'}
response = backend.patch('/'.join(['user', user_id]),
data=data, headers=headers, inception=True)
assert_true(response['_status'] == 'OK')
def test_2_patch_exception(self):
"""
Test patch a user with errors (so exceptions)
:return: None
"""
backend = Backend(self.backend_address)
backend.login('admin', 'admin')
# get user admin
params = {"where": {"name": "admin"}}
response = requests.get(self.backend_address + '/user', json=params, auth=self.auth)
resp = response.json()
user_id = resp['_items'][0]['_id']
user_etag = resp['_items'][0]['_etag']
with assert_raises(BackendException) as cm:
data = {'alias': 'modified with no header'}
backend.patch('/'.join(['user', user_id]), data=data)
ex = cm.exception
print('exception:', str(ex.code))
assert_true(ex.code == 1000, str(ex))
with assert_raises(BackendException) as cm:
data = {'bad_field': 'bad field name ... unknown in data model'}
headers = {'If-Match': user_etag}
backend.patch('/'.join(['user', user_id]), data=data, headers=headers, inception=True)
ex = cm.exception
assert_true(ex.code == 422)
with assert_raises(BackendException) as cm:
data = {'alias': 'modified test again and again'}
headers = {'If-Match': "567890987678"}
response = backend.patch('/'.join(['user', user_id]), data=data, headers=headers)
ex = cm.exception
print('exception:', str(ex.code))
assert_true(ex.code == 412, str(ex))
def test_3_patch_connection_error(self):
"""
Backend connection error when updating an object...
:return: None
"""
print('test connection error when updating an object')
# Create client API
backend = Backend(self.backend_address)
backend.login('admin', 'admin')
# get user admin
params = {"where": {"name": "admin"}}
response = requests.get(self.backend_address + '/user', json=params, auth=self.auth)
resp = response.json()
user_id = resp['_items'][0]['_id']
user_etag = resp['_items'][0]['_etag']
print("stop the alignak backend")
self.pid.kill()
with assert_raises(BackendException) as cm:
data = {'alias': 'modified test'}
headers = {'If-Match': user_etag}
response = backend.patch('/'.join(['user', user_id]), data=data, headers=headers)
assert_true(response['_status'] == 'OK')
ex = cm.exception
self.assertEqual(ex.code, 1000)
|
class TestPatchClient(unittest2.TestCase):
'''
Test patch (update) items
'''
@classmethod
def setUpClass(cls):
'''
Function used in the beginning of test to prepare the backend
:param module:
:return: None
'''
pass
@classmethod
def tearDownClass(cls):
'''
Stop the backend at the end of the tests
:param module:
:return: None
'''
pass
def test_1_patch_successful(self):
'''
Test patch a user successfully
:return: None
'''
pass
def test_1_patch_successful_inception(self):
'''
Test patch a user successfully with inception
:return: None
'''
pass
def test_2_patch_exception(self):
'''
Test patch a user with errors (so exceptions)
:return: None
'''
pass
def test_3_patch_connection_error(self):
'''
Backend connection error when updating an object...
:return: None
'''
pass
| 9 | 7 | 26 | 4 | 16 | 6 | 1 | 0.39 | 1 | 4 | 2 | 0 | 4 | 0 | 6 | 6 | 168 | 29 | 100 | 49 | 91 | 39 | 89 | 45 | 82 | 1 | 1 | 1 | 6 |
3,876 |
Alignak-monitoring-contrib/alignak-backend-client
|
Alignak-monitoring-contrib_alignak-backend-client/test/test_post.py
|
test_post.TestPostClient
|
class TestPostClient(unittest2.TestCase):
"""
Test post (add) items
"""
@classmethod
def setUpClass(cls):
"""
Function used in the beginning of test to prepare the backend
:param module:
:return: None
"""
print("start alignak backend")
cls.backend_address = "http://localhost:5000"
# Set DB name for tests
os.environ['ALIGNAK_BACKEND_MONGO_DBNAME'] = 'alignak-backend-test'
# Delete used mongo DBs
exit_code = subprocess.call(
shlex.split(
'mongo %s --eval "db.dropDatabase()"' % os.environ['ALIGNAK_BACKEND_MONGO_DBNAME'])
)
assert exit_code == 0
cls.pid = subprocess.Popen([
'uwsgi', '--plugin', 'python', '-w', 'alignakbackend:app',
'--socket', '0.0.0.0:5000', '--protocol=http', '--enable-threads', '--pidfile',
'/tmp/uwsgi.pid'
])
time.sleep(3)
headers = {'Content-Type': 'application/json'}
params = {'username': 'admin', 'password': 'admin', 'action': 'generate'}
# get token
response = requests.post(cls.backend_address + '/login', json=params, headers=headers)
resp = response.json()
cls.token = resp['token']
cls.auth = requests.auth.HTTPBasicAuth(cls.token, '')
# get realms
response = requests.get(cls.backend_address + '/realm',
auth=cls.auth)
resp = response.json()
cls.realmAll_id = resp['_items'][0]['_id']
@classmethod
def tearDownClass(cls):
"""
Stop the backend at the end of the tests
:param module:
:return: None
"""
print("stop alignak backend")
cls.pid.kill()
def test_1_post_successful(self):
"""
Test post a timeperiod successfully
:return: None
"""
backend = Backend(self.backend_address)
backend.login('admin', 'admin')
# Create a new timeperiod
print('create a timeperiod')
data = {
"name": "Testing TP",
"alias": "Test TP",
"dateranges": [
{u'monday': u'09:00-17:00'},
{u'tuesday': u'09:00-17:00'},
{u'wednesday': u'09:00-17:00'},
{u'thursday': u'09:00-17:00'},
{u'friday': u'09:00-17:00'}
],
"_realm": self.realmAll_id
}
response = backend.post('timeperiod', data=data)
assert_true('_created' in response)
assert_true('_updated' in response)
assert_true(response['_created'] == response['_updated'])
def test_2_post_exceptions(self):
"""
Test post a user with errors (so exceptions)
:return: None
"""
backend = Backend(self.backend_address)
backend.login('admin', 'admin')
# Create a new user, bad parameters
# Mandatory field user_name is missing ...
data = {
"name": "Testing user",
"alias": "Fred",
"back_role_super_admin": False,
"back_role_admin": [],
"min_business_impact": 0,
}
with assert_raises(BackendException) as cm:
backend.post('user', data=data)
ex = cm.exception
assert_true(ex.code == 422)
def test_3_post_connection_error(self):
"""
Backend connection error when creating an object...
:return: None
"""
print('test connection error when creating an object')
# Create client API
backend = Backend(self.backend_address)
backend.login('admin', 'admin')
print("stop the alignak backend")
self.pid.kill()
with assert_raises(BackendException) as cm:
# Create a new timeperiod
print('create a timeperiod')
data = {
"name": "Testing TP",
"alias": "Test TP",
"dateranges": [
{u'monday': u'09:00-17:00'},
{u'tuesday': u'09:00-17:00'},
{u'wednesday': u'09:00-17:00'},
{u'thursday': u'09:00-17:00'},
{u'friday': u'09:00-17:00'}
],
"_realm": self.realmAll_id
}
response = backend.post('timeperiod', data=data)
assert_true('_created' in response)
assert_true('_updated' in response)
assert_true(response['_created'] == response['_updated'])
ex = cm.exception
self.assertEqual(ex.code, 1000)
|
class TestPostClient(unittest2.TestCase):
'''
Test post (add) items
'''
@classmethod
def setUpClass(cls):
'''
Function used in the beginning of test to prepare the backend
:param module:
:return: None
'''
pass
@classmethod
def tearDownClass(cls):
'''
Stop the backend at the end of the tests
:param module:
:return: None
'''
pass
def test_1_post_successful(self):
'''
Test post a timeperiod successfully
:return: None
'''
pass
def test_2_post_exceptions(self):
'''
Test post a user with errors (so exceptions)
:return: None
'''
pass
def test_3_post_connection_error(self):
'''
Backend connection error when creating an object...
:return: None
'''
pass
| 8 | 6 | 27 | 3 | 18 | 6 | 1 | 0.37 | 1 | 3 | 2 | 0 | 3 | 0 | 5 | 5 | 145 | 20 | 91 | 25 | 83 | 34 | 53 | 21 | 47 | 1 | 1 | 1 | 5 |
3,877 |
Alignak-monitoring-contrib/alignak-backend-client
|
Alignak-monitoring-contrib_alignak-backend-client/alignak_backend_client/backend_client.py
|
alignak_backend_client.backend_client.BackendUpdate
|
class BackendUpdate(object): # pylint: disable=useless-object-inheritance
"""Class to interface the Alignak backend to make some operations"""
embedded_resources = {
'realm': {
'_parent': 1,
},
'command': {
'_realm': 1,
},
'timeperiod': {
'_realm': 1,
},
'usergroup': {
'_realm': 1, '_parent': 1,
},
'hostgroup': {
'_realm': 1, '_parent': 1, 'hostgroups': 1, 'hosts': 1
},
'servicegroup': {
'_realm': 1, '_parent': 1, 'hostgroups': 1, 'hosts': 1
},
'user': {
'_realm': 1,
'host_notification_period': 1, 'host_notification_commands': 1,
'service_notification_period': 1, 'service_notification_commands': 1
},
'userrestrictrole': {
'_realm': 1, 'user': 1
},
'host': {
'_realm': 1, '_templates': 1,
'check_command': 1, 'snapshot_command': 1, 'event_handler': 1,
'check_period': 1, 'notification_period': 1,
'snapshot_period': 1, 'maintenance_period': 1,
'parents': 1, 'hostgroups': 1, 'users': 1, 'usergroups': 1
},
'service': {
'_realm': 1, '_templates': 1,
'host': 1,
'check_command': 1, 'snapshot_command': 1, 'event_handler': 1,
'check_period': 1, 'notification_period': 1,
'snapshot_period': 1, 'maintenance_period': 1,
'service_dependencies': 1, 'servicegroups': 1, 'users': 1, 'usergroups': 1
},
'hostdependency': {
'_realm': 1,
'hosts': 1, 'hostgroups': 1,
'dependent_hosts': 1, 'dependent_hostgroups': 1,
'dependency_period': 1
},
'servicedependency': {
'_realm': 1,
'hosts': 1, 'hostgroups': 1,
'dependent_hosts': 1, 'dependent_hostgroups': 1,
'services': 1, 'dependent_services': 1,
'dependency_period': 1
},
'hostescalation': {
'_realm': 1,
'hosts': 1, 'hostgroups': 1,
'users': 1, 'usergroups': 1,
'escalation_period': 1
},
'serviceescalation': {
'_realm': 1,
'services': 1,
'hosts': 1, 'hostgroups': 1,
'users': 1, 'usergroups': 1,
'escalation_period': 1
},
'graphite': {
'grafana': 1, 'statsd': 1
},
'influxdb': {
'grafana': 1, 'statsd': 1
}
}
def __init__(self):
self.logged_in = False
self.logged_in_user = None
# Get command line parameters
args = None
try:
args = docopt(__doc__, version=__version__)
except DocoptExit as exp:
print("Command line parsing error:\n%s." % (exp))
print("~~~~~~~~~~~~~~~~~~~~~~~~~~")
print("Exiting with error code: 64")
exit(64)
# Verbose mode
self.verbose = False
if args['--verbose']:
logger.setLevel('DEBUG')
self.verbose = True
# Quiet mode
self.quiet = False
if args['--quiet']:
logger.setLevel('NOTSET')
self.quiet = True
# Dry-run mode?
self.dry_run = args['--check']
logger.debug("Dry-run mode (check only): %s", self.dry_run)
# Backend URL
self.backend = None
self.backend_url = args['--backend']
logger.debug("Backend URL: %s", self.backend_url)
# Backend authentication
self.username = args['--username']
self.password = args['--password']
logger.debug("Backend login with credentials: %s/%s", self.username, self.password)
# Get a list
self.list = args['--list']
logger.debug("Get a list: %s", self.list)
# Get the objects templates in the list
self.model = args['--model']
logger.debug("Get the templates: %s", self.model)
# Get the item type
self.item_type = args['--type']
logger.debug("Item type: %s", self.item_type)
# Get the action to execute
self.action = args['<action>']
if self.action is None:
self.action = 'get'
logger.debug("Action to execute: %s", self.action)
if self.action not in ['add', 'update', 'get', 'list', 'delete']:
print("Action '%s' is not authorized." % (self.action))
exit(64)
# Get the targeted item
self.item = args['<item>']
if self.item == 'None':
self.item = None
logger.debug("Targeted item name: %s", self.item)
# Get the template to use
# pylint: disable=no-member
self.templates = args['--template']
logger.debug("Using the template(s): %s", self.templates)
if self.templates:
if ',' in self.templates:
self.templates = self.templates.split(',')
else:
self.templates = [self.templates]
if self.list and not self.item_type:
self.item_type = self.item
logger.debug("Item type (computed): %s", self.item_type)
# Embedded mode
self.embedded = args['--embedded']
logger.debug("Embedded mode: %s", self.embedded)
# Get the data files folder
self.folder = None
if args['--folder'] != 'none':
self.folder = args['--folder']
logger.debug("Data files folder: %s", self.folder)
# Get the associated data file
self.data = None
if args['--data'] != 'none':
self.data = args['--data']
logger.debug("Item data provided: %s", self.data)
self.include_read_data = args['--include-read-data']
logger.debug("Use backend read data: %s", self.include_read_data)
def initialize(self):
# pylint: disable=attribute-defined-outside-init
"""Login on backend with username and password
:return: None
"""
try:
logger.info("Authenticating...")
self.backend = Backend(self.backend_url)
self.backend.login(self.username, self.password)
except BackendException as exp: # pragma: no cover, should never happen
logger.exception("Exception: %s", exp)
logger.error("Response: %s", exp.response)
if self.backend.token is None:
print("Access denied!")
print("~~~~~~~~~~~~~~~~~~~~~~~~~~")
print("Exiting with error code: 1")
exit(1)
logger.info("Authenticated.")
# Logged-in user and default realm
users = self.backend.get_all('user', {'where': json.dumps({'name': self.username})})
self.logged_in_user = users['_items'][0]
self.default_realm = self.logged_in_user['_realm']
# Main realm
self.realm_all = None
realms = self.backend.get_all('realm')
for r in realms['_items']:
if r['name'] == 'All' and r['_level'] == 0:
self.realm_all = r['_id']
logger.info("Found realm 'All': %s", self.realm_all)
if r['_id'] == self.default_realm:
logger.info("Found logged-in user realm: %s", r['name'])
# Default timeperiods
self.tp_always = None
self.tp_never = None
timeperiods = self.backend.get_all('timeperiod')
for tp in timeperiods['_items']:
if tp['name'] == '24x7':
self.tp_always = tp['_id']
logger.info("Found TP '24x7': %s", self.tp_always)
if tp['name'].lower() == 'none' or tp['name'].lower() == 'never':
self.tp_never = tp['_id']
logger.info("Found TP 'Never': %s", self.tp_never)
def file_dump(self, data, filename): # pylint: disable=no-self-use
"""
Dump the data to a JSON formatted file
:param data: data to be dumped
:param filename: name of the file to use. Only the file name, not the full path!
:return: dumped file absolute file name
"""
dump = json.dumps(data, indent=4,
separators=(',', ': '), sort_keys=True)
path = os.path.join(self.folder or os.getcwd(), filename)
try:
dfile = open(path, "wt")
dfile.write(dump)
dfile.close()
return path
except (OSError, IndexError) as exp: # pragma: no cover, should never happen
logger.exception("Error when writing the list dump file %s : %s", path, str(exp))
return None
def get_resource_list(self, resource_name, name=''):
# pylint: disable=too-many-locals, too-many-nested-blocks
"""Get a specific resource list
If name is not None, it may be a request to get the list of the services of an host.
"""
try:
logger.info("Trying to get %s list", resource_name)
params = {}
if resource_name in ['host', 'service', 'user']:
params = {'where': json.dumps({'_is_template': self.model})}
if resource_name == 'service' and name and '/' in name:
splitted_name = name.split('/')
# Get host from name
response2 = self.backend.get(
'host', params={'where': json.dumps({'name': splitted_name[0],
'_is_template': self.model})})
if response2['_items']:
host = response2['_items'][0]
logger.info("Got host '%s' for the service '%s'",
splitted_name[0], splitted_name[1])
else:
logger.warning("Not found host '%s'!", splitted_name[0])
return False
params = {'where': json.dumps({'host': host['_id']})}
if self.embedded and resource_name in self.embedded_resources:
params.update({'embedded': json.dumps(self.embedded_resources[resource_name])})
rsp = self.backend.get_all(resource_name, params=params)
if rsp['_items'] and rsp['_status'] == 'OK':
response = rsp['_items']
logger.info("-> found %ss", resource_name)
# Exists in the backend, we got the element
if not self.dry_run:
logger.info("-> dumping %ss list", resource_name)
for item in response:
# Filter fields prefixed with an _ (internal backend fields)
for field in list(item):
if field in ['_created', '_updated', '_etag', '_links', '_status']:
item.pop(field)
continue
# Filter fields prefixed with an _ in embedded items
if self.embedded and resource_name in self.embedded_resources and \
field in self.embedded_resources[resource_name]:
# Embedded items may be a list or a simple dictionary,
# always make it a list
embedded_items = item[field]
if not isinstance(item[field], list):
embedded_items = [item[field]]
# Filter fields in each embedded item
for embedded_item in embedded_items:
if not embedded_item:
continue
for embedded_field in list(embedded_item):
if embedded_field.startswith('_'):
embedded_item.pop(embedded_field)
filename = self.file_dump(response, 'alignak-%s-list-%ss.json'
% ('model' if self.model else 'object',
resource_name))
if filename:
logger.info("-> dumped %ss list to %s", resource_name, filename)
else:
logger.info("Dry-run mode: should have dumped an %s list", resource_name)
return True
else:
logger.warning("-> %s list is empty", resource_name)
if not self.dry_run:
logger.info("-> dumping %ss list", resource_name)
filename = self.file_dump([], 'alignak-%s-list-%ss.json'
% ('model' if self.model else 'object',
resource_name))
if filename:
logger.info("-> dumped %ss list to %s", resource_name, filename)
return True
except BackendException as exp: # pragma: no cover, should never happen
logger.exception("Exception: %s", exp)
logger.error("Response: %s", exp.response)
print("Get error for '%s' list" % (resource_name))
print("~~~~~~~~~~~~~~~~~~~~~~~~~~")
print("Exiting with error code: 5")
return False
def get_resource(self, resource_name, name):
# pylint: disable=too-many-locals, too-many-nested-blocks
"""Get a specific resource by name"""
try:
logger.info("Trying to get %s: '%s'", resource_name, name)
services_list = False
if resource_name == 'host' and '/' in name:
splitted_name = name.split('/')
services_list = True
name = splitted_name[0]
params = {'where': json.dumps({'name': name})}
if resource_name in ['host', 'service', 'user']:
params = {'where': json.dumps({'name': name, '_is_template': self.model})}
if resource_name == 'service' and '/' in name:
splitted_name = name.split('/')
# new_name = splitted_name[0] + '_' + splitted_name[1]
# name = splitted_name[1]
# Get host from name
response2 = self.backend.get(
'host', params={'where': json.dumps({'name': splitted_name[0]})})
if response2['_items']:
host = response2['_items'][0]
logger.info("Got host '%s' for the service '%s'",
splitted_name[0], splitted_name[1])
else:
logger.warning("Not found host '%s'!", splitted_name[0])
return False
params = {'where': json.dumps({'name': splitted_name[1],
'host': host['_id'],
'_is_template': self.model})}
if self.embedded and resource_name in self.embedded_resources:
params.update({'embedded': json.dumps(self.embedded_resources[resource_name])})
response = self.backend.get(resource_name, params=params)
if response['_items']:
response = response['_items'][0]
logger.info("-> found %s '%s': %s", resource_name, name, response['_id'])
if services_list:
# Get services for the host
params = {'where': json.dumps({'host': response['_id']})}
if self.embedded and 'service' in self.embedded_resources:
params.update(
{'embedded': json.dumps(self.embedded_resources['service'])})
response2 = self.backend.get('service', params=params)
if response2['_items']:
response['_services'] = response2['_items']
logger.info("Got %d services for host '%s'",
len(response2['_items']), splitted_name[0])
else:
logger.warning("Not found host '%s'!", splitted_name[0])
return False
# Exists in the backend, we got the element
if not self.dry_run:
logger.info("-> dumping %s: %s", resource_name, name)
# Filter fields prefixed with an _ (internal backend fields)
for field in list(response):
if field in ['_created', '_updated', '_etag', '_links', '_status']:
response.pop(field)
continue
# Filter fields prefixed with an _ in embedded items
if self.embedded and resource_name in self.embedded_resources and \
field in self.embedded_resources[resource_name]:
logger.info("-> embedded %s", field)
# Embedded items may be a list or a simple dictionary,
# always make it a list
embedded_items = response[field]
if not isinstance(response[field], list):
embedded_items = [response[field]]
# Filter fields in each embedded item
for embedded_item in embedded_items:
if not embedded_item:
continue
for embedded_field in list(embedded_item):
if embedded_field.startswith('_'):
embedded_item.pop(embedded_field)
dump = json.dumps(response, indent=4,
separators=(',', ': '), sort_keys=True)
if not self.quiet:
print(dump)
if resource_name == 'service' and '/' in name:
name = splitted_name[0] + '_' + splitted_name[1]
filename = self.file_dump(response,
'alignak-object-dump-%s-%s.json'
% (resource_name, name))
if filename:
logger.info("-> dumped %s '%s' to %s", resource_name, name, filename)
logger.info("-> dumped %s: %s", resource_name, name)
else:
if resource_name == 'service' and '/' in name:
name = splitted_name[0] + '_' + splitted_name[1]
logger.info("Dry-run mode: should have dumped an %s '%s'",
resource_name, name)
return True
else:
logger.warning("-> %s '%s' not found", resource_name, name)
return False
except BackendException as exp: # pragma: no cover, should never happen
logger.exception("Exception: %s", exp)
logger.error("Response: %s", exp.response)
print("Get error for '%s' : %s" % (resource_name, name))
print("~~~~~~~~~~~~~~~~~~~~~~~~~~")
print("Exiting with error code: 5")
return False
def delete_resource(self, resource_name, name):
"""Delete a specific resource by name"""
try:
logger.info("Trying to get %s: '%s'", resource_name, name)
if name is None:
# No name is defined, delete all the resources...
if not self.dry_run:
headers = {
'Content-Type': 'application/json'
}
logger.info("-> deleting all %s", resource_name)
self.backend.delete(resource_name, headers)
logger.info("-> deleted all %s", resource_name)
else:
response = {'_id': '_fake', '_etag': '_fake'}
logger.info("Dry-run mode: should have deleted all %s", resource_name)
else:
params = {'where': json.dumps({'name': name})}
if resource_name in ['host', 'service', 'user']:
params = {'where': json.dumps({'name': name, '_is_template': self.model})}
if resource_name == 'service' and '/' in name:
splitted_name = name.split('/')
name = splitted_name[0] + '_' + splitted_name[1]
# Get host from name
response2 = self.backend.get(
'host', params={'where': json.dumps({'name': splitted_name[0]})})
if response2['_items']:
host = response2['_items'][0]
logger.info("Got host '%s' for the service '%s'",
splitted_name[0], splitted_name[1])
else:
logger.warning("Not found host '%s'!", splitted_name[0])
return False
if splitted_name[1] == '*':
params = {'where': json.dumps({'host': host['_id']})}
else:
params = {'where': json.dumps({'name': splitted_name[1],
'host': host['_id']})}
response = self.backend.get_all(resource_name, params=params)
if response['_items']:
logger.info("-> found %d matching %s", len(response['_items']), resource_name)
for item in response['_items']:
logger.info("-> found %s '%s': %s", resource_name, name, item['name'])
# Exists in the backend, we must delete the element...
if not self.dry_run:
headers = {
'Content-Type': 'application/json',
'If-Match': item['_etag']
}
logger.info("-> deleting %s: %s", resource_name, item['name'])
self.backend.delete(resource_name + '/' + item['_id'], headers)
logger.info("-> deleted %s: %s", resource_name, item['name'])
else:
response = {'_id': '_fake', '_etag': '_fake'}
logger.info("Dry-run mode: should have deleted an %s '%s'",
resource_name, name)
logger.info("-> deleted: '%s': %s",
resource_name, item['_id'])
else:
logger.warning("-> %s item '%s' not found", resource_name, name)
return False
except BackendException as exp: # pragma: no cover, should never happen
logger.exception("Exception: %s", exp)
logger.error("Response: %s", exp.response)
print("Deletion error for '%s' : %s" % (resource_name, name))
print("~~~~~~~~~~~~~~~~~~~~~~~~~~")
print("Exiting with error code: 5")
return False
return True
def create_update_resource(self, resource_name, name, update=False):
# pylint: disable=too-many-return-statements, too-many-locals
# pylint: disable=too-many-nested-blocks
"""Create or update a specific resource
:param resource_name: backend resource endpoint (eg. host, user, ...)
:param name: name of the resource to create/update
:param update: True to update an existing resource, else will try to create
:return:
"""
if self.data is None:
self.data = {}
# If some data are provided, try to get them
json_data = None
if self.data:
try:
# Data may be provided on the command line or from a file
if self.data == 'stdin':
input_file = sys.stdin
else:
path = os.path.join(self.folder or os.getcwd(), self.data)
input_file = open(path)
json_data = json.load(input_file)
logger.info("Got provided data: %s", json_data)
if input_file is not sys.stdin:
input_file.close()
except IOError:
logger.error("Error reading data file: %s", path)
return False
except ValueError:
logger.error("Error malformed data file: %s", path)
return False
if name is None and json_data is None:
logger.error("-> can not add/update a %s without a name and/or data!", resource_name)
return False
# Manage provided templates
used_templates = []
if self.templates is not None:
logger.info("Searching the %s template(s): %s",
resource_name, self.templates)
for template in self.templates:
response = self.backend.get(
resource_name, params={'where': json.dumps({'name': template,
'_is_template': True})})
if response['_items']:
used_templates.append(response['_items'][0]['_id'])
logger.info("-> found %s template '%s': %s",
resource_name, template, response['_items'][0]['_id'])
else:
logger.error("-> %s required template not found '%s'", resource_name, template)
return False
try:
if json_data is None:
json_data = {'name': name}
if not isinstance(json_data, list):
json_data = [json_data]
logger.info("Got %d %ss", len(json_data), resource_name)
count = 0
for json_item in json_data:
logger.info("-> json item: %s", json_item)
if resource_name not in ['history', 'userrestrictrole', 'logcheckresult'] \
and name is None and ('name' not in json_item or not json_item['name']):
logger.warning("-> unnamed '%s'!", resource_name)
continue
# Manage resource name
item_name = name
if 'name' in json_item:
item_name = json_item['name']
# Got the item name
params = {'name': item_name}
if resource_name == 'service' and 'host' in json_item:
# Get host from name
host_search = {'name': json_item['host']}
if '_is_template' in json_item:
host_search.update({'_is_template': json_item['_is_template']})
logger.info("Host search: %s", host_search)
resp_host = self.backend.get(
'host', params={'where': json.dumps(host_search)})
if resp_host['_items']:
host = resp_host['_items'][0]
logger.info("Got host '%s' for the service '%s'", host['name'], item_name)
else:
logger.warning("Host not found: '%s' for the service: %s!",
json_item['host'], item_name)
continue
params = {'name': item_name, 'host': host['_id']}
if resource_name == 'service' and '/' in item_name:
splitted_name = item_name.split('/')
# Get host from name
host_search = {'name': splitted_name[0]}
if '_is_template' in json_item:
host_search.update({'_is_template': json_item['_is_template']})
resp_host = self.backend.get(
'host', params={'where': json.dumps(host_search)})
if resp_host['_items']:
host = resp_host['_items'][0]
logger.info("Got host '%s' for the service '%s'",
splitted_name[0], splitted_name[1])
else:
logger.warning("Host not found: '%s' for the service: %s!",
splitted_name[0], item_name)
continue
item_name = splitted_name[1]
params = {'name': item_name, 'host': host['_id']}
if '_is_template' in json_item:
params.update({'_is_template': json_item['_is_template']})
params = {'where': json.dumps(params)}
if name:
logger.info("Trying to get %s: '%s', params: %s",
resource_name, item_name, params)
response = self.backend.get(resource_name, params=params)
if response['_items']:
found_item = response['_items'][0]
found_id = found_item['_id']
found_etag = found_item['_etag']
logger.info("-> found %s '%s': %s", resource_name, item_name, found_id)
if not update:
logger.warning("-> '%s' %s cannot be created because it already "
"exists!", resource_name, item_name)
continue
else:
if update:
logger.warning("-> '%s' %s cannot be updated because it does not "
"exist!", resource_name, item_name)
continue
# Item data updated with provided information if some
# Data to update
item_data = {}
if self.include_read_data:
# Include read data if required
item_data = found_item
# Json provided data update existing data
item_data.update(json_item)
# Name is also updated (eg. for a service...)
item_data['name'] = item_name
# Template information if templating is required
if used_templates:
item_data.update({'_templates': used_templates,
'_templates_with_services': True})
for field in item_data.copy():
logger.debug("Field: %s = %s", field, item_data[field])
# Filter Eve extra fields
if field in ['_created', '_updated', '_etag', '_links', '_status']:
item_data.pop(field)
continue
# Filter specific backend inner computed fields
# pylint: disable=fixme
# todo: list to be completed!
if field in ['_overall_state_id']:
item_data.pop(field)
continue
# Manage potential object link fields
if field not in ['realm', '_realm', '_templates',
'command', 'host', 'service',
'escalation_period', 'maintenance_period',
'snapshot_period', 'check_period', 'dependency_period',
'notification_period', 'host_notification_period',
'escalation_period', 'service_notification_period',
'host_notification_commands', 'service_notification_commands',
'service_dependencies', 'users', 'usergroups',
'check_command', 'event_handler', 'grafana', 'statsd']:
continue
field_values = item_data[field]
if not isinstance(item_data[field], list):
field_values = [item_data[field]]
found = None
for value in field_values:
logger.debug(" - %s, single value: %s", field, value)
try:
int(value, 16)
logger.debug(" - %s, uuid value: %s", field, value)
if not isinstance(item_data[field], list):
found = value
else:
if found is None:
found = []
found.append(value)
except TypeError:
pass
except ValueError:
# Not an integer, consider an item name
field_params = {'where': json.dumps({'name': value})}
logger.debug(" - %s, params: %s", field, field_params)
if field in ['escalation_period', 'maintenance_period',
'snapshot_period', 'check_period',
'dependency_period', 'notification_period',
'host_notification_period',
'service_notification_period']:
response2 = self.backend.get('timeperiod', params=field_params)
elif field in ['_realm']:
response2 = self.backend.get('realm', params=field_params)
elif field in ['service_dependencies']:
response2 = self.backend.get('service', params=field_params)
elif field in ['users']:
response2 = self.backend.get('user', params=field_params)
elif field in ['usergroups']:
response2 = self.backend.get('usergroup', params=field_params)
elif field in ['check_command', 'event_handler',
'service_notification_commands',
'host_notification_commands']:
response2 = self.backend.get('command', params=field_params)
elif field in ['_templates']:
field_params = {'where': json.dumps({'name': value,
'_is_template': True})}
response2 = self.backend.get(resource_name, params=field_params)
else:
response2 = self.backend.get(field, params=field_params)
if response2['_items']:
response2 = response2['_items'][0]
logger.info("Replaced %s = %s with found item _id",
field, value)
if not isinstance(item_data[field], list):
found = response2['_id']
else:
if found is None:
found = []
found.append(response2['_id'])
if found is None:
logger.warning("Not found %s = %s, removing field!", field, field_values)
item_data.pop(field)
else:
item_data[field] = found
if resource_name not in ['realm'] and '_realm' not in item_data:
logger.info("add default realm to the data")
item_data.update({'_realm': self.default_realm})
if resource_name in ['realm'] and '_realm' not in item_data:
logger.info("add parent realm to the data")
item_data.update({'_parent': self.default_realm})
if '_id' in item_data:
item_data.pop('_id')
if not update:
# Trying to create a new element
if not item_data['name']:
item_data.pop('name')
logger.info("-> trying to create the %s: %s.", resource_name, item_name)
logger.debug("-> with: %s.", item_data)
if not self.dry_run:
try:
response = self.backend.post(resource_name, item_data, headers=None)
except BackendException as exp:
self.item = item_name
logger.error("Exception: %s", exp)
# logger.error("Response: %s", exp.response)
continue
else:
response = {'_status': 'OK', '_id': '_fake', '_etag': '_fake'}
else:
if not name:
logger.warning("-> can not update '%s' with no name!", resource_name)
continue
# Trying to update an element
logger.info("-> trying to update the %s: %s.", resource_name, item_name)
logger.debug("-> with: %s.", item_data)
if not self.dry_run:
try:
headers = {'Content-Type': 'application/json', 'If-Match': found_etag}
response = self.backend.patch(resource_name + '/' + found_id,
item_data, headers=headers,
inception=True)
except BackendException as exp:
self.item = item_name
logger.exception("Exception: %s", exp)
# logger.error("Response: %s", exp.response)
continue
else:
response = {'_status': 'OK', '_id': '_fake', '_etag': '_fake'}
if response['_status'] == 'ERR':
logger.warning("Response: %s", response)
return False
if not update:
# Created a new element
if not self.dry_run:
logger.info("-> created: '%s': %s", resource_name, response['_id'])
else:
logger.info("Dry-run mode: should have created an %s '%s'",
resource_name, name)
else:
# Updated an element
if not self.dry_run:
logger.info("-> updated: '%s': %s", resource_name, response['_id'])
else:
logger.info("Dry-run mode: should have updated an %s '%s'",
resource_name, name)
count = count + 1
except BackendException as exp: # pragma: no cover, should never happen
logger.exception("Exception: %s", exp)
logger.error("Response: %s", exp.response)
print("Creation/update error for '%s' : %s" % (resource_name, name))
print("~~~~~~~~~~~~~~~~~~~~~~~~~~")
print("Exiting with error code: 5")
return False
if count == len(json_data):
return True
return False
|
class BackendUpdate(object):
'''Class to interface the Alignak backend to make some operations'''
def __init__(self):
pass
def initialize(self):
'''Login on backend with username and password
:return: None
'''
pass
def file_dump(self, data, filename):
'''
Dump the data to a JSON formatted file
:param data: data to be dumped
:param filename: name of the file to use. Only the file name, not the full path!
:return: dumped file absolute file name
'''
pass
def get_resource_list(self, resource_name, name=''):
'''Get a specific resource list
If name is not None, it may be a request to get the list of the services of an host.
'''
pass
def get_resource_list(self, resource_name, name=''):
'''Get a specific resource by name'''
pass
def delete_resource(self, resource_name, name):
'''Delete a specific resource by name'''
pass
def create_update_resource(self, resource_name, name, update=False):
'''Create or update a specific resource
:param resource_name: backend resource endpoint (eg. host, user, ...)
:param name: name of the resource to create/update
:param update: True to update an existing resource, else will try to create
:return:
'''
pass
| 8 | 7 | 112 | 14 | 85 | 14 | 21 | 0.15 | 1 | 9 | 2 | 0 | 7 | 23 | 7 | 7 | 868 | 106 | 672 | 104 | 664 | 98 | 499 | 97 | 491 | 66 | 1 | 9 | 146 |
3,878 |
Alignak-monitoring-contrib/alignak-backend-client
|
Alignak-monitoring-contrib_alignak-backend-client/test/test_put.py
|
test_put.TestPutClient
|
class TestPutClient(unittest2.TestCase):
"""
Test put (replace) items
"""
@classmethod
def setUpClass(cls):
"""
Function used in the beginning of test to prepare the backend
:param module:
:return: None
"""
print("start alignak backend")
cls.backend_address = "http://localhost:5000"
# Set DB name for tests
os.environ['ALIGNAK_BACKEND_MONGO_DBNAME'] = 'alignak-backend-test'
# Delete used mongo DBs
exit_code = subprocess.call(
shlex.split(
'mongo %s --eval "db.dropDatabase()"' % os.environ['ALIGNAK_BACKEND_MONGO_DBNAME'])
)
assert exit_code == 0
cls.pid = subprocess.Popen([
'uwsgi', '--plugin', 'python', '-w', 'alignakbackend:app',
'--socket', '0.0.0.0:5000', '--protocol=http', '--enable-threads', '--pidfile',
'/tmp/uwsgi.pid'
])
time.sleep(3)
headers = {'Content-Type': 'application/json'}
params = {'username': 'admin', 'password': 'admin', 'action': 'generate'}
# get token
response = requests.post(cls.backend_address + '/login', json=params, headers=headers)
resp = response.json()
cls.token = resp['token']
cls.auth = requests.auth.HTTPBasicAuth(cls.token, '')
# get realms
response = requests.get(cls.backend_address + '/realm',
auth=cls.auth)
resp = response.json()
cls.realmAll_id = resp['_items'][0]['_id']
# add an alignakretention item
data = {
'host': 'srv001',
'latency': 0,
'last_state_type': 'HARD',
'state': 'UP',
'last_chk': 0,
}
response = requests.post(cls.backend_address + '/alignakretention', json=data,
headers=headers, auth=cls.auth)
resp = response.json()
assert resp['_status'] == 'OK'
@classmethod
def tearDownClass(cls):
"""
Stop the backend at the end of the tests
:param module:
:return: None
"""
print("stop alignak backend")
cls.pid.kill()
def test_1_put_successful(self):
"""
Test put an alignakretention successfully
:return: None
"""
backend = Backend(self.backend_address)
backend.login('admin', 'admin')
# get alignakretention
params = {}
response = requests.get(self.backend_address + '/alignakretention', json=params,
auth=self.auth)
resp = response.json()
ar_id = resp['_items'][0]['_id']
ar_etag = resp['_items'][0]['_etag']
data = {
'host': 'srv001',
'latency': 2,
'last_state_type': 'HARD',
'state': 'UP',
'last_chk': 0,
}
headers = {'If-Match': ar_etag}
response = backend.put('/'.join(['alignakretention', ar_id]), data=data, headers=headers)
assert_true(response['_status'] == 'OK')
response = requests.get(self.backend_address + '/alignakretention', json=params,
auth=self.auth)
resp = response.json()
self.assertEqual('srv001', resp['_items'][0]['host'])
self.assertEqual(2, resp['_items'][0]['latency'])
def test_1_put_successful_inception(self):
"""
Test put an alignakretention successfully with inception
:return: None
"""
backend = Backend(self.backend_address)
backend.login('admin', 'admin')
# get alignakretention
params = {}
response = requests.get(self.backend_address + '/alignakretention', json=params,
auth=self.auth)
resp = response.json()
ar_id = resp['_items'][0]['_id']
data = {'alias': 'modified test'}
headers = {'If-Match': 'foo'}
response = backend.put('/'.join(['alignakretention', ar_id]), data=data, headers=headers,
inception=True)
assert_true(response['_status'] == 'OK')
def test_2_put_exception_method_not_allowed(self):
"""
Test put a user with errors (so exceptions) because PUT method not allowed on this endpoint
:return: None
"""
backend = Backend(self.backend_address)
backend.login('admin', 'admin')
# get user admin
params = {"where": {"name": "admin"}}
response = requests.get(self.backend_address + '/user', json=params, auth=self.auth)
resp = response.json()
user_id = resp['_items'][0]['_id']
with assert_raises(BackendException) as cm:
data = {'alias': 'modified with no header'}
backend.put('/'.join(['user', user_id]), data=data)
ex = cm.exception
print('exception:', str(ex.code))
assert_true(ex.code == 1000, str(ex))
with assert_raises(BackendException) as cm:
data = {'alias': 'modified test again and again'}
headers = {'If-Match': "567890987678"}
response = backend.put('/'.join(['user', user_id]), data=data, headers=headers)
ex = cm.exception
print('exception:', str(ex.code))
assert_true(ex.code == 405, str(ex))
def test_3_put_exception_method_allowed(self):
"""
Test put a user with errors (so exceptions) because PUT method not allowed on this endpoint
:return: None
"""
backend = Backend(self.backend_address)
backend.login('admin', 'admin')
# get alignakretention
params = {}
response = requests.get(self.backend_address + '/alignakretention', json=params,
auth=self.auth)
resp = response.json()
ar_id = resp['_items'][0]['_id']
with assert_raises(BackendException) as cm:
data = {
'host': 'srv001',
'latency': 2,
'last_state_type': 'HARD',
'state': 'UP',
'last_chk': 0,
}
backend.put('/'.join(['alignakretention', ar_id]), data=data)
ex = cm.exception
print('exception:', str(ex.code))
assert_true(ex.code == 1000, str(ex))
with assert_raises(BackendException) as cm:
data = {
'host': 'srv001',
'latency': 2,
'last_state_type': 'HARD',
'state': 'UP',
'last_chk': 0,
}
headers = {'If-Match': "567890987678"}
response = backend.put('/'.join(['alignakretention', ar_id]), data=data,
headers=headers)
ex = cm.exception
print('exception:', str(ex.code))
assert_true(ex.code == 412, str(ex))
def test_4_put_connection_error(self):
"""
Backend connection error when replacing an object...
:return: None
"""
print('test connection error when replacing an object')
# Create client API
backend = Backend(self.backend_address)
backend.login('admin', 'admin')
# get alignakretention
params = {}
response = requests.get(self.backend_address + '/alignakretention', json=params,
auth=self.auth)
resp = response.json()
ar_id = resp['_items'][0]['_id']
ar_etag = resp['_items'][0]['_etag']
print("stop the alignak backend")
self.pid.kill()
with assert_raises(BackendException) as cm:
data = {
'host': 'srv001',
'latency': 2,
'last_state_type': 'HARD',
'state': 'UP',
'last_chk': 0,
}
headers = {'If-Match': ar_etag}
response = backend.put('/'.join(['alignakretention', ar_id]), data=data,
headers=headers)
assert_true(response['_status'] == 'OK')
ex = cm.exception
self.assertEqual(ex.code, 1000)
|
class TestPutClient(unittest2.TestCase):
'''
Test put (replace) items
'''
@classmethod
def setUpClass(cls):
'''
Function used in the beginning of test to prepare the backend
:param module:
:return: None
'''
pass
@classmethod
def tearDownClass(cls):
'''
Stop the backend at the end of the tests
:param module:
:return: None
'''
pass
def test_1_put_successful(self):
'''
Test put an alignakretention successfully
:return: None
'''
pass
def test_1_put_successful_inception(self):
'''
Test put an alignakretention successfully with inception
:return: None
'''
pass
def test_2_put_exception_method_not_allowed(self):
'''
Test put a user with errors (so exceptions) because PUT method not allowed on this endpoint
:return: None
'''
pass
def test_3_put_exception_method_allowed(self):
'''
Test put a user with errors (so exceptions) because PUT method not allowed on this endpoint
:return: None
'''
pass
def test_4_put_connection_error(self):
'''
Backend connection error when replacing an object...
:return: None
'''
pass
| 10 | 8 | 32 | 4 | 22 | 6 | 1 | 0.28 | 1 | 4 | 2 | 0 | 5 | 0 | 7 | 7 | 238 | 35 | 159 | 59 | 149 | 44 | 110 | 54 | 102 | 1 | 1 | 1 | 7 |
3,879 |
Alignak-monitoring-contrib/alignak-backend-client
|
Alignak-monitoring-contrib_alignak-backend-client/alignak_backend_client/client.py
|
alignak_backend_client.client.Backend
|
class Backend(object): # pylint: disable=useless-object-inheritance
"""
Backend client class to communicate with an Alignak backend
Provide the backend endpoint URL to initialize the client (eg. http://127.0.0.1:5000)
"""
def __init__(self, endpoint, processes=1):
"""
Initialize a client connection
:param endpoint: root endpoint (API URL)
:type endpoint: str
"""
self.processes = processes
if endpoint.endswith('/'): # pragma: no cover - test url is complying ...
self.url_endpoint_root = endpoint[0:-1]
else:
self.url_endpoint_root = endpoint
self.session = requests.Session()
self.session.header = {'Content-Type': 'application/json'}
# Needed for retrying requests (104 - Connection reset by peer for example)
methods = ['POST', 'HEAD', 'GET', 'PUT', 'DELETE', 'PATCH']
http_retry = Retry(total=5, connect=5, read=5, backoff_factor=0.1,
method_whitelist=methods)
https_retry = Retry(total=5, connect=5, read=5, backoff_factor=0.1,
method_whitelist=methods)
http_adapter = HTTPAdapter(max_retries=http_retry)
https_adapter = HTTPAdapter(max_retries=https_retry)
self.session.mount('http://', http_adapter)
self.session.mount('https://', https_adapter)
self.authenticated = False
self._token = None
self.proxies = None
self.timeout = None # TODO: Add this option in config file
def get_url(self, endpoint):
"""
Returns the formated full URL endpoint
:param endpoint: str. the relative endpoint to access
:return: str
"""
return urljoin(self.url_endpoint_root, endpoint)
def get_response(self, method, endpoint, headers=None, json=None, params=None, data=None):
# pylint: disable=too-many-arguments
"""
Returns the response from the requested endpoint with the requested method
:param method: str. one of the methods accepted by Requests ('POST', 'GET', ...)
:param endpoint: str. the relative endpoint to access
:param params: (optional) Dictionary or bytes to be sent in the query string
for the :class:`Request`.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body
of the :class:`Request`.
:param json: (optional) json to send in the body of the :class:`Request`.
:param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
:return: Requests.response
"""
logger.debug("Parameters for get_response:")
logger.debug("\t - endpoint: %s", endpoint)
logger.debug("\t - method: %s", method)
logger.debug("\t - headers: %s", headers)
logger.debug("\t - json: %s", json)
logger.debug("\t - params: %s", params)
logger.debug("\t - data: %s", data)
url = self.get_url(endpoint)
# First stage. Errors are connection errors (timeout, no session, ...)
try:
response = self.session.request(method=method, url=url, headers=headers, json=json,
params=params, data=data, proxies=self.proxies,
timeout=self.timeout)
logger.debug("response headers: %s", response.headers)
logger.debug("response content: %s", response.content)
except RequestException as e:
response = {"_status": "ERR",
"_error": {"message": e, "code": BACKEND_ERROR},
"_issues": {"message": e, "code": BACKEND_ERROR}}
raise BackendException(code=BACKEND_ERROR,
message=e,
response=response)
else:
return response
@staticmethod
def decode(response):
"""
Decodes and returns the response as JSON (dict) or raise BackendException
:param response: requests.response object
:return: dict
"""
# Second stage. Errors are backend errors (bad login, bad url, ...)
try:
response.raise_for_status()
except requests.HTTPError as e:
raise BackendException(code=response.status_code,
message=e,
response=response)
else:
resp_json = response.json()
# Catch errors not sent in a HTTP error
error = resp_json.get('_error', None)
if error:
raise BackendException(code=error['code'],
message=error['message'],
response=response)
return resp_json
def set_token(self, token):
"""
Set token in authentification for next requests
:param token: str. token to set in auth. If None, reinit auth
"""
if token:
auth = HTTPBasicAuth(token, '')
self._token = token
self.authenticated = True # TODO: Remove this parameter
self.session.auth = auth
logger.debug("Using session token: %s", token)
else:
self._token = None
self.authenticated = False
self.session.auth = None
logger.debug("Session token/auth reinitialised")
def get_token(self):
"""Get the stored backend token"""
return self._token
token = property(get_token, set_token)
def login(self, username, password, generate='enabled', proxies=None):
"""
Log into the backend and get the token
generate parameter may have following values:
- enabled: require current token (default)
- force: force new token generation
- disabled
if login is:
- accepted, returns True
- refused, returns False
In case of any error, raises a BackendException
:param username: login name
:type username: str
:param password: password
:type password: str
:param generate: Can have these values: enabled | force | disabled
:type generate: str
:param proxies: dict of proxy (http and / or https)
:type proxies: dict
:return: return True if authentication is successfull, otherwise False
:rtype: bool
"""
logger.debug("login for: %s with generate: %s", username, generate)
if not username or not password:
raise BackendException(BACKEND_ERROR, "Missing mandatory parameters")
if proxies:
for key in proxies.keys():
try:
assert key in PROXY_PROTOCOLS
except AssertionError:
raise BackendException(BACKEND_ERROR, "Wrong proxy protocol ", key)
self.proxies = proxies
endpoint = 'login'
json = {u'username': username, u'password': password}
if generate == 'force':
json['action'] = 'generate'
logger.debug("Asking for generating new token")
response = self.get_response(method='POST', endpoint=endpoint, json=json)
if response.status_code == 401:
logger.error("Backend refused login with params %s", json)
self.set_token(token=None)
return False
resp = self.decode(response=response)
if 'token' in resp:
self.set_token(token=resp['token'])
return True
if generate == 'force': # pragma: no cover - need specific backend tests
self.set_token(token=None)
raise BackendException(BACKEND_ERROR, "Token not provided")
if generate == 'disabled': # pragma: no cover - need specific backend tests
logger.error("Token disabled ... to be implemented!")
return False
if generate == 'enabled': # pragma: no cover - need specific backend tests
logger.warning("Token enabled, but none provided, require new token generation")
return self.login(username, password, 'force')
return False # pragma: no cover - unreachable ...
def logout(self):
"""
Logout from the backend
:return: return True if logout is successfull, otherwise False
:rtype: bool
"""
logger.debug("request backend logout")
if not self.authenticated:
logger.warning("Unnecessary logout ...")
return True
endpoint = 'logout'
_ = self.get_response(method='POST', endpoint=endpoint)
self.session.close()
self.set_token(token=None)
return True
def get_domains(self):
"""
Connect to alignak backend and retrieve all available child endpoints of root
If connection is successful, returns a list of all the resources available in the backend:
Each resource is identified with its title and provides its endpoint relative to backend
root endpoint.::
[
{u'href': u'loghost', u'title': u'loghost'},
{u'href': u'escalation', u'title': u'escalation'},
...
]
If an error occurs a BackendException is raised.
If an exception occurs, it is raised to caller.
:return: list of available resources
:rtype: list
"""
resp = self.get('')
if "_links" in resp:
_links = resp["_links"]
if "child" in _links:
return _links["child"]
return {} # pragma: no cover - should never occur!
def get(self, endpoint, params=None):
"""
Get items or item in alignak backend
If an error occurs, a BackendException is raised.
This method builds a response as a dictionary that always contains: _items and _status::
{
u'_items': [
...
],
u'_status': u'OK'
}
:param endpoint: endpoint (API URL) relative from root endpoint
:type endpoint: str
:param params: parameters for the backend API
:type params: dict
:return: dictionary as specified upper
:rtype: dict
"""
response = self.get_response(method='GET', endpoint=endpoint, params=params)
resp = self.decode(response=response)
if '_status' not in resp: # pragma: no cover - need specific backend tests
resp['_status'] = 'OK' # TODO: Sure??
return resp
def get_all(self, endpoint, params=None):
# pylint: disable=too-many-locals
"""
Get all items in the specified endpoint of alignak backend
If an error occurs, a BackendException is raised.
If the max_results parameter is not specified in parameters, it is set to
BACKEND_PAGINATION_LIMIT (backend maximum value) to limit requests number.
This method builds a response that always contains: _items and _status::
{
u'_items': [
...
],
u'_status': u'OK'
}
:param endpoint: endpoint (API URL) relative from root endpoint
:type endpoint: str
:param params: list of parameters for the backend API
:type params: dict
:return: dict of properties
:rtype: dict
"""
# Set max results at maximum value supported by the backend to limit requests number
if not params:
params = {'max_results': BACKEND_PAGINATION_LIMIT}
elif params and 'max_results' not in params:
params['max_results'] = BACKEND_PAGINATION_LIMIT
# Get first page
last_page = False
items = []
if self.processes == 1:
while not last_page:
# Get elements ...
resp = self.get(endpoint=endpoint, params=params)
# Response contains:
# _items:
# ...
# _links:
# self, parent, prev, last, next
# _meta:
# - max_results, total, page
if 'next' in resp['_links']:
# Go to next page ...
params['page'] = int(resp['_meta']['page']) + 1
params['max_results'] = int(resp['_meta']['max_results'])
else:
last_page = True
items.extend(resp['_items'])
else:
def get_pages(endpoint, params, pages, out_q):
"""
Function to get pages loaded by multiprocesses
:param endpoint: endpoint to get data
:type endpoint: string
:param params: parameters for get request
:type params: dict
:param pages: range of pages to get
:type pages: list
:param out_q: Queue object
:type out_q: multiprocessing.Queue
:return: None
"""
multi_items = []
for page in pages:
params['page'] = page
resp = self.get(endpoint, params)
multi_items.extend(resp['_items'])
out_q.put(multi_items)
# Get first page
resp = self.get(endpoint, params)
number_pages = int(math.ceil(
float(resp['_meta']['total']) / float(resp['_meta']['max_results'])))
out_q = multiprocessing.Queue()
chunksize = int(math.ceil(number_pages / float(self.processes)))
procs = []
for i in range(self.processes):
begin = i * chunksize
end = begin + chunksize
if end > number_pages:
end = number_pages
begin += 1
end += 1
p = multiprocessing.Process(target=get_pages,
args=(endpoint, params, range(begin, end), out_q))
procs.append(p)
p.start()
# Collect all results into a single result dict. We know how many dicts
# with results to expect.
for i in range(self.processes):
items.extend(out_q.get())
# Wait for all worker processes to finish
for p in procs:
p.join()
return {
'_items': items,
'_status': 'OK'
}
def post(self, endpoint, data, files=None, headers=None):
# pylint: disable=unused-argument
"""
Create a new item
:param endpoint: endpoint (API URL)
:type endpoint: str
:param data: properties of item to create
:type data: dict
:param files: Not used. To be implemented
:type files: None
:param headers: headers (example: Content-Type)
:type headers: dict
:return: response (creation information)
:rtype: dict
"""
# We let Requests encode data to json
response = self.get_response(method='POST', endpoint=endpoint, json=data, headers=headers)
resp = self.decode(response=response)
# TODO: Add files support (cf. Requests - post-a-multipart-encoded-file)
# try:
# if not files:
# response = requests.post(
# urljoin(self.url_endpoint_root, endpoint),
# data=data,
# files=files,
# headers=headers,
# auth=HTTPBasicAuth(self.token, '')
# )
# resp = response.json()
# else:
# # Posting files is not yet used, but reserved for future use...
# response = requests.post(
# urljoin(self.url_endpoint_root, endpoint),
# data=data,
# files=files,
# auth=HTTPBasicAuth(self.token, '')
# )
# resp = json.loads(response.content)
return resp
def patch(self, endpoint, data, headers=None, inception=False):
"""
Method to update an item
The headers must include an If-Match containing the object _etag.
headers = {'If-Match': contact_etag}
The data dictionary contain the fields that must be modified.
If the patching fails because the _etag object do not match with the provided one, a
BackendException is raised with code = 412.
If inception is True, this method makes e new get request on the endpoint to refresh the
_etag and then a new patch is called.
If an HTTP 412 error occurs, a BackendException is raised. This exception is:
- code: 412
- message: response content
- response: backend response
All other HTTP error raises a BackendException.
If some _issues are provided by the backend, this exception is:
- code: HTTP error code
- message: response content
- response: JSON encoded backend response (including '_issues' dictionary ...)
If no _issues are provided and an _error is signaled by the backend, this exception is:
- code: backend error code
- message: backend error message
- response: JSON encoded backend response
:param endpoint: endpoint (API URL)
:type endpoint: str
:param data: properties of item to update
:type data: dict
:param headers: headers (example: Content-Type). 'If-Match' required
:type headers: dict
:param inception: if True tries to get the last _etag
:type inception: bool
:return: dictionary containing patch response from the backend
:rtype: dict
"""
if not headers:
raise BackendException(BACKEND_ERROR, "Header If-Match required for patching an object")
response = self.get_response(method='PATCH', endpoint=endpoint, json=data, headers=headers)
if response.status_code == 200:
return self.decode(response=response)
if response.status_code == 412:
# 412 means Precondition failed, but confirm ...
if inception:
# update etag and retry to patch
resp = self.get(endpoint)
headers = {'If-Match': resp['_etag']}
return self.patch(endpoint, data=data, headers=headers, inception=False)
raise BackendException(response.status_code, response.content)
else: # pragma: no cover - should never occur
raise BackendException(response.status_code, response.content)
def put(self, endpoint, data, headers=None, inception=False):
"""
Method to replace an item
The headers must include an If-Match containing the object _etag.
headers = {'If-Match': contact_etag}
The data dictionary contain all fields.
If the puting fails because the _etag object do not match with the provided one, a
BackendException is raised with code = 412.
If inception is True, this method makes a new get request on the endpoint to refresh the
_etag and then a new put is called.
If an HTTP 412 error occurs, a BackendException is raised. This exception is:
- code: 412
- message: response content
- response: backend response
All other HTTP error raises a BackendException.
If some _issues are provided by the backend, this exception is:
- code: HTTP error code
- message: response content
- response: JSON encoded backend response (including '_issues' dictionary ...)
If no _issues are provided and an _error is signaled by the backend, this exception is:
- code: backend error code
- message: backend error message
- response: JSON encoded backend response
:param endpoint: endpoint (API URL)
:type endpoint: str
:param data: properties of item to update
:type data: dict
:param headers: headers (example: Content-Type). 'If-Match' required
:type headers: dict
:param inception: if True tries to get the last _etag
:type inception: bool
:return: dictionary containing put response from the backend
:rtype: dict
"""
if not headers:
raise BackendException(BACKEND_ERROR, "Header If-Match required for puting an object")
response = self.get_response(method='PUT', endpoint=endpoint, json=data, headers=headers)
if response.status_code == 200:
return self.decode(response=response)
if response.status_code == 412:
# 412 means Precondition failed, but confirm ...
if inception:
# update etag and retry to patch
resp = self.get(endpoint)
headers = {'If-Match': resp['_etag']}
return self.patch(endpoint, data=data, headers=headers, inception=False)
raise BackendException(response.status_code, response.content)
else: # pragma: no cover - should never occur
raise BackendException(response.status_code, response.content)
def delete(self, endpoint, headers):
"""
Method to delete an item or all items
headers['If-Match'] must contain the _etag identifier of the element to delete
:param endpoint: endpoint (API URL)
:type endpoint: str
:param headers: headers (example: Content-Type)
:type headers: dict
:return: response (deletion information)
:rtype: dict
"""
response = self.get_response(method='DELETE', endpoint=endpoint, headers=headers)
logger.debug("delete, response: %s", response)
if response.status_code != 204: # pragma: no cover - should not happen ...
resp = self.decode(response=response)
resp = {"_status": "OK"}
return resp
|
class Backend(object):
'''
Backend client class to communicate with an Alignak backend
Provide the backend endpoint URL to initialize the client (eg. http://127.0.0.1:5000)
'''
def __init__(self, endpoint, processes=1):
'''
Initialize a client connection
:param endpoint: root endpoint (API URL)
:type endpoint: str
'''
pass
def get_url(self, endpoint):
'''
Returns the formated full URL endpoint
:param endpoint: str. the relative endpoint to access
:return: str
'''
pass
def get_response(self, method, endpoint, headers=None, json=None, params=None, data=None):
'''
Returns the response from the requested endpoint with the requested method
:param method: str. one of the methods accepted by Requests ('POST', 'GET', ...)
:param endpoint: str. the relative endpoint to access
:param params: (optional) Dictionary or bytes to be sent in the query string
for the :class:`Request`.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body
of the :class:`Request`.
:param json: (optional) json to send in the body of the :class:`Request`.
:param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
:return: Requests.response
'''
pass
@staticmethod
def decode(response):
'''
Decodes and returns the response as JSON (dict) or raise BackendException
:param response: requests.response object
:return: dict
'''
pass
def set_token(self, token):
'''
Set token in authentification for next requests
:param token: str. token to set in auth. If None, reinit auth
'''
pass
def get_token(self):
'''Get the stored backend token'''
pass
def login(self, username, password, generate='enabled', proxies=None):
'''
Log into the backend and get the token
generate parameter may have following values:
- enabled: require current token (default)
- force: force new token generation
- disabled
if login is:
- accepted, returns True
- refused, returns False
In case of any error, raises a BackendException
:param username: login name
:type username: str
:param password: password
:type password: str
:param generate: Can have these values: enabled | force | disabled
:type generate: str
:param proxies: dict of proxy (http and / or https)
:type proxies: dict
:return: return True if authentication is successfull, otherwise False
:rtype: bool
'''
pass
def logout(self):
'''
Logout from the backend
:return: return True if logout is successfull, otherwise False
:rtype: bool
'''
pass
def get_domains(self):
'''
Connect to alignak backend and retrieve all available child endpoints of root
If connection is successful, returns a list of all the resources available in the backend:
Each resource is identified with its title and provides its endpoint relative to backend
root endpoint.::
[
{u'href': u'loghost', u'title': u'loghost'},
{u'href': u'escalation', u'title': u'escalation'},
...
]
If an error occurs a BackendException is raised.
If an exception occurs, it is raised to caller.
:return: list of available resources
:rtype: list
'''
pass
def get_url(self, endpoint):
'''
Get items or item in alignak backend
If an error occurs, a BackendException is raised.
This method builds a response as a dictionary that always contains: _items and _status::
{
u'_items': [
...
],
u'_status': u'OK'
}
:param endpoint: endpoint (API URL) relative from root endpoint
:type endpoint: str
:param params: parameters for the backend API
:type params: dict
:return: dictionary as specified upper
:rtype: dict
'''
pass
def get_all(self, endpoint, params=None):
'''
Get all items in the specified endpoint of alignak backend
If an error occurs, a BackendException is raised.
If the max_results parameter is not specified in parameters, it is set to
BACKEND_PAGINATION_LIMIT (backend maximum value) to limit requests number.
This method builds a response that always contains: _items and _status::
{
u'_items': [
...
],
u'_status': u'OK'
}
:param endpoint: endpoint (API URL) relative from root endpoint
:type endpoint: str
:param params: list of parameters for the backend API
:type params: dict
:return: dict of properties
:rtype: dict
'''
pass
def get_pages(endpoint, params, pages, out_q):
'''
Function to get pages loaded by multiprocesses
:param endpoint: endpoint to get data
:type endpoint: string
:param params: parameters for get request
:type params: dict
:param pages: range of pages to get
:type pages: list
:param out_q: Queue object
:type out_q: multiprocessing.Queue
:return: None
'''
pass
def post(self, endpoint, data, files=None, headers=None):
'''
Create a new item
:param endpoint: endpoint (API URL)
:type endpoint: str
:param data: properties of item to create
:type data: dict
:param files: Not used. To be implemented
:type files: None
:param headers: headers (example: Content-Type)
:type headers: dict
:return: response (creation information)
:rtype: dict
'''
pass
def patch(self, endpoint, data, headers=None, inception=False):
'''
Method to update an item
The headers must include an If-Match containing the object _etag.
headers = {'If-Match': contact_etag}
The data dictionary contain the fields that must be modified.
If the patching fails because the _etag object do not match with the provided one, a
BackendException is raised with code = 412.
If inception is True, this method makes e new get request on the endpoint to refresh the
_etag and then a new patch is called.
If an HTTP 412 error occurs, a BackendException is raised. This exception is:
- code: 412
- message: response content
- response: backend response
All other HTTP error raises a BackendException.
If some _issues are provided by the backend, this exception is:
- code: HTTP error code
- message: response content
- response: JSON encoded backend response (including '_issues' dictionary ...)
If no _issues are provided and an _error is signaled by the backend, this exception is:
- code: backend error code
- message: backend error message
- response: JSON encoded backend response
:param endpoint: endpoint (API URL)
:type endpoint: str
:param data: properties of item to update
:type data: dict
:param headers: headers (example: Content-Type). 'If-Match' required
:type headers: dict
:param inception: if True tries to get the last _etag
:type inception: bool
:return: dictionary containing patch response from the backend
:rtype: dict
'''
pass
def put(self, endpoint, data, headers=None, inception=False):
'''
Method to replace an item
The headers must include an If-Match containing the object _etag.
headers = {'If-Match': contact_etag}
The data dictionary contain all fields.
If the puting fails because the _etag object do not match with the provided one, a
BackendException is raised with code = 412.
If inception is True, this method makes a new get request on the endpoint to refresh the
_etag and then a new put is called.
If an HTTP 412 error occurs, a BackendException is raised. This exception is:
- code: 412
- message: response content
- response: backend response
All other HTTP error raises a BackendException.
If some _issues are provided by the backend, this exception is:
- code: HTTP error code
- message: response content
- response: JSON encoded backend response (including '_issues' dictionary ...)
If no _issues are provided and an _error is signaled by the backend, this exception is:
- code: backend error code
- message: backend error message
- response: JSON encoded backend response
:param endpoint: endpoint (API URL)
:type endpoint: str
:param data: properties of item to update
:type data: dict
:param headers: headers (example: Content-Type). 'If-Match' required
:type headers: dict
:param inception: if True tries to get the last _etag
:type inception: bool
:return: dictionary containing put response from the backend
:rtype: dict
'''
pass
def delete(self, endpoint, headers):
'''
Method to delete an item or all items
headers['If-Match'] must contain the _etag identifier of the element to delete
:param endpoint: endpoint (API URL)
:type endpoint: str
:param headers: headers (example: Content-Type)
:type headers: dict
:return: response (deletion information)
:rtype: dict
'''
pass
| 18 | 17 | 36 | 5 | 14 | 18 | 3 | 1.22 | 1 | 10 | 1 | 0 | 14 | 7 | 15 | 15 | 586 | 100 | 225 | 71 | 207 | 275 | 200 | 68 | 183 | 11 | 1 | 3 | 54 |
3,880 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/complexexpression.py
|
alignak.complexexpression.ComplexExpressionFactory
|
class ComplexExpressionFactory(object):
"""ComplexExpressionFactory provides complex expression parsing functions
"""
def __init__(self, ctx='hostgroups', grps=None, all_elements=None):
self.ctx = ctx
self.grps = grps
self.all_elements = all_elements
def eval_cor_pattern(self, pattern): # pylint:disable=too-many-branches
"""Parse and build recursively a tree of ComplexExpressionNode from pattern
:param pattern: pattern to parse
:type pattern: str
:return: root node of parsed tree
:type: alignak.complexexpression.ComplexExpressionNode
"""
pattern = pattern.strip()
complex_node = False
# Look if it's a complex pattern (with rule) or
# if it's a leaf of it, like a host/service
for char in '()+&|,':
if char in pattern:
complex_node = True
node = ComplexExpressionNode()
# if it's a single expression like !linux or production
# (where "linux" and "production" are hostgroup names)
# we will get the objects from it and return a leaf node
if not complex_node:
# If it's a not value, tag the node and find
# the name without this ! operator
if pattern.startswith('!'):
node.not_value = True
pattern = pattern[1:]
node.operand = self.ctx
node.leaf = True
obj, error = self.find_object(pattern)
if obj is not None:
node.content = obj
else:
node.configuration_errors.append(error)
return node
in_par = False
tmp = ''
stacked_par = 0
for char in pattern:
if char in (',', '|'):
# Maybe we are in a par, if so, just stack it
if in_par:
tmp += char
else:
# Oh we got a real cut in an expression, if so, cut it
tmp = tmp.strip()
node.operand = '|'
if tmp != '':
son = self.eval_cor_pattern(tmp)
node.sons.append(son)
tmp = ''
elif char in ('&', '+'):
# Maybe we are in a par, if so, just stack it
if in_par:
tmp += char
else:
# Oh we got a real cut in an expression, if so, cut it
tmp = tmp.strip()
node.operand = '&'
if tmp != '':
son = self.eval_cor_pattern(tmp)
node.sons.append(son)
tmp = ''
elif char == '(':
stacked_par += 1
in_par = True
tmp = tmp.strip()
# Maybe we just start a par, but we got some things in tmp
# that should not be good in fact !
if stacked_par == 1 and tmp != '':
# TODO : real error
print("ERROR : bad expression near", tmp)
continue
# If we are already in a par, add this (
# but not if it's the first one so
if stacked_par > 1:
tmp += char
elif char == ')':
stacked_par -= 1
if stacked_par < 0:
# TODO : real error
print("Error : bad expression near", tmp, "too much ')'")
continue
if stacked_par == 0:
tmp = tmp.strip()
son = self.eval_cor_pattern(tmp)
node.sons.append(son)
in_par = False
# OK now clean the tmp so we start clean
tmp = ''
continue
# ok here we are still in a huge par, we just close one sub one
tmp += char
# Maybe it's a classic character, if so, continue
else:
tmp += char
# Be sure to manage the trainling part when the line is done
tmp = tmp.strip()
if tmp != '':
son = self.eval_cor_pattern(tmp)
node.sons.append(son)
return node
def find_object(self, pattern):
"""Get a list of host corresponding to the pattern regarding the context
:param pattern: pattern to find
:type pattern: str
:return: Host list matching pattern (hostgroup name, template, all)
:rtype: list[alignak.objects.host.Host]
"""
obj = None
error = None
pattern = pattern.strip()
if pattern == '*':
obj = [h.host_name for h in list(self.all_elements.items.values())
if getattr(h, 'host_name', '') != '' and not h.is_a_template()]
return obj, error
# Ok a more classic way
if self.ctx == 'hostgroups':
# Ok try to find this hostgroup
hgr = self.grps.find_by_name(pattern)
# Maybe it's an known one?
if not hgr:
error = "Error : cannot find the %s of the expression '%s'" % (self.ctx, pattern)
return hgr, error
# Ok the group is found, get the elements!
elts = hgr.get_hosts()
elts = strip_and_uniq(elts)
# Maybe the hostgroup memebrs is '*', if so expand with all hosts
if '*' in elts:
elts.extend([h.host_name for h in list(self.all_elements.items.values())
if getattr(h, 'host_name', '') != '' and not h.is_a_template()])
# And remove this strange hostname too :)
elts.remove('*')
return elts, error
obj = self.grps.find_hosts_that_use_template(pattern)
return obj, error
|
class ComplexExpressionFactory(object):
'''ComplexExpressionFactory provides complex expression parsing functions
'''
def __init__(self, ctx='hostgroups', grps=None, all_elements=None):
pass
def eval_cor_pattern(self, pattern):
'''Parse and build recursively a tree of ComplexExpressionNode from pattern
:param pattern: pattern to parse
:type pattern: str
:return: root node of parsed tree
:type: alignak.complexexpression.ComplexExpressionNode
'''
pass
def find_object(self, pattern):
'''Get a list of host corresponding to the pattern regarding the context
:param pattern: pattern to find
:type pattern: str
:return: Host list matching pattern (hostgroup name, template, all)
:rtype: list[alignak.objects.host.Host]
'''
pass
| 4 | 3 | 53 | 8 | 33 | 13 | 9 | 0.42 | 1 | 2 | 1 | 0 | 3 | 3 | 3 | 3 | 166 | 26 | 99 | 19 | 95 | 42 | 90 | 19 | 86 | 20 | 1 | 4 | 26 |
3,881 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/complexexpression.py
|
alignak.complexexpression.ComplexExpressionNode
|
class ComplexExpressionNode(object):
"""
ComplexExpressionNode is a node class for complex_expression(s)
"""
def __init__(self):
self.operand = None
self.sons = []
self.configuration_errors = []
self.not_value = False
# If leaf, the content will be the hostgroup or hosts
# that are selected with this node
self.leaf = False
self.content = None
def __str__(self): # pragma: no cover
if not self.leaf:
return "Op:'%s' Leaf:%s Sons:'[%s] IsNot:%s'" % \
(self.operand, self.leaf, ','.join([str(s) for s in self.sons]), self.not_value)
return 'IS LEAF %s' % self.content
def resolve_elements(self):
"""Get element of this node recursively
Compute rules with OR or AND rule then NOT rules.
:return: set of element
:rtype: set
"""
# If it's a leaf, we just need to dump a set with the content of the node
if self.leaf:
if not self.content:
return set()
return set(self.content)
# first got the not ones in a list, and the other in the other list
not_nodes = [s for s in self.sons if s.not_value]
positiv_nodes = [s for s in self.sons if not s.not_value] # ok a not not is hard to read..
# By default we are using a OR rule
if not self.operand:
self.operand = '|'
res = set()
# The operand will change the positiv loop only
i = 0
for node in positiv_nodes:
node_members = node.resolve_elements()
if self.operand == '|':
res = res.union(node_members)
elif self.operand == '&':
# The first elements of an AND rule should be used
if i == 0:
res = node_members
else:
res = res.intersection(node_members)
i += 1
# And we finally remove all NOT elements from the result
for node in not_nodes:
node_members = node.resolve_elements()
res = res.difference(node_members)
return res
def is_valid(self):
"""
Check if all leaves are correct (no error)
:return: True if correct, else False
:rtype: bool
TODO: Fix this function and use it.
DependencyNode should be ComplexExpressionNode
Should return true on a leaf
"""
valid = True
if not self.sons:
valid = False
else:
for son in self.sons:
if isinstance(son, DependencyNode) and not son.is_valid():
self.configuration_errors.extend(son.configuration_errors)
valid = False
return valid
|
class ComplexExpressionNode(object):
'''
ComplexExpressionNode is a node class for complex_expression(s)
'''
def __init__(self):
pass
def __str__(self):
pass
def resolve_elements(self):
'''Get element of this node recursively
Compute rules with OR or AND rule then NOT rules.
:return: set of element
:rtype: set
'''
pass
def is_valid(self):
'''
Check if all leaves are correct (no error)
:return: True if correct, else False
:rtype: bool
TODO: Fix this function and use it.
DependencyNode should be ComplexExpressionNode
Should return true on a leaf
'''
pass
| 5 | 3 | 20 | 3 | 12 | 6 | 4 | 0.54 | 1 | 3 | 1 | 0 | 4 | 6 | 4 | 4 | 85 | 13 | 48 | 19 | 43 | 26 | 44 | 19 | 39 | 9 | 1 | 3 | 16 |
3,882 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/contactdowntime.py
|
alignak.contactdowntime.ContactDowntime
|
class ContactDowntime(AlignakObject):
"""ContactDowntime class allows a contact to be in downtime. During this time
the contact won't get notifications
"""
properties = {
'start_time':
IntegerProp(default=0, fill_brok=[FULL_STATUS]),
'end_time':
IntegerProp(default=0, fill_brok=[FULL_STATUS]),
'author':
StringProp(default=u'', fill_brok=[FULL_STATUS]),
'comment':
StringProp(default=u''),
'is_in_effect':
BoolProp(default=False),
'can_be_deleted':
BoolProp(default=False),
'ref':
StringProp(default=u''),
}
# Schedule a contact downtime. It's far more easy than a host/service
# one because we got a beginning, and an end. That's all for running.
# got also an author and a comment for logging purpose.
def __init__(self, params, parsing=False):
super(ContactDowntime, self).__init__(params, parsing=parsing)
self.fill_default()
def check_activation(self, contacts):
"""Enter or exit downtime if necessary
:return: None
"""
now = time.time()
was_is_in_effect = self.is_in_effect
self.is_in_effect = (self.start_time <= now <= self.end_time)
# Raise a log entry when we get in the downtime
if not was_is_in_effect and self.is_in_effect:
self.enter(contacts)
# Same for exit purpose
if was_is_in_effect and not self.is_in_effect:
self.exit(contacts)
def in_scheduled_downtime(self):
"""Getter for is_in_effect attribute
:return: True if downtime is active, False otherwise
:rtype: bool
"""
return self.is_in_effect
def enter(self, contacts):
"""Wrapper to call raise_enter_downtime_log_entry for ref (host/service)
:return: None
"""
contact = contacts[self.ref]
contact.raise_enter_downtime_log_entry()
def exit(self, contacts):
"""Wrapper to call raise_exit_downtime_log_entry for ref (host/service)
set can_be_deleted to True
:return: None
"""
contact = contacts[self.ref]
contact.raise_exit_downtime_log_entry()
self.can_be_deleted = True
def cancel(self, contacts):
"""Wrapper to call raise_cancel_downtime_log_entry for ref (host/service)
set can_be_deleted to True
set is_in_effect to False
:return: None
"""
self.is_in_effect = False
contact = contacts[self.ref]
contact.raise_cancel_downtime_log_entry()
self.can_be_deleted = True
|
class ContactDowntime(AlignakObject):
'''ContactDowntime class allows a contact to be in downtime. During this time
the contact won't get notifications
'''
def __init__(self, params, parsing=False):
pass
def check_activation(self, contacts):
'''Enter or exit downtime if necessary
:return: None
'''
pass
def in_scheduled_downtime(self):
'''Getter for is_in_effect attribute
:return: True if downtime is active, False otherwise
:rtype: bool
'''
pass
def enter(self, contacts):
'''Wrapper to call raise_enter_downtime_log_entry for ref (host/service)
:return: None
'''
pass
def exit(self, contacts):
'''Wrapper to call raise_exit_downtime_log_entry for ref (host/service)
set can_be_deleted to True
:return: None
'''
pass
def cancel(self, contacts):
'''Wrapper to call raise_cancel_downtime_log_entry for ref (host/service)
set can_be_deleted to True
set is_in_effect to False
:return: None
'''
pass
| 7 | 6 | 9 | 1 | 4 | 4 | 1 | 0.64 | 1 | 1 | 0 | 0 | 6 | 2 | 6 | 9 | 86 | 17 | 42 | 15 | 35 | 27 | 27 | 15 | 20 | 3 | 2 | 1 | 8 |
3,883 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/daemon.py
|
alignak.daemon.EnvironmentFile
|
class EnvironmentFile(Exception):
"""Exception raised when the Alignak environment file is missing or corrupted"""
def __init__(self, msg):
Exception.__init__(self, msg)
|
class EnvironmentFile(Exception):
'''Exception raised when the Alignak environment file is missing or corrupted'''
def __init__(self, msg):
pass
| 2 | 1 | 2 | 0 | 2 | 0 | 1 | 0.33 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 11 | 5 | 1 | 3 | 2 | 1 | 1 | 3 | 2 | 1 | 1 | 3 | 0 | 1 |
3,884 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/daemons/arbiterdaemon.py
|
alignak.daemons.arbiterdaemon.Arbiter
|
class Arbiter(Daemon): # pylint: disable=too-many-instance-attributes
"""
Arbiter class. Referenced as "app" in most Interface
Class to manage the Arbiter daemon.
The Arbiter is the one that rules them all...
"""
properties = Daemon.properties.copy()
properties.update({
'type':
StringProp(default='arbiter'),
'port':
IntegerProp(default=7770),
'legacy_cfg_files':
ListProp(default=[]),
})
def __init__(self, **kwargs):
"""Arbiter daemon initialisation
:param kwargs: command line arguments
"""
# The monitored objects configuration files
self.legacy_cfg_files = []
# My daemons...
self.daemons_last_check = 0
self.daemons_last_reachable_check = 0
self.my_daemons = {}
# My report monitor interface and status
self.my_monitor = None
self.my_status = 0
# Verify mode is set thanks to the -V command line parameter
self.verify_only = False
if 'verify_only' in kwargs and kwargs['verify_only']:
self.verify_only = kwargs.get('verify_only', False)
super(Arbiter, self).__init__(kwargs.get('daemon_name', 'Default-Arbiter'), **kwargs)
# Our schedulers and arbiters are initialized in the base class
# Specific arbiter command line parameters
if 'legacy_cfg_files' in kwargs and kwargs['legacy_cfg_files']:
logger.warning(
"Using daemon configuration file is now deprecated. The arbiter daemon -a "
"parameter should not be used anymore. Use the -e environment file "
"parameter to provide a global Alignak configuration file. "
"** Note that this feature is not removed because it is still used "
"for the unit tests of the Alignak framework! If some monitoring files are "
"present in the command line parameters, they will supersede the ones "
"declared in the environment configuration file.")
# Monitoring files in the arguments extend the ones defined
# in the environment configuration file
self.legacy_cfg_files.extend(kwargs['legacy_cfg_files'])
logger.warning("Got some configuration files: %s", self.legacy_cfg_files)
# if not self.legacy_cfg_files:
# sys.exit("The Alignak environment file is not existing "
# "or do not define any monitoring configuration files. "
# "The arbiter can not start correctly.")
# Make sure the configuration files are not repeated...
my_cfg_files = []
for cfg_file in self.legacy_cfg_files:
logger.debug("- configuration file: %s / %s", cfg_file, os.path.abspath(cfg_file))
if os.path.abspath(cfg_file) not in my_cfg_files:
my_cfg_files.append(os.path.abspath(cfg_file))
self.legacy_cfg_files = my_cfg_files
self.alignak_name = self.name
if 'alignak_name' in kwargs and kwargs['alignak_name']:
self.alignak_name = kwargs['alignak_name']
self.arbiter_name = self.alignak_name
# Dump system health, defaults to report every 5 loop count
self.system_health = False
self.system_health_period = 5
if 'ALIGNAK_SYSTEM_MONITORING' in os.environ:
self.system_health = True
try:
self.system_health_period = int(os.environ.get('ALIGNAK_SYSTEM_MONITORING', '5'))
except ValueError: # pragma: no cover, simple protection
pass
# This because it is the Satellite that has these properties and I am a Daemon
# todo: change this?
# My own broks
self.broks = []
self.broks_lock = threading.RLock()
# My own monitoring events
self.events = []
self.events_lock = threading.RLock()
# Queue to keep the recent events
self.recent_events = None
self.is_master = False
self.link_to_myself = None
self.instance_id = None
# Now an external commands manager and a list for the external_commands
self.external_commands_manager = None
self.external_commands = []
self.external_commands_lock = threading.RLock()
# Used to check if we must still run or not - only for a spare arbiter
self.must_run = True
# Did we received a kill signal
self.kill_request = False
self.kill_timestamp = 0
# All dameons connection are valid
self.all_connected = False
# Configuration loading / reloading
self.need_config_reload = False
self.loading_configuration = False
self.http_interface = ArbiterInterface(self)
self.conf = Config()
def add(self, elt):
"""Generic function to add objects to the daemon internal lists.
Manage Broks, External commands
:param elt: objects to add
:type elt: alignak.AlignakObject
:return: None
"""
if isinstance(elt, Brok):
# For brok, we tag the brok with our instance_id
elt.instance_id = self.instance_id
if elt.type == 'monitoring_log':
# The brok is a monitoring event
with self.events_lock:
self.events.append(elt)
statsmgr.counter('events', 1)
# Also add to our broks
with self.broks_lock:
self.broks.append(elt)
statsmgr.counter('broks.added', 1)
elif isinstance(elt, ExternalCommand):
logger.debug("Queuing an external command '%s'", str(elt.__dict__))
with self.external_commands_lock:
self.external_commands.append(elt)
statsmgr.counter('external-commands.added', 1)
else: # pragma: no cover, simple dev alerting
logger.error('Do not manage object type %s (%s)', type(elt), elt)
def get_managed_configurations(self): # pylint: disable=no-self-use
"""Get the configuration managed by this arbiter
This is used by the master arbiter to get information from its spare arbiter
:return: a dict of arbiter links (only one) with instance_id as key and
hash, push_flavor and configuration identifier as values
:rtype: dict
"""
res = {}
# Todo: improve this for an arbiter spare
# for arbiter_link in self.conf.arbiters:
# if arbiter_link == self.link_to_myself:
# # Not myself ;)
# continue
# res[arbiter_link.instance_id] = {
# 'hash': arbiter_link.hash,
# 'push_flavor': arbiter_link.push_flavor,
# 'managed_conf_id': arbiter_link.managed_conf_id
# }
logger.debug("Get managed configuration: %s", res)
return res
def push_broks_to_broker(self): # pragma: no cover - not used!
"""Send all broks from arbiter internal list to broker
The arbiter get some broks and then pushes them to all the brokers.
:return: None
"""
someone_is_concerned = False
sent = False
for broker_link in self.conf.brokers:
# Send only if the broker is concerned...
if not broker_link.manage_arbiters:
continue
someone_is_concerned = True
if broker_link.reachable:
logger.debug("Sending %d broks to the broker %s", len(self.broks), broker_link.name)
if broker_link.push_broks(self.broks):
statsmgr.counter('broks.pushed.count', len(self.broks))
sent = True
if not someone_is_concerned or sent:
# No one is anymore interested with...
del self.broks[:]
def push_external_commands_to_schedulers(self): # pragma: no cover - not used!
"""Send external commands to schedulers
:return: None
"""
# Now get all external commands and push them to the schedulers
for external_command in self.external_commands:
self.external_commands_manager.resolve_command(external_command)
# Now for all reachable schedulers, send the commands
sent = False
for scheduler_link in self.conf.schedulers:
ext_cmds = scheduler_link.external_commands
if ext_cmds and scheduler_link.reachable:
logger.debug("Sending %d commands to the scheduler %s",
len(ext_cmds), scheduler_link.name)
if scheduler_link.push_external_commands(ext_cmds):
statsmgr.counter('external-commands.pushed.count', len(ext_cmds))
sent = True
if sent:
# Clean the pushed commands
scheduler_link.external_commands.clear()
def get_external_commands(self):
"""Get the external commands
:return: External commands list
:rtype: list
"""
res = self.external_commands
logger.debug("Get and clear external commands list: %s", res)
self.external_commands = []
return res
def get_broks_from_satellites(self): # pragma: no cover - not used!
"""Get broks from my all internal satellite links
The arbiter get the broks from ALL the known satellites
:return: None
"""
for satellites in [self.conf.brokers, self.conf.schedulers,
self.conf.pollers, self.conf.reactionners, self.conf.receivers]:
for satellite in satellites:
# Get only if reachable...
if not satellite.reachable:
continue
logger.debug("Getting broks from: %s", satellite.name)
new_broks = satellite.get_and_clear_broks()
if new_broks:
logger.debug("Got %d broks from: %s", len(new_broks), satellite.name)
for brok in new_broks:
self.add(brok)
def get_initial_broks_from_satellites(self):
"""Get initial broks from my internal satellite links
:return: None
"""
for satellites in [self.conf.brokers, self.conf.schedulers,
self.conf.pollers, self.conf.reactionners, self.conf.receivers]:
for satellite in satellites:
# Get only if reachable...
if not satellite.reachable:
continue
logger.debug("Getting initial brok from: %s", satellite.name)
brok = satellite.get_initial_status_brok()
logger.debug("Satellite '%s' initial brok: %s", satellite.name, brok)
self.add(brok)
def load_monitoring_config_file(self, clean=True):
# pylint: disable=too-many-branches,too-many-statements, too-many-locals
"""Load main configuration file (alignak.cfg)::
* Read all files given in the -c parameters
* Read all .cfg files in cfg_dir
* Read all files in cfg_file
* Create objects (Arbiter, Module)
* Set HTTP links info (ssl etc)
* Load its own modules
* Execute read_configuration hook (for arbiter modules)
* Create all objects (Service, Host, Realms ...)
* "Compile" configuration (Linkify, explode, apply inheritance, fill default values ...)
* Cut conf into parts and prepare it for sending
The clean parameter is useful to load a configuration without removing the properties
only used to parse the configuration and create the objects. Some utilities (like
alignak-backend-import script) may need to avoid the cleaning ;)
Note that default is no cleaning!
:param clean: set True to clean the created items
:type clean: bool
:return: None
"""
self.loading_configuration = True
_t_configuration = time.time()
if self.verify_only:
# Force adding a console handler to the Alignak logger
set_log_console(logging.INFO if not self.debug else logging.DEBUG)
# Force the global logger at INFO level
set_log_level(logging.INFO if not self.debug else logging.DEBUG, handlers=True)
logger.info("-----")
logger.info("Arbiter is in configuration check mode")
logger.info("Arbiter log level got increased to a minimum of INFO")
logger.info("-----")
# Maybe we do not have environment file
# if not self.alignak_env:
# self.exit_on_error("*** No Alignak environment file. Exiting...", exit_code=2)
# else:
# logger.info("Environment file: %s", self.env_filename)
if self.legacy_cfg_files:
logger.info("Loading monitored system configuration from legacy files: %s",
self.legacy_cfg_files)
else:
logger.info("No legacy file(s) configured for monitored system configuration")
# Alignak global environment file
# -------------------------------
# Here we did not yet read the Alignak configuration file (except for the Arbiter daemon
# configuration.
# We must get the Alignak macros and global configuration parameters
# ---------------------
# Manage Alignak macros; this before loading the legacy configuration files
# with their own potential macros
# ---------------------
macros = []
# Get the macros / variables declared in the Alignak environment (alignak.ini) file!
if self.alignak_env:
# The properties defined in the alignak.cfg file are not yet set! So we set the one
# got from the environment
logger.info("Getting Alignak macros...")
alignak_macros = self.alignak_env.get_alignak_macros()
if alignak_macros:
# Remove the leading and trailing underscores
for key in sorted(alignak_macros.keys()):
value = alignak_macros[key]
if key[0] == '_' or key[0] == '$':
key = key[1:]
if key[-1] == '_' or key[-1] == '$':
key = key[:-1]
# Create an old legacy macro format
macros.append('$%s$=%s' % (key.upper(), value))
logger.debug("- Alignak macro '$%s$' = %s", key.upper(), value)
# and then the global configuration.
# The properties defined in the alignak.cfg file are not yet set! So we set the one
# got from the appropriate section of the Alignak environment file
logger.info("Getting Alignak configuration...")
alignak_configuration = self.alignak_env.get_alignak_configuration()
if alignak_configuration:
for key in sorted(alignak_configuration.keys()):
value = alignak_configuration[key]
if key.startswith('_'):
# Ignore configuration variables prefixed with _
continue
if key in self.conf.properties:
entry = self.conf.properties[key]
setattr(self.conf, key, entry.pythonize(value))
else:
setattr(self.conf, key, value)
logger.debug("- setting '%s' as %s - %s",
key, type(key), getattr(self.conf, key))
logger.info("Got Alignak global configuration")
self.alignak_name = getattr(self.conf, "alignak_name", self.name)
logger.info("Configuration for Alignak: %s", self.alignak_name)
# Configure the global monitoring events logger
if self.conf.log_filename != os.path.abspath(self.conf.log_filename):
if self.conf.log_filename:
self.conf.log_filename = os.path.abspath(os.path.join(
self.logdir, self.conf.log_filename))
if self.conf.log_filename:
set_monitoring_logger(self.conf.log_filename, self.conf.log_rotation_when,
self.conf.log_rotation_interval, self.conf.log_rotation_count,
self.conf.log_format, self.conf.log_date)
print("Configured a monitoring events logger: %s" % self.conf.log_filename)
else:
self.exit_on_error(message="No monitoring events log configured!", exit_code=2)
if macros:
self.conf.load_params(macros)
# Here we got the macros and alignak configuration variables from the
# alignak.ini configuration!
# The self Config object is now initialized with the global Alignak variables.
# We can now read and parse the legacy configuration files (if any...)
raw_objects = self.conf.read_config_buf(
self.conf.read_legacy_cfg_files(self.legacy_cfg_files,
self.alignak_env.cfg_files if self.alignak_env
else None)
)
if self.alignak_name != getattr(self.conf, "alignak_name", self.name):
self.alignak_name = getattr(self.conf, "alignak_name", self.name)
logger.warning("Alignak name changed from the legacy Cfg files: %s", self.alignak_name)
# Maybe conf is already invalid
# self.conf.is_correct()
if not self.conf.conf_is_correct:
self.conf.show_errors()
self.request_stop("*** One or more problems were encountered while "
"processing the configuration (first check)...", exit_code=1)
if self.legacy_cfg_files:
logger.info("I correctly loaded the legacy configuration files")
# Hacking some global parameters inherited from Nagios to create
# on the fly some Broker modules like for status.dat parameters
# or nagios.log one if there are none already available
if 'module' not in raw_objects:
raw_objects['module'] = []
extra_modules = self.conf.hack_old_nagios_parameters()
if extra_modules:
logger.info("Some inner modules were configured for Nagios legacy parameters")
for _, module in extra_modules:
raw_objects['module'].append(module)
logger.debug("Extra modules: %s", extra_modules)
# Alignak global environment file
# -------------------------------
# Here we got the monitored system configuration from the legacy configuration files
# We must overload this configuration for the daemons and modules with the configuration
# declared in the Alignak environment (alignak.ini) file!
if self.alignak_env:
# Update the daemons legacy configuration if not complete
for daemon_type in ['arbiter', 'scheduler', 'broker',
'poller', 'reactionner', 'receiver']:
if daemon_type not in raw_objects:
raw_objects[daemon_type] = []
# Get all the Alignak daemons from the configuration
logger.info("Getting daemons configuration...")
some_daemons = False
for daemon_name, daemon_cfg in list(self.alignak_env.get_daemons().items()):
logger.info("Got a daemon configuration for %s", daemon_name)
if 'type' not in daemon_cfg:
self.conf.add_error("Ignoring daemon with an unknown type: %s" % daemon_name)
continue
some_daemons = True
daemon_type = daemon_cfg['type']
daemon_name = daemon_cfg['name']
logger.info("- got a %s named %s, spare: %s",
daemon_type, daemon_name, daemon_cfg.get('spare', False))
# If this daemon is found in the legacy configuration, replace this
new_cfg_daemons = []
for cfg_daemon in raw_objects[daemon_type]:
if cfg_daemon.get('name', 'unset') == daemon_name \
or cfg_daemon.get("%s_name" % daemon_type,
'unset') == [daemon_name]:
logger.info(" updating daemon Cfg file configuration")
else:
new_cfg_daemons.append(cfg_daemon)
new_cfg_daemons.append(daemon_cfg)
raw_objects[daemon_type] = new_cfg_daemons
logger.debug("Checking daemons configuration:")
some_legacy_daemons = False
for daemon_type in ['arbiter', 'scheduler', 'broker',
'poller', 'reactionner', 'receiver']:
for cfg_daemon in raw_objects[daemon_type]:
some_legacy_daemons = True
if 'name' not in cfg_daemon:
cfg_daemon['name'] = cfg_daemon['%s_name' % daemon_type]
# Manage daemon modules definition (may be not existing or empty or string)
if 'modules' not in cfg_daemon:
cfg_daemon['modules'] = []
if not isinstance(cfg_daemon['modules'], list):
cfg_daemon['modules'] = [cfg_daemon['modules']]
cfg_daemon['modules'].extend(
self.alignak_env.get_modules(daemon_name=cfg_daemon['name']))
cfg_daemon['modules'] = list(set(cfg_daemon['modules']))
for module_daemon_type, module in extra_modules:
if module_daemon_type == daemon_type:
cfg_daemon['modules'].append(module['name'])
logger.info("- added an Alignak inner module '%s' to the %s: %s",
module['name'], daemon_type, cfg_daemon['name'])
logger.debug("- %s / %s: ", daemon_type, cfg_daemon['name'])
logger.debug(" %s", cfg_daemon)
if not some_legacy_daemons:
logger.debug("- No legacy configured daemons.")
else:
logger.info("- some dameons are configured in legacy Cfg files. "
"You should update the configuration with the new Alignak "
"configuration file.")
if not some_daemons and not some_legacy_daemons:
logger.info("- No configured daemons.")
# and then get all modules from the configuration
logger.info("Getting modules configuration...")
if 'module' in raw_objects and raw_objects['module']:
# Manage the former parameters module_alias and module_types
# - replace with name and type
for module_cfg in raw_objects['module']:
if 'module_alias' not in module_cfg and 'name' not in module_cfg:
self.conf.add_error("Module declared without any 'name' or 'module_alias'")
continue
else:
if 'name' not in module_cfg:
module_cfg['name'] = module_cfg['module_alias']
module_cfg.pop('module_alias')
if 'module_types' in module_cfg and 'type' not in module_cfg:
module_cfg['type'] = module_cfg['module_types']
module_cfg.pop('module_types')
logger.debug("Module cfg %s params: %s", module_cfg['name'], module_cfg)
for _, module_cfg in list(self.alignak_env.get_modules().items()):
logger.info("- got a module %s, type: %s",
module_cfg.get('name', 'unset'), module_cfg.get('type', 'untyped'))
# If this module is found in the former Cfg files, replace the former configuration
for cfg_module in raw_objects['module']:
if cfg_module.get('name', 'unset') == [module_cfg['name']]:
logger.info(" updating module Cfg file configuration")
cfg_module = module_cfg
logger.info("Module %s updated parameters: %s",
module_cfg['name'], module_cfg)
break
else:
raw_objects['module'].append(module_cfg)
logger.debug("Module env %s params: %s", module_cfg['name'], module_cfg)
if 'module' in raw_objects and not raw_objects['module']:
logger.info("- No configured modules.")
# Create objects for our arbiters and modules
self.conf.early_create_objects(raw_objects)
# Check that an arbiter link exists and create the appropriate relations
# If no arbiter exists, create one with the provided data
params = {}
if self.alignak_env:
params = self.alignak_env.get_alignak_configuration()
self.conf.early_arbiter_linking(self.name, params)
# Search which arbiter I am in the arbiter links list
for lnk_arbiter in self.conf.arbiters:
logger.debug("I have an arbiter in my configuration: %s", lnk_arbiter.name)
if lnk_arbiter.name != self.name:
# Arbiter is not me!
logger.info("I found another arbiter (%s) in my (%s) configuration",
lnk_arbiter.name, self.name)
# And this arbiter needs to receive a configuration
lnk_arbiter.need_conf = True
continue
logger.info("I found myself in the configuration: %s", lnk_arbiter.name)
if self.link_to_myself is None:
# I update only if it does not yet exist (first configuration load)!
# I will not change myself because I am simply reloading a configuration ;)
self.link_to_myself = lnk_arbiter
self.link_to_myself.instance_id = self.name
self.link_to_myself.push_flavor = ''.encode('utf-8')
# self.link_to_myself.hash = self.conf.hash
# Set myself as alive ;)
self.link_to_myself.set_alive()
# We consider that this arbiter is a master one...
self.is_master = not self.link_to_myself.spare
if self.is_master:
logger.info("I am the master Arbiter.")
else:
logger.info("I am a spare Arbiter.")
# ... and that this arbiter do not need to receive a configuration
lnk_arbiter.need_conf = False
if not self.link_to_myself:
self.conf.show_errors()
self.request_stop("Error: I cannot find my own configuration (%s), I bail out. "
"To solve this, please change the arbiter name parameter in "
"the Alignak configuration file (certainly alignak.ini) "
"with the value '%s'."
" Thanks." % (self.name, socket.gethostname()), exit_code=1)
# Whether I am a spare arbiter, I will parse the whole configuration. This may be useful
# if the master fails before sending its configuration to me!
# An Arbiter which is not a master one will not go further...
# todo: is it a good choice?:
# 1/ why reading all the configuration files stuff?
# 2/ why not loading configuration data from the modules?
# -> Indeed, here, only the main configuration has been fetched by the arbiter.
# Perharps, loading only the alignak.ini would be enough for a spare arbiter.
# And it will make it simpler to configure...
if not self.is_master:
logger.info("I am not the master arbiter, I stop parsing the configuration")
self.loading_configuration = False
return
# We load our own modules
self.do_load_modules(self.link_to_myself.modules)
# Call modules that manage this read configuration pass
_ts = time.time()
self.hook_point('read_configuration')
statsmgr.timer('hook.read_configuration', time.time() - _ts)
# Call modules get_alignak_configuration() to load Alignak configuration parameters
# todo: re-enable this feature if it is really needed. It is a bit tricky to manage
# configuration from our own configuration file and from an external source :(
# (example modules: alignak_backend)
# _t0 = time.time()
# self.load_modules_alignak_configuration()
# statsmgr.timer('core.hook.get_alignak_configuration', time.time() - _t0)
# Call modules get_objects() to load new objects our own modules
# (example modules: alignak_backend)
self.load_modules_configuration_objects(raw_objects)
# Create objects for all the configuration
self.conf.create_objects(raw_objects)
# Maybe configuration is already invalid
# self.conf.is_correct()
if not self.conf.conf_is_correct:
self.conf.show_errors()
self.request_stop("*** One or more problems were encountered while processing "
"the configuration (second check)...", exit_code=1)
# Manage all post-conf modules
self.hook_point('early_configuration')
# Here we got all our Alignak configuration and the monitored system configuration
# from the legacy configuration files and extra modules.
logger.info("Preparing configuration...")
# Create Template links
self.conf.linkify_templates()
# All inheritances
self.conf.apply_inheritance()
# Explode between types
self.conf.explode()
# Implicit inheritance for services
self.conf.apply_implicit_inheritance()
# Fill default values for all the configuration objects
self.conf.fill_default_configuration()
# Remove templates from config
# Do not remove anymore!
# self.conf.remove_templates()
# Overrides specific service instances properties
self.conf.override_properties()
# Linkify objects to each other
self.conf.linkify()
# applying dependencies
self.conf.apply_dependencies()
# Raise warning about currently unmanaged parameters
if self.verify_only:
self.conf.warn_about_unmanaged_parameters()
# Explode global configuration parameters into Classes
self.conf.explode_global_conf()
# set our own timezone and propagate it to other satellites
self.conf.propagate_timezone_option()
# Look for business rules, and create the dep tree
self.conf.create_business_rules()
# And link them
self.conf.create_business_rules_dependencies()
# Set my own parameters from the loaded configuration
# Last monitoring events
self.recent_events = deque(maxlen=int(os.environ.get('ALIGNAK_EVENTS_LOG_COUNT',
self.conf.events_log_count)))
# Manage all post-conf modules
self.hook_point('late_configuration')
# # Clean objects of temporary/unnecessary attributes for live work:
# if clean:
# logger.info("Cleaning configuration objects...")
# self.conf.clean()
#
# Dump Alignak macros
logger.debug("Alignak global macros:")
macro_resolver = MacroResolver()
macro_resolver.init(self.conf)
for macro_name in sorted(self.conf.macros):
macro_value = macro_resolver.resolve_simple_macros_in_string("$%s$" % macro_name, [],
None, None)
logger.debug("- $%s$ = %s", macro_name, macro_value)
statsmgr.timer('configuration.loading', time.time() - _t_configuration)
# Configuration is correct?
logger.info("Checking configuration...")
self.conf.is_correct()
# Clean objects of temporary/unnecessary attributes for live work:
if clean:
logger.info("Cleaning configuration objects...")
self.conf.clean()
# REF: doc/alignak-conf-dispatching.png (2)
logger.info("Splitting configuration...")
self.conf.cut_into_parts()
# Here, the self.conf.parts exist
# And the realms have some 'packs'
# Check if all the configuration daemons will be available
if not self.daemons_start(run_daemons=False):
self.conf.show_errors()
self.request_stop("*** Alignak will not be able to manage the configured daemons. "
"Check and update your configuration!", exit_code=1)
# Some properties need to be prepared (somehow "flatten"...) before being sent,
# This to prepare the configuration that will be sent to our spare arbiter (if any)
self.conf.prepare_for_sending()
statsmgr.timer('configuration.spliting', time.time() - _t_configuration)
# Here, the self.conf.spare_arbiter_conf exist
# Still a last configuration check because some things may have changed when
# we cut the configuration into parts (eg. hosts and realms consistency) and
# when we prepared the configuration for sending
if not self.conf.conf_is_correct: # pragma: no cover, not with unit tests.
self.conf.show_errors()
self.request_stop("Configuration is incorrect, sorry, I bail out", exit_code=1)
logger.info("Things look okay - "
"No serious problems were detected during the pre-flight check")
# Exit if we are just here for config checking
if self.verify_only:
logger.info("Arbiter %s checked the configuration", self.name)
if self.conf.missing_daemons:
logger.warning("Some missing daemons were detected in the parsed configuration. "
"Nothing to worry about, but you should define them, "
"else Alignak will use its default configuration.")
del raw_objects
# Display found warnings and errors
self.conf.show_errors()
# Now I have a configuration!
self.have_conf = True
self.loading_configuration = False
statsmgr.timer('configuration.available', time.time() - _t_configuration)
def load_modules_configuration_objects(self, raw_objects): # pragma: no cover,
# not yet with unit tests.
"""Load configuration objects from arbiter modules
If module implements get_objects arbiter will call it and add create
objects
:param raw_objects: raw objects we got from reading config files
:type raw_objects: dict
:return: None
"""
# Now we ask for configuration modules if they
# got items for us
for instance in self.modules_manager.instances:
logger.debug("Getting objects from the module: %s", instance.name)
if not hasattr(instance, 'get_objects'):
logger.debug("The module '%s' do not provide any objects.", instance.name)
return
try:
logger.info("Getting Alignak monitored configuration objects from module '%s'",
instance.name)
got_objects = instance.get_objects()
except Exception as exp: # pylint: disable=broad-except
logger.exception("Module %s get_objects raised an exception %s. "
"Log and continue to run.", instance.name, exp)
continue
if not got_objects:
logger.warning("The module '%s' did not provided any objects.", instance.name)
return
types_creations = self.conf.types_creations
for o_type in types_creations:
(_, _, prop, _, _) = types_creations[o_type]
if prop in ['arbiters', 'brokers', 'schedulers',
'pollers', 'reactionners', 'receivers', 'modules']:
continue
if prop not in got_objects:
logger.info("Did not get any '%s' objects from %s", prop, instance.name)
continue
for obj in got_objects[prop]:
# test if raw_objects[k] are already set - if not, add empty array
if o_type not in raw_objects:
raw_objects[o_type] = []
# Update the imported_from property if the module did not set
# if 'imported_from' not in obj:
# obj['imported_from'] = 'module:%s' % instance.name
# Append to the raw objects
raw_objects[o_type].append(obj)
logger.debug("Added %i %s objects from %s",
len(got_objects[prop]), o_type, instance.name)
def load_modules_alignak_configuration(self): # pragma: no cover, not yet with unit tests.
"""Load Alignak configuration from the arbiter modules
If module implements get_alignak_configuration, call this function
:return: None
"""
alignak_cfg = {}
# Ask configured modules if they got configuration for us
for instance in self.modules_manager.instances:
if not hasattr(instance, 'get_alignak_configuration'):
return
try:
logger.info("Getting Alignak global configuration from module '%s'", instance.name)
cfg = instance.get_alignak_configuration()
alignak_cfg.update(cfg)
except Exception as exp: # pylint: disable=broad-except
logger.error("Module %s get_alignak_configuration raised an exception %s. "
"Log and continue to run", instance.name, str(exp))
output = io.StringIO()
traceback.print_exc(file=output)
logger.error("Back trace of this remove: %s", output.getvalue())
output.close()
continue
params = []
if alignak_cfg:
logger.info("Got Alignak global configuration:")
for key, value in sorted(alignak_cfg.items()):
logger.info("- %s = %s", key, value)
# properties starting with an _ character are "transformed" to macro variables
if key.startswith('_'):
key = '$' + key[1:].upper() + '$'
# properties valued as None are filtered
if value is None:
continue
# properties valued as None string are filtered
if value == 'None':
continue
# properties valued as empty strings are filtered
if value == '':
continue
# set properties as legacy Shinken configuration files
params.append("%s=%s" % (key, value))
self.conf.load_params(params)
def request_stop(self, message='', exit_code=0):
"""Stop the Arbiter daemon
:return: None
"""
# Only a master arbiter can stop the daemons
if self.is_master:
# Stop the daemons
self.daemons_stop(timeout=self.conf.daemons_stop_timeout)
# Request the daemon stop
super(Arbiter, self).request_stop(message, exit_code)
def start_daemon(self, satellite):
"""Manage the list of detected missing daemons
If the daemon does not in exist `my_daemons`, then:
- prepare daemon start arguments (port, name and log file)
- start the daemon
- make sure it started correctly
:param satellite: the satellite for which a daemon is to be started
:type satellite: SatelliteLink
:return: True if the daemon started correctly
"""
logger.info(" launching a daemon for: %s/%s...", satellite.type, satellite.name)
# The daemon startup script location may be defined in the configuration
daemon_script_location = getattr(self.conf, 'daemons_script_location', self.bindir)
if not daemon_script_location:
daemon_script_location = "alignak-%s" % satellite.type
else:
daemon_script_location = "%s/alignak-%s" % (daemon_script_location, satellite.type)
# Some extra arguments may be defined in the Alignak configuration
daemon_arguments = getattr(self.conf, 'daemons_arguments', '')
args = [daemon_script_location,
"--name", satellite.name,
"--environment", self.env_filename,
"--host", str(satellite.host),
"--port", str(satellite.port)]
if daemon_arguments:
args.append(daemon_arguments)
logger.info(" ... with some arguments: %s", args)
try:
process = psutil.Popen(args, stdin=None, stdout=None, stderr=None)
# A brief pause...
time.sleep(0.1)
except Exception as exp: # pylint: disable=broad-except
logger.error("Error when launching %s: %s", satellite.name, exp)
logger.error("Command: %s", args)
return False
logger.info(" %s launched (pid=%d, gids=%s)",
satellite.name, process.pid, process.gids())
# My satellites/daemons map
self.my_daemons[satellite.name] = {
'satellite': satellite,
'process': process
}
return True
def daemons_start(self, run_daemons=True):
"""Manage the list of the daemons in the configuration
Check if the daemon needs to be started by the Arbiter.
If so, starts the daemon if `run_daemons` is True
:param run_daemons: run the daemons or make a simple check
:type run_daemons: bool
:return: True if all daemons are running, else False. always True for a simple check
"""
result = True
if run_daemons:
logger.info("Alignak configured daemons start:")
else:
logger.info("Alignak configured daemons check:")
# Parse the list of the missing daemons and try to run the corresponding processes
for satellites_list in [self.conf.arbiters, self.conf.receivers, self.conf.reactionners,
self.conf.pollers, self.conf.brokers, self.conf.schedulers]:
for satellite in satellites_list:
logger.info("- found %s, to be launched: %s, address: %s",
satellite.name, satellite.alignak_launched, satellite.uri)
if satellite == self.link_to_myself:
# Ignore myself ;)
continue
if satellite.alignak_launched and \
satellite.address not in ['127.0.0.1', 'localhost']:
logger.error("Alignak is required to launch a daemon for %s %s "
"but the satelitte is defined on an external address: %s",
satellite.type, satellite.name, satellite.address)
result = False
continue
if not run_daemons:
# When checking, ignore the daemon launch part...
continue
if not satellite.alignak_launched:
logger.debug("Alignak will not launch '%s'")
continue
if not satellite.active:
logger.warning("- daemon '%s' is declared but not set as active, "
"do not start...", satellite.name)
continue
if satellite.name in self.my_daemons:
logger.warning("- daemon '%s' is already running", satellite.name)
continue
started = self.start_daemon(satellite)
result = result and started
return result
def daemons_check(self):
"""Manage the list of Alignak launched daemons
Check if the daemon process is running
:return: True if all daemons are running, else False
"""
# First look if it's not too early to ping
start = time.time()
if self.daemons_last_check \
and self.daemons_last_check + self.conf.daemons_check_period > start:
logger.debug("Too early to check daemons, check period is %.2f seconds",
self.conf.daemons_check_period)
return True
logger.debug("Alignak launched daemons check")
result = True
procs = [psutil.Process()]
for daemon in list(self.my_daemons.values()):
# Get only the daemon (not useful for its children processes...)
# procs = daemon['process'].children()
procs.append(daemon['process'])
for proc in procs:
try:
logger.debug("Process %s is %s", proc.name(), proc.status())
# logger.debug("Process listening:", proc.name(), proc.status())
# for connection in proc.connections():
# l_addr, l_port = connection.laddr if connection.laddr else ('', 0)
# r_addr, r_port = connection.raddr if connection.raddr else ('', 0)
# logger.debug("- %s:%s <-> %s:%s, %s", l_addr, l_port, r_addr, r_port,
# connection.status)
# Reset the daemon connection if it got broked...
if not daemon['satellite'].con:
if self.daemon_connection_init(daemon['satellite']):
# Set my satellite as alive :)
daemon['satellite'].set_alive()
except psutil.NoSuchProcess:
pass
except psutil.AccessDenied:
# Probably stopping...
if not self.will_stop and proc == daemon['process']:
logger.warning("Daemon %s/%s is not running!",
daemon['satellite'].type, daemon['satellite'].name)
logger.debug("Access denied - Process %s is %s", proc.name(), proc.status())
if not self.start_daemon(daemon['satellite']):
# Set my satellite as dead :(
daemon['satellite'].set_dead()
result = False
else:
logger.info("I restarted %s/%s",
daemon['satellite'].type, daemon['satellite'].name)
logger.info("Pausing %.2f seconds...", 0.5)
time.sleep(0.5)
else:
logger.info("Child process %s is %s", proc.name(), proc.status())
# Set the last check as now
self.daemons_last_check = start
logger.debug("Checking daemons duration: %.2f seconds", time.time() - start)
return result
def daemons_stop(self, timeout=30, kill_children=False):
"""Stop the Alignak daemons
Iterate over the self-launched daemons and their children list to send a TERM
Wait for daemons to terminate and then send a KILL for those that are not yet stopped
As a default behavior, only the launched daemons are killed, not their children.
Each daemon will manage its children killing
:param timeout: delay to wait before killing a daemon
:type timeout: int
:param kill_children: also kill the children (defaults to False)
:type kill_children: bool
:return: True if all daemons stopped
"""
def on_terminate(proc):
"""Process termination callback function"""
logger.debug("process %s terminated with exit code %s", proc.pid, proc.returncode)
result = True
if self.my_daemons:
logger.info("Alignak self-launched daemons stop:")
start = time.time()
for daemon in list(self.my_daemons.values()):
# Terminate the daemon and its children process
procs = []
if kill_children:
procs = daemon['process'].children()
procs.append(daemon['process'])
for process in procs:
try:
logger.info("- terminating process %s", process.name())
process.terminate()
except psutil.AccessDenied:
logger.warning("Process %s is %s", process.name(), process.status())
time.sleep(1)
procs = []
for daemon in list(self.my_daemons.values()):
# Stop the daemon and its children process
if kill_children:
procs = daemon['process'].children()
procs.append(daemon['process'])
_, alive = psutil.wait_procs(procs, timeout=timeout, callback=on_terminate)
if alive:
# Kill processes
for process in alive:
logger.info("Process %s did not stopped, trying to kill", process.name())
process.kill()
_, alive = psutil.wait_procs(alive, timeout=timeout, callback=on_terminate)
if alive:
# give up
for process in alive:
logger.warning("process %s survived SIGKILL; giving up", process.name())
result = False
logger.debug("Stopping daemons duration: %.2f seconds", time.time() - start)
return result
def daemons_reachability_check(self):
"""Manage the list of Alignak launched daemons
Check if the daemon process is running
Then, check the daemon status and get its monitoring events
:return: True if all daemons are running, else False
"""
# First look if it's not too early to ping
start = time.time()
if self.daemons_last_reachable_check and \
self.daemons_last_reachable_check + self.conf.daemons_check_period > start:
logger.debug("Too early to check daemons reachability, check period is %.2f seconds",
self.conf.daemons_check_period)
return True
_t0 = time.time()
logger.debug("Alignak daemons reachability check")
result = self.dispatcher.check_reachable()
statsmgr.timer('dispatcher.check-alive', time.time() - _t0)
_t0 = time.time()
logger.debug("Alignak daemons status get")
events = self.dispatcher.check_status_and_get_events()
duration = time.time() - _t0
statsmgr.timer('dispatcher.check-status', duration)
logger.debug("Getting daemons status duration: %.2f seconds", duration)
# Send the collected events to the Alignak logger
for event in events:
event.prepare()
make_monitoring_log(event.data['level'], event.data['message'],
timestamp=event.creation_time, to_logger=True)
# Add to the recent events for the WS endpoint
event.data['timestamp'] = event.creation_time
event.data['date'] = datetime.fromtimestamp(event.creation_time).\
strftime(self.conf.events_date_format)
event.data.pop('instance_id')
self.recent_events.append(event.data)
# Set the last check as now
self.daemons_last_reachable_check = start
logger.debug("Checking daemons reachability duration: %.2f seconds", time.time() - start)
return result
def setup_new_conf(self):
# pylint: disable=too-many-locals
""" Setup a new configuration received from a Master arbiter.
TODO: perharps we should not accept the configuration or raise an error if we do not
find our own configuration data in the data. Thus this should never happen...
:return: None
"""
# Execute the base class treatment...
super(Arbiter, self).setup_new_conf()
with self.conf_lock:
logger.info("I received a new configuration from my master")
# Get the new configuration
self.cur_conf = self.new_conf
# self_conf is our own configuration from the alignak environment
# Arbiters do not have this property in the received configuration because
# they already loaded a configuration on daemon load
self_conf = self.cur_conf.get('self_conf', None)
if not self_conf:
self_conf = self.conf
# whole_conf contains the full configuration load by my master
whole_conf = self.cur_conf['whole_conf']
logger.debug("Received a new configuration, containing:")
for key in self.cur_conf:
logger.debug("- %s: %s", key, self.cur_conf[key])
logger.debug("satellite self configuration part: %s", self_conf)
# Update Alignak name
self.alignak_name = self.cur_conf['alignak_name']
logger.info("My Alignak instance: %s", self.alignak_name)
# This to indicate that the new configuration got managed...
self.new_conf = {}
# Get the whole monitored objects configuration
t00 = time.time()
try:
received_conf_part = unserialize(whole_conf)
except AlignakClassLookupException as exp: # pragma: no cover, simple protection
# This to indicate that the new configuration is not managed...
self.new_conf = {
"_status": "Cannot un-serialize configuration received from arbiter"
}
logger.error(self.new_conf['_status'])
logger.error("Back trace of the error:\n%s", traceback.format_exc())
return
except Exception as exp: # pylint: disable=broad-except
# This to indicate that the new configuration is not managed...
self.new_conf = {
"_status": "Cannot un-serialize configuration received from arbiter"
}
logger.error(self.new_conf['_status'])
logger.error(self.new_conf)
self.exit_on_exception(exp, self.new_conf)
logger.info("Monitored configuration %s received at %d. Un-serialized in %d secs",
received_conf_part, t00, time.time() - t00)
# Now we create our arbiters and schedulers links
my_satellites = getattr(self, 'arbiters', {})
received_satellites = self.cur_conf['arbiters']
for link_uuid in received_satellites:
rs_conf = received_satellites[link_uuid]
logger.debug("- received %s - %s: %s", rs_conf['instance_id'],
rs_conf['type'], rs_conf['name'])
# Must look if we already had a configuration and save our broks
already_got = rs_conf['instance_id'] in my_satellites
broks = []
actions = {}
wait_homerun = {}
external_commands = {}
running_id = 0
if already_got:
logger.warning("I already got: %s", rs_conf['instance_id'])
# Save some information
running_id = my_satellites[link_uuid].running_id
(broks, actions,
wait_homerun, external_commands) = \
my_satellites[link_uuid].get_and_clear_context()
# Delete the former link
del my_satellites[link_uuid]
# My new satellite link...
new_link = SatelliteLink.get_a_satellite_link('arbiter', rs_conf)
my_satellites[new_link.uuid] = new_link
logger.info("I got a new arbiter satellite: %s", new_link)
new_link.running_id = running_id
new_link.external_commands = external_commands
new_link.broks = broks
new_link.wait_homerun = wait_homerun
new_link.actions = actions
# # replacing satellite address and port by those defined in satellite_map
# if new_link.name in self_conf.satellite_map:
# overriding = self_conf.satellite_map[new_link.name]
# # satellite = dict(satellite) # make a copy
# # new_link.update(self_conf.get('satellite_map', {})[new_link.name])
# logger.warning("Do not override the configuration for: %s, with: %s. "
# "Please check whether this is necessary!",
# new_link.name, overriding)
# for arbiter_link in received_conf_part.arbiters:
# logger.info("I have arbiter links in my configuration: %s", arbiter_link.name)
# if arbiter_link.name != self.name and not arbiter_link.spare:
# # Arbiter is not me!
# logger.info("I found my master arbiter in the configuration: %s",
# arbiter_link.name)
# continue
#
# logger.info("I found myself in the received configuration: %s", arbiter_link.name)
# self.link_to_myself = arbiter_link
# # We received a configuration s we are not a master !
# self.is_master = False
# self.link_to_myself.spare = True
# # Set myself as alive ;)
# self.link_to_myself.set_alive()
# Now I have a configuration!
self.have_conf = True
def wait_for_master_death(self):
"""Wait for a master timeout and take the lead if necessary
:return: None
"""
logger.info("Waiting for master death")
timeout = 1.0
self.last_master_ping = time.time()
master_timeout = 300
for arbiter_link in self.conf.arbiters:
if not arbiter_link.spare:
master_timeout = \
arbiter_link.spare_check_interval * arbiter_link.spare_max_check_attempts
logger.info("I'll wait master death for %d seconds", master_timeout)
while not self.interrupted:
# Make a pause and check if the system time changed
_, tcdiff = self.make_a_pause(timeout)
# If there was a system time change then we have to adapt last_master_ping:
if tcdiff:
self.last_master_ping += tcdiff
if self.new_conf:
self.setup_new_conf()
sys.stdout.write(".")
sys.stdout.flush()
# Now check if master is dead or not
now = time.time()
if now - self.last_master_ping > master_timeout:
logger.info("Arbiter Master is dead. The arbiter %s takes the lead!",
self.link_to_myself.name)
for arbiter_link in self.conf.arbiters:
if not arbiter_link.spare:
arbiter_link.alive = False
self.must_run = True
break
def check_and_log_tp_activation_change(self):
"""Raise log for timeperiod change (useful for debug)
:return: None
"""
for timeperiod in self.conf.timeperiods:
brok = timeperiod.check_and_log_activation_change()
if brok:
self.add(brok)
def manage_signal(self, sig, frame):
"""Manage signals caught by the process
Specific behavior for the arbiter when it receives a sigkill or sigterm
:param sig: signal caught by the process
:type sig: str
:param frame: current stack frame
:type frame:
:return: None
"""
# Request the arbiter to stop
if sig in [signal.SIGINT, signal.SIGTERM]:
logger.info("received a signal: %s", SIGNALS_TO_NAMES_DICT[sig])
self.kill_request = True
self.kill_timestamp = time.time()
logger.info("request to stop in progress")
else:
Daemon.manage_signal(self, sig, frame)
def configuration_dispatch(self, not_configured=None):
"""Monitored configuration preparation and dispatch
:return: None
"""
if not not_configured:
self.dispatcher = Dispatcher(self.conf, self.link_to_myself)
# I set my own dispatched configuration as the provided one...
# because I will not push a configuration to myself :)
self.cur_conf = self.conf
# Loop for the first configuration dispatching, if the first dispatch fails, bail out!
# Without a correct configuration, Alignak daemons will not run correctly
first_connection_try_count = 0
logger.info("Connecting to my satellites...")
while True:
first_connection_try_count += 1
# Initialize connection with all our satellites
self.all_connected = True
for satellite in self.dispatcher.all_daemons_links:
if satellite == self.link_to_myself:
continue
if not satellite.active:
continue
connected = self.daemon_connection_init(satellite, set_wait_new_conf=True)
logger.debug(" %s is %s", satellite, connected)
self.all_connected = self.all_connected and connected
if self.all_connected:
logger.info("- satellites connection #%s is ok", first_connection_try_count)
break
else:
logger.warning("- satellites connection #%s is not correct; "
"let's give another chance after %d seconds...",
first_connection_try_count,
self.link_to_myself.polling_interval)
if first_connection_try_count >= 3:
self.request_stop("All the daemons connections could not be established "
"despite %d tries! "
"Sorry, I bail out!" % first_connection_try_count,
exit_code=4)
time.sleep(self.link_to_myself.polling_interval)
# Now I have a connection with all the daemons I need to contact them,
# check they are alive and ready to run
_t0 = time.time()
self.all_connected = self.dispatcher.check_reachable()
statsmgr.timer('dispatcher.check-alive', time.time() - _t0)
_t0 = time.time()
# Preparing the configuration for dispatching
logger.info("Preparing the configuration for dispatching...")
self.dispatcher.prepare_dispatch()
statsmgr.timer('dispatcher.prepare-dispatch', time.time() - _t0)
logger.info("- configuration is ready to dispatch")
# Loop for the first configuration dispatching, if the first dispatch fails, bail out!
# Without a correct configuration, Alignak daemons will not run correctly
first_dispatch_try_count = 0
logger.info("Dispatching the configuration to my satellites...")
while True:
first_dispatch_try_count += 1
# Check reachable - if a configuration is prepared, this will force the
# daemons communication, and the dispatching will be launched
_t0 = time.time()
logger.info("- configuration dispatching #%s...", first_dispatch_try_count)
self.dispatcher.check_reachable(forced=True)
statsmgr.timer('dispatcher.dispatch', time.time() - _t0)
# Make a pause to let our satellites get ready...
pause = max(1, max(self.conf.daemons_dispatch_timeout, len(self.my_daemons) * 0.5))
# pause = len(self.my_daemons) * 0.2
logger.info("- pausing %d seconds...", pause)
time.sleep(pause)
_t0 = time.time()
logger.info("- checking configuration dispatch...")
# Checking the dispatch is accepted
self.dispatcher.check_dispatch()
statsmgr.timer('dispatcher.check-dispatch', time.time() - _t0)
if self.dispatcher.dispatch_ok:
logger.info("- configuration dispatching #%s is ok", first_dispatch_try_count)
break
else:
logger.warning("- configuration dispatching #%s is not correct; "
"let's give another chance...", first_dispatch_try_count)
if first_dispatch_try_count >= 3:
self.request_stop("The configuration could not be dispatched despite %d tries! "
"Sorry, I bail out!" % first_connection_try_count,
exit_code=4)
def do_before_loop(self):
"""Called before the main daemon loop.
:return: None
"""
logger.info("I am the arbiter: %s", self.link_to_myself.name)
# If I am a spare, I do not have anything to do here...
if not self.is_master:
logger.debug("Waiting for my master death...")
return
# Arbiter check if some daemons need to be started
if not self.daemons_start(run_daemons=True):
self.request_stop(message="Some Alignak daemons did not started correctly.",
exit_code=4)
if not self.daemons_check():
self.request_stop(message="Some Alignak daemons cannot be checked.",
exit_code=4)
# Make a pause to let our started daemons get ready...
pause = max(1, max(self.conf.daemons_start_timeout, len(self.my_daemons) * 0.5))
if pause:
logger.info("Pausing %.2f seconds...", pause)
time.sleep(pause)
# Prepare and dispatch the monitored configuration
self.configuration_dispatch()
# Now we can get all initial broks for our satellites
_t0 = time.time()
self.get_initial_broks_from_satellites()
statsmgr.timer('broks.get-initial', time.time() - _t0)
# Now create the external commands manager
# We are a dispatcher: our role is to dispatch commands to the schedulers
self.external_commands_manager = ExternalCommandManager(
self.conf, 'dispatcher', self, self.conf.accept_passive_unknown_check_results,
self.conf.log_external_commands)
def do_loop_turn(self):
# pylint: disable=too-many-branches, too-many-statements, too-many-locals
"""Loop turn for Arbiter
If not a master daemon, wait for my master death...
Else, run:
* Check satellites are alive
* Check and dispatch (if needed) the configuration
* Get broks and external commands from the satellites
* Push broks and external commands to the satellites
:return: None
"""
# If I am a spare, I only wait for the master arbiter to die...
if not self.is_master:
logger.debug("Waiting for my master death...")
self.wait_for_master_death()
return
if self.loop_count % self.alignak_monitor_period == 1:
self.get_alignak_status(details=True)
# Maybe an external process requested Alignak stop...
if self.kill_request:
logger.info("daemon stop mode ...")
if not self.dispatcher.stop_request_sent:
logger.info("entering daemon stop mode, time before exiting: %s",
self.conf.daemons_stop_timeout)
self.dispatcher.stop_request()
if time.time() > self.kill_timestamp + self.conf.daemons_stop_timeout:
logger.info("daemon stop mode delay reached, immediate stop")
self.dispatcher.stop_request(stop_now=True)
time.sleep(1)
self.interrupted = True
logger.info("exiting...")
if not self.kill_request:
# Main loop treatment
# Try to see if one of my module is dead, and restart previously dead modules
self.check_and_del_zombie_modules()
# Call modules that manage a starting tick pass
_t0 = time.time()
self.hook_point('tick')
statsmgr.timer('hook.tick', time.time() - _t0)
# Look for logging timeperiods activation change (active/inactive)
self.check_and_log_tp_activation_change()
# Check that my daemons are alive
if not self.daemons_check():
if self.conf.daemons_failure_kill:
self.request_stop(message="Some Alignak daemons cannot be checked.",
exit_code=4)
else:
logger.warning("Should have killed my children if "
"'daemons_failure_kill' were set!")
# Now the dispatcher job - check if all daemons are reachable and have a configuration
if not self.daemons_reachability_check():
logger.warning("A new configuration dispatch is required!")
# Prepare and dispatch the monitored configuration
self.configuration_dispatch(self.dispatcher.not_configured)
# Now get things from our module instances
_t0 = time.time()
self.get_objects_from_from_queues()
statsmgr.timer('get-objects-from-queues', time.time() - _t0)
# Maybe our satellites raised new broks. Reap them...
_t0 = time.time()
self.get_broks_from_satellites()
statsmgr.timer('broks.got.time', time.time() - _t0)
# One broker is responsible for our broks, we give him our broks
_t0 = time.time()
self.push_broks_to_broker()
statsmgr.timer('broks.pushed.time', time.time() - _t0)
# # We push our external commands to our schedulers...
# _t0 = time.time()
# self.push_external_commands_to_schedulers()
# statsmgr.timer('external-commands.pushed.time', time.time() - _t0)
if self.system_health and (self.loop_count % self.system_health_period == 1):
perfdatas = []
cpu_count = psutil.cpu_count()
perfdatas.append("'cpu_count'=%d" % cpu_count)
logger.debug(" . cpu count: %d", cpu_count)
cpu_percents = psutil.cpu_percent(percpu=True)
cpu = 1
for percent in cpu_percents:
perfdatas.append("'cpu_%d_percent'=%.2f%%" % (cpu, percent))
cpu += 1
cpu_times_percent = psutil.cpu_times_percent(percpu=True)
cpu = 1
for cpu_times_percent in cpu_times_percent:
logger.debug(" . cpu time percent: %s", cpu_times_percent)
for key in cpu_times_percent._fields:
perfdatas.append(
"'cpu_%d_%s_percent'=%.2f%%" % (cpu, key,
getattr(cpu_times_percent, key)))
cpu += 1
logger.info("%s cpu|%s", self.name, " ".join(perfdatas))
perfdatas = []
disk_partitions = psutil.disk_partitions(all=False)
for disk_partition in disk_partitions:
logger.debug(" . disk partition: %s", disk_partition)
disk = getattr(disk_partition, 'mountpoint')
disk_usage = psutil.disk_usage(disk)
logger.debug(" . disk usage: %s", disk_usage)
for key in disk_usage._fields:
if 'percent' in key:
perfdatas.append("'disk_%s_percent_used'=%.2f%%"
% (disk, getattr(disk_usage, key)))
else:
perfdatas.append("'disk_%s_%s'=%dB"
% (disk, key, getattr(disk_usage, key)))
logger.info("%s disks|%s", self.name, " ".join(perfdatas))
perfdatas = []
virtual_memory = psutil.virtual_memory()
logger.debug(" . memory: %s", virtual_memory)
for key in virtual_memory._fields:
if 'percent' in key:
perfdatas.append("'mem_percent_used_%s'=%.2f%%"
% (key, getattr(virtual_memory, key)))
else:
perfdatas.append("'mem_%s'=%dB"
% (key, getattr(virtual_memory, key)))
swap_memory = psutil.swap_memory()
logger.debug(" . memory: %s", swap_memory)
for key in swap_memory._fields:
if 'percent' in key:
perfdatas.append("'swap_used_%s'=%.2f%%"
% (key, getattr(swap_memory, key)))
else:
perfdatas.append("'swap_%s'=%dB"
% (key, getattr(swap_memory, key)))
logger.info("%s memory|%s", self.name, " ".join(perfdatas))
def get_daemon_stats(self, details=False): # pylint: disable=too-many-branches
"""Increase the stats provided by the Daemon base class
:return: stats dictionary
:rtype: dict
"""
now = int(time.time())
# Call the base Daemon one
res = super(Arbiter, self).get_daemon_stats(details=details)
res.update({
'name': self.link_to_myself.get_name() if self.link_to_myself else self.name,
'type': self.type,
'daemons_states': {}
})
if details:
res['monitoring_objects'] = {}
for _, _, strclss, _, _ in list(self.conf.types_creations.values()):
if strclss in ['hostescalations', 'serviceescalations']:
logger.debug("Ignoring count for '%s'...", strclss)
continue
objects_list = getattr(self.conf, strclss, [])
res['monitoring_objects'][strclss] = {
'count': len(objects_list)
}
res['monitoring_objects'][strclss].update({'items': []})
try:
dump_list = sorted(objects_list, key=lambda k: k.get_name())
except AttributeError: # pragma: no cover, simple protection
dump_list = objects_list
# Dump at DEBUG level because some tests break with INFO level, and it is not
# really necessary to have information about each object ;
for cur_obj in dump_list:
if strclss == 'services':
res['monitoring_objects'][strclss]['items'].append(cur_obj.get_full_name())
else:
res['monitoring_objects'][strclss]['items'].append(cur_obj.get_name())
# Arbiter counters, including the loaded configuration objects and the dispatcher data
counters = res['counters']
counters['external-commands'] = len(self.external_commands)
counters['broks'] = len(self.broks)
for _, _, strclss, _, _ in list(self.conf.types_creations.values()):
if strclss in ['hostescalations', 'serviceescalations']:
logger.debug("Ignoring count for '%s'...", strclss)
continue
objects_list = getattr(self.conf, strclss, [])
counters[strclss] = len(objects_list)
# Configuration dispatch counters
if getattr(self, "dispatcher", None):
for sat_type in ('arbiters', 'schedulers', 'reactionners',
'brokers', 'receivers', 'pollers'):
counters["dispatcher.%s" % sat_type] = len(getattr(self.dispatcher, sat_type))
# Report our daemons states, but only if a dispatcher exists
if getattr(self, 'dispatcher', None):
# Daemon properties that we are interested in
res['daemons_states'] = {}
for satellite in self.dispatcher.all_daemons_links:
if satellite == self.link_to_myself:
continue
# Get the information to be published for a satellite
res['daemons_states'][satellite.name] = satellite.give_satellite_json()
res['livestate'] = {
"timestamp": now,
"daemons": {}
}
state = 0
for satellite in self.dispatcher.all_daemons_links:
if satellite == self.link_to_myself:
continue
livestate = 0
if satellite.active:
if not satellite.reachable:
livestate = 1
elif not satellite.alive:
livestate = 2
state = max(state, livestate)
else:
livestate = 3
res['livestate']['daemons'][satellite.name] = livestate
res['livestate'].update({
"state": state,
"output": [
"all daemons are up and running.",
"warning because some daemons are not reachable.",
"critical because some daemons not responding."
][state],
# "long_output": "Long output...",
# "perf_data": "'counter'=1"
})
return res
def get_monitoring_problems(self):
"""Get the schedulers satellites problems list
:return: problems dictionary
:rtype: dict
"""
res = self.get_id()
res['problems'] = {}
# Report our schedulers information, but only if a dispatcher exists
if getattr(self, 'dispatcher', None) is None:
return res
for satellite in self.dispatcher.all_daemons_links:
if satellite.type not in ['scheduler']:
continue
if not satellite.active:
continue
if satellite.statistics and 'problems' in satellite.statistics:
res['problems'][satellite.name] = {
'_freshness': satellite.statistics['_freshness'],
'problems': satellite.statistics['problems']
}
return res
def get_livesynthesis(self):
"""Get the schedulers satellites live synthesis
:return: compiled livesynthesis dictionary
:rtype: dict
"""
res = self.get_id()
res['livesynthesis'] = {
'_overall': {
'_freshness': int(time.time()),
'livesynthesis': {
'hosts_total': 0,
'hosts_not_monitored': 0,
'hosts_up_hard': 0,
'hosts_up_soft': 0,
'hosts_down_hard': 0,
'hosts_down_soft': 0,
'hosts_unreachable_hard': 0,
'hosts_unreachable_soft': 0,
'hosts_problems': 0,
'hosts_acknowledged': 0,
'hosts_in_downtime': 0,
'hosts_flapping': 0,
'services_total': 0,
'services_not_monitored': 0,
'services_ok_hard': 0,
'services_ok_soft': 0,
'services_warning_hard': 0,
'services_warning_soft': 0,
'services_critical_hard': 0,
'services_critical_soft': 0,
'services_unknown_hard': 0,
'services_unknown_soft': 0,
'services_unreachable_hard': 0,
'services_unreachable_soft': 0,
'services_problems': 0,
'services_acknowledged': 0,
'services_in_downtime': 0,
'services_flapping': 0,
}
}
}
# Report our schedulers information, but only if a dispatcher exists
if getattr(self, 'dispatcher', None) is None:
return res
for satellite in self.dispatcher.all_daemons_links:
if satellite.type not in ['scheduler']:
continue
if not satellite.active:
continue
if 'livesynthesis' in satellite.statistics:
# Scheduler detailed live synthesis
res['livesynthesis'][satellite.name] = {
'_freshness': satellite.statistics['_freshness'],
'livesynthesis': satellite.statistics['livesynthesis']
}
# Cumulated live synthesis
for prop in res['livesynthesis']['_overall']['livesynthesis']:
if prop in satellite.statistics['livesynthesis']:
res['livesynthesis']['_overall']['livesynthesis'][prop] += \
satellite.statistics['livesynthesis'][prop]
return res
def get_alignak_status(self, details=False):
# pylint: disable=too-many-locals, too-many-branches
"""Push the alignak overall state as a passive check
Build all the daemons overall state as a passive check that can be notified
to the Alignak WS
The Alignak Arbiter is considered as an host which services are all the Alignak
running daemons. An Alignak daemon is considered as a service of an Alignak host.
As such, it reports its status as a passive service check formatted as defined for
the Alignak WS module (see http://alignak-module-ws.readthedocs.io)
:return: A dict with the following structure
::
{
'name': 'type and name of the daemon',
'livestate': {
'state': "ok",
'output': "state message",
'long_output': "state message - longer ... if any",
'perf_data': "daemon metrics (if any...)"
}
"services": {
"daemon-1": {
'name': 'type and name of the daemon',
'livestate': {
'state': "ok",
'output': "state message",
'long_output': "state message - longer ... if any",
'perf_data': "daemon metrics (if any...)"
}
}
.../...
"daemon-N": {
'name': 'type and name of the daemon',
'livestate': {
'state': "ok",
'output': "state message",
'long_output': "state message - longer ... if any",
'perf_data': "daemon metrics (if any...)"
}
}
}
}
:rtype: dict
"""
now = int(time.time())
# Get the arbiter statistics
inner_stats = self.get_daemon_stats(details=details)
res = {
"name": inner_stats['alignak'],
"template": {
"_templates": ["alignak", "important"],
"alias": inner_stats['alignak'],
"active_checks_enabled": False,
"passive_checks_enabled": True,
"notes": ''
},
"variables": {
},
"livestate": {
"timestamp": now,
"state": "unknown",
"output": "",
"long_output": "",
"perf_data": ""
},
"services": []
}
if details:
res = {
"name": inner_stats['alignak'],
"template": {
"_templates": ["alignak", "important"],
"alias": inner_stats['alignak'],
"active_checks_enabled": False,
"passive_checks_enabled": True,
"notes": ''
},
"variables": {
},
"livestate": {
"timestamp": now,
"state": "unknown",
"output": "",
"long_output": "",
"perf_data": ""
},
"services": []
}
# Create self arbiter service - I am now considered as a service for my Alignak monitor!
if 'livestate' in inner_stats:
livestate = inner_stats['livestate']
res['services'].append({
"name": inner_stats['name'],
"livestate": {
"timestamp": now,
"state": ["ok", "warning", "critical", "unknown"][livestate['state']],
"output": livestate['output'],
"long_output": livestate['long_output'] if 'long_output' in livestate else "",
"perf_data": livestate['perf_data'] if 'perf_data' in livestate else ""
}
})
# Alignak performance data are:
# 1/ the monitored items counters
if 'counters' in inner_stats:
metrics = []
my_counters = [strclss for _, _, strclss, _, _ in
list(self.conf.types_creations.values())
if strclss not in ['hostescalations', 'serviceescalations']]
for counter in inner_stats['counters']:
# Only the arbiter created objects...
if counter not in my_counters:
continue
metrics.append("'%s'=%d" % (counter, inner_stats['counters'][counter]))
res['livestate']['perf_data'] = ' '.join(metrics)
# Report the arbiter daemons states, but only if they exist...
if 'daemons_states' in inner_stats:
state = 0
long_output = []
for daemon_id in sorted(inner_stats['daemons_states']):
daemon = inner_stats['daemons_states'][daemon_id]
# Ignore daemons that are not active in the configuration
if not daemon['active']:
continue
res['services'].append({
"name": daemon_id,
"livestate": {
"timestamp": now,
"name": "%s_%s" % (daemon['type'], daemon['name']),
"state": ["ok", "warning", "critical", "unknown"][daemon['livestate']],
"output": [
u"daemon is alive and reachable.",
u"daemon is not reachable.",
u"daemon is not alive."
][daemon['livestate']],
"long_output": "Realm: %s (%s). Listening on: %s" % (
daemon['realm_name'], daemon['manage_sub_realms'], daemon['uri']),
"perf_data": "last_check=%.2f" % daemon['last_check']
}
})
state = max(state, daemon['livestate'])
long_output.append("%s - %s" % (
daemon_id, [u"daemon is alive and reachable.",
u"daemon is not reachable.",
u"daemon is not alive."][daemon['livestate']]))
res['livestate'].update({
"state": "up", # Always Up ;)
"output": [u"All my daemons are up and running.",
u"Some of my daemons are not reachable.",
u"Some of my daemons are not responding!"][state],
"long_output": '\n'.join(long_output)
})
log_level = 'info'
if state == 1: # DOWN
log_level = 'error'
if state == 2: # UNREACHABLE
log_level = 'warning'
if self.conf.log_alignak_checks or state > 0:
self.add(make_monitoring_log(log_level, 'ALIGNAK CHECK;%s;%d;%s;%s' % (
self.alignak_name, state, res['livestate']['output'],
res['livestate']['long_output']
)))
if self.my_status != state:
self.my_status = state
self.add(make_monitoring_log(log_level, 'ALIGNAK ALERT;%s;%d;%s;%s' % (
self.alignak_name, state, res['livestate']['output'],
res['livestate']['long_output']
)))
if self.alignak_monitor:
logger.debug("Pushing Alignak passive check to %s: %s", self.alignak_monitor, res)
if self.my_monitor is None:
self.my_monitor = MonitorConnection(self.alignak_monitor)
if not self.my_monitor.authenticated:
self.my_monitor.login(self.alignak_monitor_username,
self.alignak_monitor_password)
result = self.my_monitor.patch('host', res)
logger.debug("Monitor reporting result: %s", result)
else:
logger.debug("No configured Alignak monitor to receive: %s", res)
return res
def main(self):
"""Main arbiter function::
* Set logger
* Init daemon
* Launch modules
* Endless main process loop
:return: None
"""
try:
# Start the daemon
if not self.verify_only and not self.do_daemon_init_and_start():
self.exit_on_error(message="Daemon initialization error", exit_code=3)
if self.verify_only:
self.setup_alignak_logger()
# Load monitoring configuration files
self.load_monitoring_config_file()
if self.verify_only:
# Exit!
self.request_stop()
# Set my own process title
self.set_proctitle(self.name)
# Now we can start our "external" modules (if any):
self.modules_manager.start_external_instances()
# Now we can load the retention data
self.hook_point('load_retention')
# And go for the main loop
while True:
self.do_main_loop()
logger.info("Exited from the main loop.")
# Exiting the main loop because of a configuration reload
if not self.need_config_reload:
# If no configuration reload is required, stop the arbiter daemon
self.request_stop()
else:
# Loop if a configuration reload is raised while
# we are still reloading the configuration
while self.need_config_reload:
# Clear the former configuration
self.need_config_reload = False
self.link_to_myself = None
self.conf = Config()
# Load monitoring configuration files
_ts = time.time()
logger.warning('--- Reloading configuration...')
self.load_monitoring_config_file()
duration = int(time.time() - _ts)
self.add(make_monitoring_log('info', 'CONFIGURATION RELOAD;%d' % duration))
logger.warning('--- Configuration reloaded, %d seconds', duration)
# Make a pause to let our satellites get ready...
pause = max(1, self.conf.daemons_new_conf_timeout)
if pause:
logger.info("Pausing %.2f seconds...", pause)
time.sleep(pause)
except Exception as exp: # pragma: no cover, this should never happen indeed ;)
# Only a master arbiter can stop the daemons
if self.is_master:
# Stop the daemons
self.daemons_stop(timeout=self.conf.daemons_stop_timeout)
self.exit_on_exception(raised_exception=exp)
raise
|
class Arbiter(Daemon):
'''
Arbiter class. Referenced as "app" in most Interface
Class to manage the Arbiter daemon.
The Arbiter is the one that rules them all...
'''
def __init__(self, **kwargs):
'''Arbiter daemon initialisation
:param kwargs: command line arguments
'''
pass
def add(self, elt):
'''Generic function to add objects to the daemon internal lists.
Manage Broks, External commands
:param elt: objects to add
:type elt: alignak.AlignakObject
:return: None
'''
pass
def get_managed_configurations(self):
'''Get the configuration managed by this arbiter
This is used by the master arbiter to get information from its spare arbiter
:return: a dict of arbiter links (only one) with instance_id as key and
hash, push_flavor and configuration identifier as values
:rtype: dict
'''
pass
def push_broks_to_broker(self):
'''Send all broks from arbiter internal list to broker
The arbiter get some broks and then pushes them to all the brokers.
:return: None
'''
pass
def push_external_commands_to_schedulers(self):
'''Send external commands to schedulers
:return: None
'''
pass
def get_external_commands(self):
'''Get the external commands
:return: External commands list
:rtype: list
'''
pass
def get_broks_from_satellites(self):
'''Get broks from my all internal satellite links
The arbiter get the broks from ALL the known satellites
:return: None
'''
pass
def get_initial_broks_from_satellites(self):
'''Get initial broks from my internal satellite links
:return: None
'''
pass
def load_monitoring_config_file(self, clean=True):
'''Load main configuration file (alignak.cfg)::
* Read all files given in the -c parameters
* Read all .cfg files in cfg_dir
* Read all files in cfg_file
* Create objects (Arbiter, Module)
* Set HTTP links info (ssl etc)
* Load its own modules
* Execute read_configuration hook (for arbiter modules)
* Create all objects (Service, Host, Realms ...)
* "Compile" configuration (Linkify, explode, apply inheritance, fill default values ...)
* Cut conf into parts and prepare it for sending
The clean parameter is useful to load a configuration without removing the properties
only used to parse the configuration and create the objects. Some utilities (like
alignak-backend-import script) may need to avoid the cleaning ;)
Note that default is no cleaning!
:param clean: set True to clean the created items
:type clean: bool
:return: None
'''
pass
def load_modules_configuration_objects(self, raw_objects):
'''Load configuration objects from arbiter modules
If module implements get_objects arbiter will call it and add create
objects
:param raw_objects: raw objects we got from reading config files
:type raw_objects: dict
:return: None
'''
pass
def load_modules_alignak_configuration(self):
'''Load Alignak configuration from the arbiter modules
If module implements get_alignak_configuration, call this function
:return: None
'''
pass
def request_stop(self, message='', exit_code=0):
'''Stop the Arbiter daemon
:return: None
'''
pass
def start_daemon(self, satellite):
'''Manage the list of detected missing daemons
If the daemon does not in exist `my_daemons`, then:
- prepare daemon start arguments (port, name and log file)
- start the daemon
- make sure it started correctly
:param satellite: the satellite for which a daemon is to be started
:type satellite: SatelliteLink
:return: True if the daemon started correctly
'''
pass
def daemons_start(self, run_daemons=True):
'''Manage the list of the daemons in the configuration
Check if the daemon needs to be started by the Arbiter.
If so, starts the daemon if `run_daemons` is True
:param run_daemons: run the daemons or make a simple check
:type run_daemons: bool
:return: True if all daemons are running, else False. always True for a simple check
'''
pass
def daemons_check(self):
'''Manage the list of Alignak launched daemons
Check if the daemon process is running
:return: True if all daemons are running, else False
'''
pass
def daemons_stop(self, timeout=30, kill_children=False):
'''Stop the Alignak daemons
Iterate over the self-launched daemons and their children list to send a TERM
Wait for daemons to terminate and then send a KILL for those that are not yet stopped
As a default behavior, only the launched daemons are killed, not their children.
Each daemon will manage its children killing
:param timeout: delay to wait before killing a daemon
:type timeout: int
:param kill_children: also kill the children (defaults to False)
:type kill_children: bool
:return: True if all daemons stopped
'''
pass
def on_terminate(proc):
'''Process termination callback function'''
pass
def daemons_reachability_check(self):
'''Manage the list of Alignak launched daemons
Check if the daemon process is running
Then, check the daemon status and get its monitoring events
:return: True if all daemons are running, else False
'''
pass
def setup_new_conf(self):
''' Setup a new configuration received from a Master arbiter.
TODO: perharps we should not accept the configuration or raise an error if we do not
find our own configuration data in the data. Thus this should never happen...
:return: None
'''
pass
def wait_for_master_death(self):
'''Wait for a master timeout and take the lead if necessary
:return: None
'''
pass
def check_and_log_tp_activation_change(self):
'''Raise log for timeperiod change (useful for debug)
:return: None
'''
pass
def manage_signal(self, sig, frame):
'''Manage signals caught by the process
Specific behavior for the arbiter when it receives a sigkill or sigterm
:param sig: signal caught by the process
:type sig: str
:param frame: current stack frame
:type frame:
:return: None
'''
pass
def configuration_dispatch(self, not_configured=None):
'''Monitored configuration preparation and dispatch
:return: None
'''
pass
def do_before_loop(self):
'''Called before the main daemon loop.
:return: None
'''
pass
def do_loop_turn(self):
'''Loop turn for Arbiter
If not a master daemon, wait for my master death...
Else, run:
* Check satellites are alive
* Check and dispatch (if needed) the configuration
* Get broks and external commands from the satellites
* Push broks and external commands to the satellites
:return: None
'''
pass
def get_daemon_stats(self, details=False):
'''Increase the stats provided by the Daemon base class
:return: stats dictionary
:rtype: dict
'''
pass
def get_monitoring_problems(self):
'''Get the schedulers satellites problems list
:return: problems dictionary
:rtype: dict
'''
pass
def get_livesynthesis(self):
'''Get the schedulers satellites live synthesis
:return: compiled livesynthesis dictionary
:rtype: dict
'''
pass
def get_alignak_status(self, details=False):
'''Push the alignak overall state as a passive check
Build all the daemons overall state as a passive check that can be notified
to the Alignak WS
The Alignak Arbiter is considered as an host which services are all the Alignak
running daemons. An Alignak daemon is considered as a service of an Alignak host.
As such, it reports its status as a passive service check formatted as defined for
the Alignak WS module (see http://alignak-module-ws.readthedocs.io)
:return: A dict with the following structure
::
{
'name': 'type and name of the daemon',
'livestate': {
'state': "ok",
'output': "state message",
'long_output': "state message - longer ... if any",
'perf_data': "daemon metrics (if any...)"
}
"services": {
"daemon-1": {
'name': 'type and name of the daemon',
'livestate': {
'state': "ok",
'output': "state message",
'long_output': "state message - longer ... if any",
'perf_data': "daemon metrics (if any...)"
}
}
.../...
"daemon-N": {
'name': 'type and name of the daemon',
'livestate': {
'state': "ok",
'output': "state message",
'long_output': "state message - longer ... if any",
'perf_data': "daemon metrics (if any...)"
}
}
}
}
:rtype: dict
'''
pass
def main(self):
'''Main arbiter function::
* Set logger
* Init daemon
* Launch modules
* Endless main process loop
:return: None
'''
pass
| 31 | 31 | 68 | 9 | 41 | 19 | 9 | 0.46 | 1 | 24 | 10 | 0 | 29 | 37 | 29 | 80 | 2,094 | 307 | 1,246 | 231 | 1,215 | 567 | 972 | 225 | 941 | 64 | 2 | 5 | 282 |
3,885 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/daemons/brokerdaemon.py
|
alignak.daemons.brokerdaemon.Broker
|
class Broker(BaseSatellite):
"""
Class to manage a Broker daemon
A Broker is used to get data from Scheduler and send them to modules. These modules in most
cases export to other software, databases...
"""
properties = BaseSatellite.properties.copy()
properties.update({
'type':
StringProp(default='broker'),
'port':
IntegerProp(default=7772)
})
def __init__(self, **kwargs):
"""Broker daemon initialisation
:param kwargs: command line arguments
"""
super(Broker, self).__init__(kwargs.get('daemon_name', 'Default-broker'), **kwargs)
self.got_initial_broks = False
# Our schedulers and arbiters are initialized in the base class
# Our pollers, reactionners and receivers
self.pollers = {}
self.reactionners = {}
self.receivers = {}
# Modules are load one time
self.have_modules = False
# All broks to manage
self.external_broks = [] # broks to manage
# broks raised internally by the broker
self.internal_broks = []
# broks raised by the arbiters, we need a lock so the push can be in parallel
# to our current activities and won't lock the arbiter
self.arbiter_broks = []
self.arbiter_broks_lock = threading.RLock()
self.timeout = 1.0
self.http_interface = BrokerInterface(self)
def add(self, elt):
"""Generic function to add objects to the daemon internal lists.
Manage Broks, External commands and Messages (from modules queues)
:param elt: object to add
:type elt: alignak.AlignakObject
:return: None
"""
if isinstance(elt, Brok):
# For brok, we tag the brok with our instance_id
elt.instance_id = self.instance_id
if elt.type == 'monitoring_log':
# The brok is a monitoring event
with self.events_lock:
self.events.append(elt)
statsmgr.counter('events', 1)
# Also add to our broks
with self.broks_lock:
self.broks.append(elt)
statsmgr.counter('broks.added', 1)
elif isinstance(elt, ExternalCommand):
logger.debug("Queuing an external command '%s'", str(elt.__dict__))
with self.external_commands_lock:
self.external_commands.append(elt)
statsmgr.counter('external-commands.added', 1)
# Maybe we got a Message from the modules, it's way to ask something
# like from now a full data from a scheduler for example.
elif isinstance(elt, Message):
# We got a message, great!
logger.debug(str(elt.__dict__))
if elt.get_type() == 'NeedData':
data = elt.get_data()
# Full instance id means: I got no data for this scheduler
# so give me all dumb-ass!
if 'full_instance_id' in data:
c_id = data['full_instance_id']
source = getattr(elt, 'source', getattr(elt, '_source', None))
logger.info('The module %s is asking me to get all initial data '
'from the scheduler %s', source, c_id)
# so we just reset the connection and the running_id,
# it will just get all new things
try:
self.schedulers[c_id]['con'] = None
self.schedulers[c_id]['running_id'] = 0
except KeyError: # maybe this instance was not known, forget it
logger.warning("the module %s ask me a full_instance_id "
"for an unknown ID (%s)!", source, c_id)
# Maybe a module tells me that it's dead, I must log its last words...
if elt.get_type() == 'ICrash':
data = elt.get_data()
logger.error('the module %s just crash! Please look at the traceback:',
data['name'])
logger.error(data['trace'])
statsmgr.counter('message.added', 1)
# The module death will be looked for elsewhere and restarted.
def manage_brok(self, brok):
"""Get a brok.
We put brok data to the modules
:param brok: object with data
:type brok: object
:return: None
"""
# Unserialize the brok before consuming it
brok.prepare()
for module in self.modules_manager.get_internal_instances():
try:
_t0 = time.time()
module.manage_brok(brok)
statsmgr.timer('manage-broks.internal.%s' % module.get_name(), time.time() - _t0)
except Exception as exp: # pylint: disable=broad-except
logger.warning("The module %s raised an exception: %s, "
"I'm tagging it to restart later", module.get_name(), str(exp))
logger.exception(exp)
self.modules_manager.set_to_restart(module)
def get_internal_broks(self):
"""Get all broks from self.broks_internal_raised and append them to our broks
to manage
:return: None
"""
statsmgr.gauge('get-new-broks-count.broker', len(self.internal_broks))
# Add the broks to our global list
self.external_broks.extend(self.internal_broks)
self.internal_broks = []
def get_arbiter_broks(self):
"""Get the broks from the arbiters,
but as the arbiter_broks list can be push by arbiter without Global lock,
we must protect this with a lock
TODO: really? check this arbiter behavior!
:return: None
"""
with self.arbiter_broks_lock:
statsmgr.gauge('get-new-broks-count.arbiter', len(self.arbiter_broks))
# Add the broks to our global list
self.external_broks.extend(self.arbiter_broks)
self.arbiter_broks = []
def get_new_broks(self):
"""Get new broks from our satellites
:return: None
"""
for satellites in [self.schedulers, self.pollers, self.reactionners, self.receivers]:
for satellite_link in list(satellites.values()):
logger.debug("Getting broks from %s", satellite_link)
_t0 = time.time()
try:
tmp_broks = satellite_link.get_broks(self.name)
except LinkError:
logger.warning("Daemon %s connection failed, I could not get the broks!",
satellite_link)
else:
if tmp_broks:
logger.debug("Got %d Broks from %s in %s",
len(tmp_broks), satellite_link.name, time.time() - _t0)
statsmgr.gauge('get-new-broks-count.%s' % satellite_link.name,
len(tmp_broks))
statsmgr.timer('get-new-broks-time.%s' % satellite_link.name,
time.time() - _t0)
for brok in tmp_broks:
brok.instance_id = satellite_link.instance_id
# Add the broks to our global list
self.external_broks.extend(tmp_broks)
# def do_stop(self):
# """Stop all children of this process
#
# :return: None
# """
# # my_active_children = active_children()
# # for child in my_active_children:
# # child.terminate()
# # child.join(1)
# super(Broker, self).do_stop()
def setup_new_conf(self):
# pylint: disable=too-many-branches, too-many-locals
"""Broker custom setup_new_conf method
This function calls the base satellite treatment and manages the configuration needed
for a broker daemon:
- get and configure its pollers, reactionners and receivers relation
- configure the modules
:return: None
"""
# Execute the base class treatment...
super(Broker, self).setup_new_conf()
# ...then our own specific treatment!
with self.conf_lock:
# # self_conf is our own configuration from the alignak environment
# self_conf = self.cur_conf['self_conf']
self.got_initial_broks = False
# Now we create our pollers, reactionners and receivers
for link_type in ['pollers', 'reactionners', 'receivers']:
if link_type not in self.cur_conf['satellites']:
logger.error("No %s in the configuration!", link_type)
continue
my_satellites = getattr(self, link_type, {})
received_satellites = self.cur_conf['satellites'][link_type]
for link_uuid in received_satellites:
rs_conf = received_satellites[link_uuid]
logger.debug("- received %s - %s: %s", rs_conf['instance_id'],
rs_conf['type'], rs_conf['name'])
# Must look if we already had a configuration and save our broks
already_got = rs_conf['instance_id'] in my_satellites
broks = []
actions = {}
wait_homerun = {}
external_commands = {}
running_id = 0
if already_got:
logger.warning("I already got: %s", rs_conf['instance_id'])
# Save some information
running_id = my_satellites[link_uuid].running_id
(broks, actions,
wait_homerun, external_commands) = \
my_satellites[link_uuid].get_and_clear_context()
# Delete the former link
del my_satellites[link_uuid]
# My new satellite link...
new_link = SatelliteLink.get_a_satellite_link(link_type[:-1],
rs_conf)
my_satellites[new_link.uuid] = new_link
logger.info("I got a new %s satellite: %s", link_type[:-1], new_link.name)
new_link.running_id = running_id
new_link.external_commands = external_commands
new_link.broks = broks
new_link.wait_homerun = wait_homerun
new_link.actions = actions
# Replace satellite address and port by those defined in satellite_map
# todo: check if it is really necessary! Add a unit test for this
# Not sure about this because of the daemons/satellites configuration
# if new_link.name in self_conf.get('satellite_map', {}):
# new_link = dict(new_link) # make a copy
# new_link.update(self_conf.get('satellite_map', {})[new_link.name])
if not self.have_modules:
try:
self.modules = unserialize(self.cur_conf['modules'], no_json=True)
except AlignakClassLookupException as exp: # pragma: no cover, simple protection
logger.error('Cannot un-serialize modules configuration '
'received from arbiter: %s', exp)
if self.modules:
logger.info("I received some modules configuration")
self.have_modules = True
# Ok now start, or restart them!
# Set modules, init them and start external ones
self.do_load_modules(self.modules)
# and start external modules too
self.modules_manager.start_external_instances()
else:
logger.info("I do not have modules")
# Initialize connection with my schedulers first
logger.info("Initializing connection with my schedulers:")
my_satellites = self.get_links_of_type(s_type='scheduler')
for satellite in list(my_satellites.values()):
logger.info("- %s/%s", satellite.type, satellite.name)
if not self.daemon_connection_init(satellite):
logger.error("Satellite connection failed: %s", satellite)
# Initialize connection with all our satellites
logger.info("Initializing connection with my satellites:")
for sat_type in ['arbiter', 'reactionner', 'poller', 'receiver']:
my_satellites = self.get_links_of_type(s_type=sat_type)
for satellite in list(my_satellites.values()):
logger.info("- %s/%s", satellite.type, satellite.name)
if not self.daemon_connection_init(satellite):
logger.error("Satellite connection failed: %s", satellite)
# Now I have a configuration!
self.have_conf = True
def clean_previous_run(self):
"""Clean all (when we received new conf)
:return: None
"""
# Execute the base class treatment...
super(Broker, self).clean_previous_run()
# Clean all satellites relations
self.pollers.clear()
self.reactionners.clear()
self.receivers.clear()
# Clean our internal objects
self.external_broks = self.external_broks[:]
self.internal_broks = self.internal_broks[:]
with self.arbiter_broks_lock:
self.arbiter_broks = self.arbiter_broks[:]
self.external_commands = self.external_commands[:]
# And now modules
# self.have_modules = False
# self.modules_manager.clear_instances()
def do_loop_turn(self):
# pylint: disable=too-many-branches
"""Loop used to:
* get initial status broks
* check if modules are alive, if not restart them
* get broks from ourself, the arbiters and our satellites
* add broks to the queue of each external module
* manage broks with each internal module
If the internal broks management is longer than 0.8 seconds, postpone to hte next
loop turn to avoid overloading the broker daemon.
:return: None
"""
if not self.got_initial_broks:
# Asking initial broks from my schedulers
my_satellites = self.get_links_of_type(s_type='scheduler')
for satellite in list(my_satellites.values()):
logger.info("Asking my initial broks from '%s'", satellite.name)
_t0 = time.time()
try:
my_initial_broks = satellite.get_initial_broks(self.name)
statsmgr.timer('broks.initial.%s.time' % satellite.name, time.time() - _t0)
if not my_initial_broks:
logger.info("No initial broks were raised, "
"my scheduler is not yet ready...")
return
self.got_initial_broks = True
logger.debug("Got %d initial broks from '%s'",
my_initial_broks, satellite.name)
statsmgr.gauge('broks.initial.%s.count' % satellite.name, my_initial_broks)
except LinkError as exp:
logger.warning("Scheduler connection failed, I could not get initial broks!")
logger.debug("Begin Loop: still some old broks to manage (%d)", len(self.external_broks))
if self.external_broks:
statsmgr.gauge('broks.unmanaged', len(self.external_broks))
# Try to see if one of my module is dead, and restart previously dead modules
self.check_and_del_zombie_modules()
# Call modules that manage a starting tick pass
_t0 = time.time()
self.hook_point('tick')
statsmgr.timer('hook.tick', time.time() - _t0)
# Maybe the last loop we did raised some broks internally
self.get_internal_broks()
# Also reap broks sent from the arbiters
self.get_arbiter_broks()
# Now get broks from our distant daemons
self.get_new_broks()
# Get the list of broks not yet sent to our external modules
_t0 = time.time()
broks_to_send = [brok for brok in self.external_broks if getattr(brok, 'to_be_sent', True)]
statsmgr.gauge('get-new-broks-count.to_send', len(broks_to_send))
# Send the broks to all external modules to_q queue so they can get the whole packet
# beware, the sub-process/queue can be die/close, so we put to restart the whole module
# instead of killing ourselves :)
for module in self.modules_manager.get_external_instances():
try:
_t00 = time.time()
queue_size = module.to_q.qsize()
statsmgr.gauge('queues.external.%s.to.size' % module.get_name(), queue_size)
module.to_q.put(broks_to_send)
statsmgr.timer('queues.external.%s.to.put' % module.get_name(), time.time() - _t00)
except Exception as exp: # pylint: disable=broad-except
# first we must find the modules
logger.warning("Module %s queue exception: %s, I'm tagging it to restart later",
module.get_name(), str(exp))
logger.exception(exp)
self.modules_manager.set_to_restart(module)
# No more need to send them
for brok in broks_to_send:
brok.to_be_sent = False
logger.debug("Time to send %s broks (%d secs)", len(broks_to_send), time.time() - _t0)
# Make the internal modules manage the broks
start = time.time()
while self.external_broks:
now = time.time()
# Do not 'manage' more than 0.8s, we must get new broks almost every second
if now - start > 0.8:
logger.info("I did not yet managed all my broks, still %d broks",
len(self.external_broks))
break
# Get the first brok in the list
brok = self.external_broks.pop(0)
if self.modules_manager.get_internal_instances():
self.manage_brok(brok)
# Make a very short pause to avoid overloading
self.make_a_pause(0.01, check_time_change=False)
else:
if getattr(brok, 'to_be_sent', False):
self.external_broks.append(brok)
# Maybe our external modules raised 'objects', so get them
if self.get_objects_from_from_queues():
statsmgr.gauge('external-commands.got.count', len(self.external_commands))
statsmgr.gauge('broks.got.count', len(self.external_broks))
def get_daemon_stats(self, details=False):
"""Increase the stats provided by the Daemon base class
:return: stats dictionary
:rtype: dict
"""
# Call the base Daemon one
res = super(Broker, self).get_daemon_stats(details=details)
res.update({'name': self.name, 'type': self.type})
counters = res['counters']
counters['broks-external'] = len(self.external_broks)
counters['broks-internal'] = len(self.internal_broks)
counters['broks-arbiter'] = len(self.arbiter_broks)
counters['satellites.pollers'] = len(self.pollers)
counters['satellites.reactionners'] = len(self.reactionners)
counters['satellites.receivers'] = len(self.receivers)
return res
def main(self):
"""Main function, will loop forever
:return: None
"""
try:
# Start the daemon mode
if not self.do_daemon_init_and_start():
self.exit_on_error(message="Daemon initialization error", exit_code=3)
# We wait for initial conf
self.wait_for_initial_conf()
if self.new_conf:
# Setup the received configuration
self.setup_new_conf()
# Restore retention data
self.hook_point('load_retention')
# Now the main loop
self.do_main_loop()
logger.info("Exited from the main loop.")
self.request_stop()
except Exception: # pragma: no cover, this should never happen indeed ;)
self.exit_on_exception(traceback.format_exc())
raise
|
class Broker(BaseSatellite):
'''
Class to manage a Broker daemon
A Broker is used to get data from Scheduler and send them to modules. These modules in most
cases export to other software, databases...
'''
def __init__(self, **kwargs):
'''Broker daemon initialisation
:param kwargs: command line arguments
'''
pass
def add(self, elt):
'''Generic function to add objects to the daemon internal lists.
Manage Broks, External commands and Messages (from modules queues)
:param elt: object to add
:type elt: alignak.AlignakObject
:return: None
'''
pass
def manage_brok(self, brok):
'''Get a brok.
We put brok data to the modules
:param brok: object with data
:type brok: object
:return: None
'''
pass
def get_internal_broks(self):
'''Get all broks from self.broks_internal_raised and append them to our broks
to manage
:return: None
'''
pass
def get_arbiter_broks(self):
'''Get the broks from the arbiters,
but as the arbiter_broks list can be push by arbiter without Global lock,
we must protect this with a lock
TODO: really? check this arbiter behavior!
:return: None
'''
pass
def get_new_broks(self):
'''Get new broks from our satellites
:return: None
'''
pass
def setup_new_conf(self):
'''Broker custom setup_new_conf method
This function calls the base satellite treatment and manages the configuration needed
for a broker daemon:
- get and configure its pollers, reactionners and receivers relation
- configure the modules
:return: None
'''
pass
def clean_previous_run(self):
'''Clean all (when we received new conf)
:return: None
'''
pass
def do_loop_turn(self):
'''Loop used to:
* get initial status broks
* check if modules are alive, if not restart them
* get broks from ourself, the arbiters and our satellites
* add broks to the queue of each external module
* manage broks with each internal module
If the internal broks management is longer than 0.8 seconds, postpone to hte next
loop turn to avoid overloading the broker daemon.
:return: None
'''
pass
def get_daemon_stats(self, details=False):
'''Increase the stats provided by the Daemon base class
:return: stats dictionary
:rtype: dict
'''
pass
def main(self):
'''Main function, will loop forever
:return: None
'''
pass
| 12 | 12 | 40 | 5 | 23 | 12 | 5 | 0.58 | 1 | 12 | 7 | 0 | 11 | 14 | 11 | 72 | 480 | 74 | 261 | 66 | 249 | 151 | 235 | 63 | 223 | 14 | 3 | 5 | 54 |
3,886 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/objects/timeperiod.py
|
alignak.objects.timeperiod.Timeperiods
|
class Timeperiods(Items):
"""
Class to manage all timeperiods
A timeperiod is defined with range time (hours) of week to do action
and add day exceptions (like non working days)
"""
name_property = "timeperiod_name"
inner_class = Timeperiod
def explode(self):
"""
Try to resolve each timeperiod
:return: None
"""
for t_id in self.items:
timeperiod = self.items[t_id]
timeperiod.explode()
def linkify(self):
"""
Check exclusion for each timeperiod
:return: None
"""
for t_id in self.items:
timeperiod = self.items[t_id]
timeperiod.linkify(self)
def get_unresolved_properties_by_inheritance(self, timeperiod):
"""
Fill full properties with template if needed for the
unresolved values (example: sunday ETCETC)
:return: None
"""
# Ok, I do not have prop, Maybe my templates do?
# Same story for plus
for i in timeperiod.templates:
template = self.templates[i]
timeperiod.unresolved.extend(template.unresolved)
def apply_inheritance(self):
"""
The only interesting property to inherit is exclude
:return: None
"""
self.apply_partial_inheritance('exclude')
for i in self:
self.get_customs_properties_by_inheritance(i)
# And now apply inheritance for unresolved properties
# like the date ranges
for timeperiod in self:
self.get_unresolved_properties_by_inheritance(timeperiod)
def is_correct(self):
"""
check if each properties of timeperiods are valid
:return: True if is correct, otherwise False
:rtype: bool
"""
valid = True
# We do not want a same hg to be explode again and again
# so we tag it
for timeperiod in list(self.items.values()):
timeperiod.rec_tag = False
for timeperiod in list(self.items.values()):
for tmp_tp in list(self.items.values()):
tmp_tp.rec_tag = False
valid = timeperiod.check_exclude_rec() and valid
# We clean the tags and collect the warning/erro messages
for timeperiod in list(self.items.values()):
del timeperiod.rec_tag
# Now other checks
if not timeperiod.is_correct():
valid = False
self.add_error("Configuration in %s::%s is incorrect; from: %s"
% (timeperiod.my_type, timeperiod.get_name(),
timeperiod.imported_from))
self.configuration_errors += timeperiod.configuration_errors
self.configuration_warnings += timeperiod.configuration_warnings
# And check all timeperiods for correct (sunday is false)
for timeperiod in self:
valid = timeperiod.is_correct() and valid
return valid
|
class Timeperiods(Items):
'''
Class to manage all timeperiods
A timeperiod is defined with range time (hours) of week to do action
and add day exceptions (like non working days)
'''
def explode(self):
'''
Try to resolve each timeperiod
:return: None
'''
pass
def linkify(self):
'''
Check exclusion for each timeperiod
:return: None
'''
pass
def get_unresolved_properties_by_inheritance(self, timeperiod):
'''
Fill full properties with template if needed for the
unresolved values (example: sunday ETCETC)
:return: None
'''
pass
def apply_inheritance(self):
'''
The only interesting property to inherit is exclude
:return: None
'''
pass
def is_correct(self):
'''
check if each properties of timeperiods are valid
:return: True if is correct, otherwise False
:rtype: bool
'''
pass
| 6 | 6 | 16 | 2 | 8 | 6 | 3 | 0.88 | 1 | 1 | 0 | 0 | 5 | 0 | 5 | 50 | 94 | 17 | 41 | 19 | 35 | 36 | 39 | 19 | 33 | 7 | 2 | 2 | 16 |
3,887 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/http/client.py
|
alignak.http.client.HTTPClientDataException
|
class HTTPClientDataException(Exception): # pragma: no cover, hopefully never raised
"""HTTP Data Exception - raised when the HTTP response is not OK (200)
Its attribute are:
- rsp_code: the HTTP response code,
- rsp_text: the HTTP response bodyeout.
"""
def __init__(self, rsp_code, rsp_text, uri):
# Call the base class constructor with the parameters it needs
super(HTTPClientDataException, self).__init__()
self.code = rsp_code
self.text = rsp_text
self.uri = uri
self.msg = "Bad server response for %s: %s - %s" % (self.uri, self.code, self.text)
def __str__(self): # pragma: no cover
"""Exception to String"""
return self.msg
|
class HTTPClientDataException(Exception):
'''HTTP Data Exception - raised when the HTTP response is not OK (200)
Its attribute are:
- rsp_code: the HTTP response code,
- rsp_text: the HTTP response bodyeout.
'''
def __init__(self, rsp_code, rsp_text, uri):
pass
def __str__(self):
'''Exception to String'''
pass
| 3 | 2 | 6 | 1 | 4 | 2 | 1 | 1 | 1 | 1 | 0 | 0 | 2 | 4 | 2 | 12 | 19 | 3 | 9 | 7 | 6 | 9 | 9 | 7 | 6 | 1 | 3 | 0 | 2 |
3,888 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_aa_properties_default.py
|
tests.test_aa_properties_default.TestHostextinfo
|
class TestHostextinfo(PropertiesTester, AlignakTest):
unused_props = []
without_default = ['host_name']
properties = dict([
('imported_from', 'alignak-self'),
('use', []),
('register', True),
('definition_order', 100),
('name', ''),
('notes', ''),
('notes_url', ''),
('icon_image', ''),
('icon_image_alt', ''),
('vrml_image', ''),
('statusmap_image', ''),
('2d_coords', ''),
('3d_coords', ''),
])
def setUp(self):
super(TestHostextinfo, self).setUp()
from alignak.objects.hostextinfo import HostExtInfo
self.item = HostExtInfo({}, parsing=True)
|
class TestHostextinfo(PropertiesTester, AlignakTest):
def setUp(self):
pass
| 2 | 0 | 4 | 0 | 4 | 0 | 1 | 0 | 2 | 2 | 1 | 0 | 1 | 1 | 1 | 60 | 26 | 4 | 22 | 7 | 19 | 0 | 8 | 7 | 5 | 1 | 2 | 0 | 1 |
3,889 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_aa_properties_default.py
|
tests.test_aa_properties_default.TestHostgroup
|
class TestHostgroup(PropertiesTester, AlignakTest):
unused_props = []
without_default = ['hostgroup_name']
properties = dict([
('alias', ''),
('members', []),
('imported_from', 'alignak-self'),
('use', []),
('register', True),
('definition_order', 100),
('name', ''),
# ('unknown_members', []),
('notes', ''),
('notes_url', ''),
('action_url', ''),
('realm', ''),
('hostgroup_members', []),
])
def setUp(self):
super(TestHostgroup, self).setUp()
from alignak.objects.hostgroup import Hostgroup
self.item = Hostgroup({}, parsing=True)
|
class TestHostgroup(PropertiesTester, AlignakTest):
def setUp(self):
pass
| 2 | 0 | 4 | 0 | 4 | 0 | 1 | 0.05 | 2 | 2 | 1 | 0 | 1 | 1 | 1 | 60 | 26 | 4 | 21 | 7 | 18 | 1 | 8 | 7 | 5 | 1 | 2 | 0 | 1 |
3,890 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_aa_properties_default.py
|
tests.test_aa_properties_default.TestModule
|
class TestModule(PropertiesTester, AlignakTest):
unused_props = []
# unused_props = ['option_1', 'option_2', 'option_3']
without_default = ['module_alias', 'python_name']
properties = dict([
('imported_from', 'alignak-self'),
('use', []),
('register', True),
('definition_order', 100),
('name', 'unset'),
('type', 'unset'),
('daemon', 'unset'),
('module_types', ['']),
('modules', ['']),
('enabled', True),
('log_level', 'INFO'),
('statsd_host', 'localhost'),
('statsd_port', 8125),
('statsd_prefix', 'alignak'),
('statsd_enabled', False)
])
def setUp(self):
super(TestModule, self).setUp()
from alignak.objects.module import Module
self.item = Module({}, parsing=True)
print("Item properties:")
for name in self.item.properties:
print(("- %s" % name))
|
class TestModule(PropertiesTester, AlignakTest):
def setUp(self):
pass
| 2 | 0 | 8 | 1 | 7 | 0 | 2 | 0.04 | 2 | 2 | 1 | 0 | 1 | 1 | 1 | 60 | 33 | 5 | 27 | 8 | 24 | 1 | 11 | 8 | 8 | 2 | 2 | 1 | 2 |
3,891 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_aa_properties_default.py
|
tests.test_aa_properties_default.TestNotificationWay
|
class TestNotificationWay(PropertiesTester, AlignakTest):
unused_props = []
without_default = [
'notificationway_name',
'host_notification_period', 'service_notification_period',
]
properties = dict([
('service_notification_options', []),
('host_notification_options', []),
('host_notification_commands', []),
('service_notification_commands', []),
('imported_from', 'alignak-self'),
('use', []),
('register', True),
('definition_order', 100),
('name', ''),
('min_business_impact', 0),
])
def setUp(self):
super(TestNotificationWay, self).setUp()
from alignak.objects.notificationway import NotificationWay
self.item = NotificationWay({}, parsing=True)
|
class TestNotificationWay(PropertiesTester, AlignakTest):
def setUp(self):
pass
| 2 | 0 | 4 | 0 | 4 | 0 | 1 | 0 | 2 | 2 | 1 | 0 | 1 | 1 | 1 | 60 | 26 | 4 | 22 | 7 | 19 | 0 | 8 | 7 | 5 | 1 | 2 | 0 | 1 |
3,892 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_aa_properties_default.py
|
tests.test_aa_properties_default.TestRealm
|
class TestRealm(PropertiesTester, AlignakTest):
unused_props = []
without_default = []
properties = dict([
('alias', ''),
('members', []),
('imported_from', 'alignak-self'),
('use', []),
('register', True),
('definition_order', 100),
('realm_name', ''),
('name', ''),
('realm_members', []),
('group_members', []),
('higher_realms', []),
('default', False),
])
def setUp(self):
super(TestRealm, self).setUp()
from alignak.objects.realm import Realm
self.item = Realm({}, parsing=True)
|
class TestRealm(PropertiesTester, AlignakTest):
def setUp(self):
pass
| 2 | 0 | 4 | 0 | 4 | 0 | 1 | 0 | 2 | 2 | 1 | 0 | 1 | 1 | 1 | 60 | 25 | 4 | 21 | 7 | 18 | 0 | 8 | 7 | 5 | 1 | 2 | 0 | 1 |
3,893 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_aa_properties_default.py
|
tests.test_aa_properties_default.TestResultmodulation
|
class TestResultmodulation(PropertiesTester, AlignakTest):
unused_props = []
without_default = ['resultmodulation_name']
properties = dict([
('imported_from', 'alignak-self'),
('use', []),
('register', True),
('definition_order', 100),
('name', ''),
('exit_codes_match', []),
('exit_code_modulation', None),
('modulation_period', None),
])
def setUp(self):
super(TestResultmodulation, self).setUp()
from alignak.objects.resultmodulation import Resultmodulation
self.item = Resultmodulation({}, parsing=True)
|
class TestResultmodulation(PropertiesTester, AlignakTest):
def setUp(self):
pass
| 2 | 0 | 4 | 0 | 4 | 0 | 1 | 0 | 2 | 2 | 1 | 0 | 1 | 1 | 1 | 60 | 21 | 4 | 17 | 7 | 14 | 0 | 8 | 7 | 5 | 1 | 2 | 0 | 1 |
3,894 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_aa_properties_default.py
|
tests.test_aa_properties_default.TestService
|
class TestService(PropertiesTester, AlignakTest):
unused_props = []
without_default = [
'host_name',
'service_description',
'check_command',
'check_period',
'notification_period',
# 'state_id_before_impact'
]
properties = dict([
('alias', ''),
('imported_from', 'alignak-self'),
('use', []),
('register', True),
('definition_order', 100),
('name', ''),
('max_check_attempts', 1),
('hostgroup_name', ''),
('display_name', ''),
('servicegroups', []),
('is_volatile', False),
('initial_state', 'o'),
('freshness_state', 'x'),
('active_checks_enabled', True),
('passive_checks_enabled', True),
('check_freshness', False),
('freshness_threshold', 0),
('event_handler', ''),
('event_handler_enabled', False),
('check_interval', 0),
('retry_interval', 0),
('low_flap_threshold', 25),
('high_flap_threshold', 50),
('flap_detection_enabled', True),
('flap_detection_options', ['o','w','c','u','x']),
('process_perf_data', True),
('retain_status_information', True),
('retain_nonstatus_information', True),
('notification_interval', 60),
('first_notification_delay', 0),
('notification_options', ['w','u','c','r','f','s', 'x']),
('notifications_enabled', True),
('contacts', []),
('contact_groups', []),
('stalking_options', []),
('notes', ''),
('notes_url', ''),
('action_url', ''),
('icon_image', ''),
('icon_image_alt', ''),
('icon_set', ''),
('parallelize_check', True),
('poller_tag', 'None'),
('reactionner_tag', 'None'),
('resultmodulations', []),
('business_impact_modulations', []),
('escalations', []),
('maintenance_period', ''),
('duplicate_foreach', ''),
('default_value', ''),
('business_impact', 2),
('time_to_orphanage', 300),
('trending_policies', []),
('checkmodulations', []),
('macromodulations', []),
('aggregation', ''),
('service_dependencies', []),
('custom_views', []),
('merge_host_contacts', False),
('business_rule_output_template', ''),
('business_rule_smart_notifications', False),
('business_rule_downtime_as_ack', False),
('labels', []),
('snapshot_interval', 5),
('snapshot_command', ''),
('snapshot_enabled', False),
('snapshot_period', ''),
('snapshot_criteria', ['w','c','u','x']),
('business_rule_host_notification_options', []),
('business_rule_service_notification_options', []),
('host_dependency_enabled', True),
# ('realm', ''),
# ('state_id_before_impact', 0)
])
def setUp(self):
super(TestService, self).setUp()
from alignak.objects.service import Service
self.item = Service({}, parsing=True)
|
class TestService(PropertiesTester, AlignakTest):
def setUp(self):
pass
| 2 | 0 | 4 | 0 | 4 | 0 | 1 | 0.03 | 2 | 2 | 1 | 0 | 1 | 1 | 1 | 60 | 93 | 4 | 86 | 7 | 83 | 3 | 8 | 7 | 5 | 1 | 2 | 0 | 1 |
3,895 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_aa_properties_default.py
|
tests.test_aa_properties_default.TestServicedependency
|
class TestServicedependency(PropertiesTester, AlignakTest):
unused_props = []
without_default = ['dependent_host_name', 'dependent_service_description', 'host_name', 'service_description']
properties = dict([
('imported_from', 'alignak-self'),
('use', []),
('register', True),
('definition_order', 100),
('name', ''),
('dependent_hostgroup_name', ''),
('hostgroup_name', ''),
('inherits_parent', False),
('execution_failure_criteria', ['n']),
('notification_failure_criteria', ['n']),
('dependency_period', ''),
('explode_hostgroup', False),
])
def setUp(self):
super(TestServicedependency, self).setUp()
from alignak.objects.servicedependency import Servicedependency
self.item = Servicedependency({}, parsing=True)
|
class TestServicedependency(PropertiesTester, AlignakTest):
def setUp(self):
pass
| 2 | 0 | 4 | 0 | 4 | 0 | 1 | 0 | 2 | 2 | 1 | 0 | 1 | 1 | 1 | 60 | 25 | 4 | 21 | 7 | 18 | 0 | 8 | 7 | 5 | 1 | 2 | 0 | 1 |
3,896 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_aa_properties_default.py
|
tests.test_aa_properties_default.TestServiceescalation
|
class TestServiceescalation(PropertiesTester, AlignakTest):
unused_props = []
without_default = [
'host_name', 'hostgroup_name',
'service_description',
'first_notification', 'last_notification',
'first_notification_time', 'last_notification_time']
properties = dict([
('imported_from', 'alignak-self'),
('use', []),
('register', True),
('definition_order', 100),
('name', ''),
('notification_interval', 30),
('escalation_period', ''),
('escalation_options', ['w','x','c','r']),
('contacts', []),
('contact_groups', []),
])
def setUp(self):
super(TestServiceescalation, self).setUp()
from alignak.objects.serviceescalation import Serviceescalation
self.item = Serviceescalation({}, parsing=True)
|
class TestServiceescalation(PropertiesTester, AlignakTest):
def setUp(self):
pass
| 2 | 0 | 4 | 0 | 4 | 0 | 1 | 0 | 2 | 2 | 1 | 0 | 1 | 1 | 1 | 60 | 27 | 4 | 23 | 7 | 20 | 0 | 8 | 7 | 5 | 1 | 2 | 0 | 1 |
3,897 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/downtime.py
|
alignak.downtime.Downtime
|
class Downtime(AlignakObject):
""" Schedules downtime for a specified service. If the "fixed" argument is set
to one (1), downtime will start and end at the times specified by the
"start" and "end" arguments.
Otherwise, downtime will begin between the "start" and "end" times and last
for "duration" seconds. The "start" and "end" arguments are specified
in time_t format (seconds since the UNIX epoch). The specified service
downtime can be triggered by another downtime entry if the "trigger_id"
is set to the ID of another scheduled downtime entry.
Set the "trigger_id" argument to zero (0) if the downtime for the
specified service should not be triggered by another downtime entry.
"""
my_type = 'downtime'
properties = {
'activate_me':
StringProp(default=u''),
'entry_time':
IntegerProp(default=0, fill_brok=[FULL_STATUS]),
'fixed':
BoolProp(default=True, fill_brok=[FULL_STATUS]),
'start_time':
IntegerProp(default=0, fill_brok=[FULL_STATUS]),
'duration':
IntegerProp(default=0, fill_brok=[FULL_STATUS]),
'trigger_id':
StringProp(default=u''),
'end_time':
IntegerProp(default=0, fill_brok=[FULL_STATUS]),
'real_end_time':
IntegerProp(default=0),
'author':
StringProp(default=u'Alignak', fill_brok=[FULL_STATUS]),
'comment':
StringProp(default=u''),
'is_in_effect':
BoolProp(default=False),
'has_been_triggered':
BoolProp(default=False),
'can_be_deleted':
BoolProp(default=False),
'ref':
StringProp(default=u'unset'),
'ref_type':
StringProp(default=u'unset'),
'comment_id':
StringProp(default=u''),
}
def __init__(self, params, parsing=False):
creating = 'uuid' not in params
super(Downtime, self).__init__(params, parsing=parsing)
self.fill_default()
if creating:
self.activate_me = [] # The other downtimes i need to activate
self.entry_time = int(time.time())
if self.trigger_id not in ['', '0']: # triggered plus fixed makes no sense
self.fixed = False
if self.fixed:
self.duration = self.end_time - self.start_time
# This is important for flexible downtimes. Here start_time and
# end_time mean: in this time interval it is possible to trigger
# the beginning of the downtime which lasts for duration.
# Later, when a non-ok event happens, real_end_time will be
# recalculated from now+duration
# end_time will be displayed in the web interface, but real_end_time
# is used internally
self.real_end_time = self.end_time
self.is_in_effect = False
self.has_been_triggered = False # another downtime has triggered me
self.can_be_deleted = False
def __str__(self): # pragma: no cover
if self.is_in_effect is True:
active = "active"
else:
active = "inactive"
if self.fixed is True:
d_type = "fixed"
else:
d_type = "flexible"
return "%s %s Downtime id=%s %s - %s" % (
active, d_type, self.uuid, time.ctime(self.start_time), time.ctime(self.end_time))
def trigger_me(self, other_downtime):
"""Wrapper to activate_me.append function
Used to add another downtime to activate
:param other_downtime: other downtime to activate/cancel
:type other_downtime:
:return: None
"""
self.activate_me.append(other_downtime)
def in_scheduled_downtime(self):
"""Getter for is_in_effect attribute
:return: True if downtime is in effect, False otherwise
:rtype: bool
"""
return self.is_in_effect
def enter(self, timeperiods, hosts, services):
"""Set ref in scheduled downtime and raise downtime log entry (start)
:param hosts: hosts objects to get item ref
:type hosts: alignak.objects.host.Hosts
:param services: services objects to get item ref
:type services: alignak.objects.service.Services
:return: broks
:rtype: list of broks
"""
if self.ref in hosts:
item = hosts[self.ref]
else:
item = services[self.ref]
broks = []
self.is_in_effect = True
if self.fixed is False:
now = time.time()
self.real_end_time = now + self.duration
item.scheduled_downtime_depth += 1
item.in_scheduled_downtime = True
if item.scheduled_downtime_depth == 1:
item.raise_enter_downtime_log_entry()
notification_period = None
if getattr(item, 'notification_period', None) is not None:
notification_period = timeperiods[item.notification_period]
# Notification author data
# todo: note that alias and name are not implemented yet
author_data = {
'author': self.author, 'author_name': u'Not available',
'author_alias': u'Not available', 'author_comment': self.comment
}
item.create_notifications('DOWNTIMESTART', notification_period, hosts, services,
author_data=author_data)
if self.ref in hosts:
broks.append(self.get_raise_brok(item.get_name()))
# For an host, acknowledge the host problem (and its services problems)
# Acknowledge the host with a sticky ack and notifications
# The acknowledge will expire at the same time as the downtime end
item.acknowledge_problem(notification_period, hosts, services, 2, 1, "Alignak",
"Acknowledged because of an host downtime")
else:
broks.append(self.get_raise_brok(item.host_name, item.get_name()))
for downtime_id in self.activate_me:
for host in hosts:
if downtime_id in host.downtimes:
downtime = host.downtimes[downtime_id]
broks.extend(downtime.enter(timeperiods, hosts, services))
for service in services:
if downtime_id in service.downtimes:
downtime = service.downtimes[downtime_id]
broks.extend(downtime.enter(timeperiods, hosts, services))
return broks
def exit(self, timeperiods, hosts, services):
"""Remove ref in scheduled downtime and raise downtime log entry (exit)
:param hosts: hosts objects to get item ref
:type hosts: alignak.objects.host.Hosts
:param services: services objects to get item ref
:type services: alignak.objects.service.Services
:return: [], always | None
:rtype: list
"""
if self.ref in hosts:
item = hosts[self.ref]
else:
item = services[self.ref]
broks = []
# If not is_in_effect means that ot was probably a flexible downtime which was
# not triggered. In this case, nothing special to do...
if self.is_in_effect is True:
# This was a fixed or a flexible+triggered downtime
self.is_in_effect = False
item.scheduled_downtime_depth -= 1
if item.scheduled_downtime_depth == 0:
item.raise_exit_downtime_log_entry()
notification_period = timeperiods[item.notification_period]
# Notification author data
# todo: note that alias and name are not implemented yet
author_data = {
'author': self.author, 'author_name': u'Not available',
'author_alias': u'Not available', 'author_comment': self.comment
}
item.create_notifications(u'DOWNTIMEEND', notification_period, hosts, services,
author_data=author_data)
item.in_scheduled_downtime = False
if self.ref in hosts:
broks.append(self.get_expire_brok(item.get_name()))
else:
broks.append(self.get_expire_brok(item.host_name, item.get_name()))
item.del_comment(self.comment_id)
self.can_be_deleted = True
# when a downtime ends and the concerned item was a problem
# a notification should be sent with the next critical check
# So we should set a flag here which informs the consume_result function
# to send a notification
item.in_scheduled_downtime_during_last_check = True
return broks
def cancel(self, timeperiods, hosts, services):
"""Remove ref in scheduled downtime and raise downtime log entry (cancel)
:param hosts: hosts objects to get item ref
:type hosts: alignak.objects.host.Hosts
:param services: services objects to get item ref
:type services: alignak.objects.service.Services
:return: [], always
:rtype: list
"""
if self.ref in hosts:
item = hosts[self.ref]
else:
item = services[self.ref]
broks = []
self.is_in_effect = False
item.scheduled_downtime_depth -= 1
if item.scheduled_downtime_depth == 0:
item.raise_cancel_downtime_log_entry()
item.in_scheduled_downtime = False
if self.ref in hosts:
broks.append(self.get_expire_brok(item.get_name()))
else:
broks.append(self.get_expire_brok(item.host_name, item.get_name()))
self.del_automatic_comment(item)
self.can_be_deleted = True
item.in_scheduled_downtime_during_last_check = True
# Nagios does not notify on canceled downtimes
# res.extend(self.ref.create_notifications('DOWNTIMECANCELLED'))
# Also cancel other downtimes triggered by me
for downtime in self.activate_me:
broks.extend(downtime.cancel(timeperiods, hosts, services))
return broks
def add_automatic_comment(self, ref):
"""Add comment on ref for downtime
:param ref: the host/service we want to link a comment to
:type ref: alignak.objects.schedulingitem.SchedulingItem
:return: None
"""
if self.fixed is True:
text = (DOWNTIME_FIXED_MESSAGE % (ref.my_type,
time.strftime("%Y-%m-%d %H:%M:%S",
time.localtime(self.start_time)),
time.strftime("%Y-%m-%d %H:%M:%S",
time.localtime(self.end_time)),
ref.my_type))
else:
hours, remainder = divmod(self.duration, 3600)
minutes, _ = divmod(remainder, 60)
text = (DOWNTIME_FLEXIBLE_MESSAGE % (ref.my_type,
time.strftime("%Y-%m-%d %H:%M:%S",
time.localtime(self.start_time)),
time.strftime("%Y-%m-%d %H:%M:%S",
time.localtime(self.end_time)),
hours, minutes, ref.my_type))
data = {
'comment': text,
'comment_type': 1 if ref.my_type == 'host' else 2,
'entry_type': 2,
'source': 0,
'expires': False,
'ref': ref.uuid
}
comment = Comment(data)
self.comment_id = comment.uuid
ref.comments[comment.uuid] = comment
return comment
def del_automatic_comment(self, item):
"""Remove automatic comment on ref previously created
:param item: item service or host
:type item: object
:return: None
"""
item.del_comment(self.comment_id)
self.comment_id = ''
def fill_data_brok_from(self, data, brok_type):
"""Fill data with info of item by looking at brok_type
in props of properties or running_properties
:param data: data to fill
:type data:
:param brok_type: type of brok
:type brok_type: str
:return: None
TODO: Duplicate from Notification.fill_data_brok_from
"""
cls = self.__class__
# Now config properties
for prop, entry in list(cls.properties.items()):
if hasattr(prop, 'fill_brok'):
if brok_type in entry['fill_brok']:
data[prop] = getattr(self, prop)
def get_raise_brok(self, host_name, service_name=''):
"""Get a start downtime brok
:param host_name: host concerned by the downtime
:type host_name
:param service_name: service concerned by the downtime
:type service_name
:return: brok with wanted data
:rtype: alignak.brok.Brok
"""
data = self.serialize()
data['host'] = host_name
if service_name != '':
data['service'] = service_name
return Brok({'type': 'downtime_raise', 'data': data})
def get_expire_brok(self, host_name, service_name=''):
"""Get an expire downtime brok
:param host_name: host concerned by the downtime
:type host_name
:param service_name: service concerned by the downtime
:type service_name
:return: brok with wanted data
:rtype: alignak.brok.Brok
"""
data = self.serialize()
data['host'] = host_name
if service_name != '':
data['service'] = service_name
return Brok({'type': 'downtime_expire', 'data': data})
|
class Downtime(AlignakObject):
''' Schedules downtime for a specified service. If the "fixed" argument is set
to one (1), downtime will start and end at the times specified by the
"start" and "end" arguments.
Otherwise, downtime will begin between the "start" and "end" times and last
for "duration" seconds. The "start" and "end" arguments are specified
in time_t format (seconds since the UNIX epoch). The specified service
downtime can be triggered by another downtime entry if the "trigger_id"
is set to the ID of another scheduled downtime entry.
Set the "trigger_id" argument to zero (0) if the downtime for the
specified service should not be triggered by another downtime entry.
'''
def __init__(self, params, parsing=False):
pass
def __str__(self):
pass
def trigger_me(self, other_downtime):
'''Wrapper to activate_me.append function
Used to add another downtime to activate
:param other_downtime: other downtime to activate/cancel
:type other_downtime:
:return: None
'''
pass
def in_scheduled_downtime(self):
'''Getter for is_in_effect attribute
:return: True if downtime is in effect, False otherwise
:rtype: bool
'''
pass
def enter(self, timeperiods, hosts, services):
'''Set ref in scheduled downtime and raise downtime log entry (start)
:param hosts: hosts objects to get item ref
:type hosts: alignak.objects.host.Hosts
:param services: services objects to get item ref
:type services: alignak.objects.service.Services
:return: broks
:rtype: list of broks
'''
pass
def exit(self, timeperiods, hosts, services):
'''Remove ref in scheduled downtime and raise downtime log entry (exit)
:param hosts: hosts objects to get item ref
:type hosts: alignak.objects.host.Hosts
:param services: services objects to get item ref
:type services: alignak.objects.service.Services
:return: [], always | None
:rtype: list
'''
pass
def cancel(self, timeperiods, hosts, services):
'''Remove ref in scheduled downtime and raise downtime log entry (cancel)
:param hosts: hosts objects to get item ref
:type hosts: alignak.objects.host.Hosts
:param services: services objects to get item ref
:type services: alignak.objects.service.Services
:return: [], always
:rtype: list
'''
pass
def add_automatic_comment(self, ref):
'''Add comment on ref for downtime
:param ref: the host/service we want to link a comment to
:type ref: alignak.objects.schedulingitem.SchedulingItem
:return: None
'''
pass
def del_automatic_comment(self, item):
'''Remove automatic comment on ref previously created
:param item: item service or host
:type item: object
:return: None
'''
pass
def fill_data_brok_from(self, data, brok_type):
'''Fill data with info of item by looking at brok_type
in props of properties or running_properties
:param data: data to fill
:type data:
:param brok_type: type of brok
:type brok_type: str
:return: None
TODO: Duplicate from Notification.fill_data_brok_from
'''
pass
def get_raise_brok(self, host_name, service_name=''):
'''Get a start downtime brok
:param host_name: host concerned by the downtime
:type host_name
:param service_name: service concerned by the downtime
:type service_name
:return: brok with wanted data
:rtype: alignak.brok.Brok
'''
pass
def get_expire_brok(self, host_name, service_name=''):
'''Get an expire downtime brok
:param host_name: host concerned by the downtime
:type host_name
:param service_name: service concerned by the downtime
:type service_name
:return: brok with wanted data
:rtype: alignak.brok.Brok
'''
pass
| 13 | 11 | 24 | 2 | 14 | 8 | 4 | 0.54 | 1 | 5 | 2 | 0 | 12 | 10 | 12 | 15 | 345 | 37 | 203 | 53 | 190 | 109 | 134 | 52 | 121 | 11 | 2 | 3 | 42 |
3,898 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/eventhandler.py
|
alignak.eventhandler.EventHandler
|
class EventHandler(Action):
"""Notification class, inherits from action class. Used to execute action
when a host or a service is in a bad state
"""
# AutoSlots create the __slots__ with properties and
# running_properties names
__metaclass__ = AutoSlots
my_type = 'eventhandler'
properties = Action.properties.copy()
properties.update({
'is_a':
StringProp(default=u'eventhandler'),
'is_snapshot':
BoolProp(default=False),
})
def __init__(self, params=None, parsing=False):
super(EventHandler, self).__init__(params, parsing=parsing)
self.fill_default()
# An event handler must be launched as soon as possible
self.t_to_go = time.time()
print("EH: %s" % self.__dict__)
def __str__(self): # pragma: no cover
return "Event Handler %s, item: %s, status: %s command: %s" \
% (self.uuid, self.ref, self.status, self.command)
def get_return_from(self, e_handler):
"""Setter of the following attributes::
* exit_status
* output
* long_output
* check_time
* execution_time
* perf_data
:param e_handler: event handler to get data from
:type e_handler: alignak.eventhandler.EventHandler
:return: None
"""
for prop in ['exit_status', 'output', 'long_output', 'check_time', 'execution_time',
'perf_data']:
setattr(self, prop, getattr(e_handler, prop))
def get_outputs(self, out, max_plugins_output_length):
"""Setter of output attribute
:param out: new output
:type out: str
:param max_plugins_output_length: not used
:type max_plugins_output_length: int
:return: None
"""
self.output = out
|
class EventHandler(Action):
'''Notification class, inherits from action class. Used to execute action
when a host or a service is in a bad state
'''
def __init__(self, params=None, parsing=False):
pass
def __str__(self):
pass
def get_return_from(self, e_handler):
'''Setter of the following attributes::
* exit_status
* output
* long_output
* check_time
* execution_time
* perf_data
:param e_handler: event handler to get data from
:type e_handler: alignak.eventhandler.EventHandler
:return: None
'''
pass
def get_outputs(self, out, max_plugins_output_length):
'''Setter of output attribute
:param out: new output
:type out: str
:param max_plugins_output_length: not used
:type max_plugins_output_length: int
:return: None
'''
pass
| 5 | 3 | 10 | 1 | 4 | 5 | 1 | 1.04 | 1 | 1 | 0 | 0 | 4 | 2 | 4 | 19 | 61 | 13 | 24 | 11 | 19 | 25 | 17 | 11 | 12 | 2 | 4 | 1 | 5 |
3,899 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/external_command.py
|
alignak.external_command.ExternalCommand
|
class ExternalCommand(object):
"""ExternalCommand class is only an object with a cmd_line attribute.
All parsing and execution is done in manager
"""
my_type = 'externalcommand'
def __init__(self, cmd_line, timestamp=None):
self.cmd_line = cmd_line
try:
self.cmd_line = self.cmd_line.decode('utf8', 'ignore')
except UnicodeEncodeError:
pass
except AttributeError:
# Python 3 will raise an exception
pass
self.creation_timestamp = timestamp or time.time()
def serialize(self, no_json=True, printing=False):
"""This function serializes into a simple dict object.
It is used when transferring data to other daemons over the network (http)
Here we directly return all attributes
:return: json representation of a Brok
:rtype: dict
"""
return {"my_type": self.my_type, "cmd_line": self.cmd_line,
"creation_timestamp": self.creation_timestamp}
|
class ExternalCommand(object):
'''ExternalCommand class is only an object with a cmd_line attribute.
All parsing and execution is done in manager
'''
def __init__(self, cmd_line, timestamp=None):
pass
def serialize(self, no_json=True, printing=False):
'''This function serializes into a simple dict object.
It is used when transferring data to other daemons over the network (http)
Here we directly return all attributes
:return: json representation of a Brok
:rtype: dict
'''
pass
| 3 | 2 | 11 | 1 | 6 | 4 | 2 | 0.71 | 1 | 2 | 0 | 0 | 2 | 2 | 2 | 2 | 29 | 5 | 14 | 6 | 11 | 10 | 13 | 6 | 10 | 3 | 1 | 1 | 4 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.