Each row in this dump follows the schema below, reconstructed from the flattened column header (column name, dtype, observed min/max):

| Column | Type | Min | Max |
|---|---|---|---|
| id | int64 | 0 | 843k |
| repository_name | string (length) | 7 | 55 |
| file_path | string (length) | 9 | 332 |
| class_name | string (length) | 3 | 290 |
| human_written_code | string (length) | 12 | 4.36M |
| class_skeleton | string (length) | 19 | 2.2M |
| total_program_units | int64 | 1 | 9.57k |
| total_doc_str | int64 | 0 | 4.2k |
| AvgCountLine | float64 | 0 | 7.89k |
| AvgCountLineBlank | float64 | 0 | 300 |
| AvgCountLineCode | float64 | 0 | 7.89k |
| AvgCountLineComment | float64 | 0 | 7.89k |
| AvgCyclomatic | float64 | 0 | 130 |
| CommentToCodeRatio | float64 | 0 | 176 |
| CountClassBase | float64 | 0 | 48 |
| CountClassCoupled | float64 | 0 | 589 |
| CountClassCoupledModified | float64 | 0 | 581 |
| CountClassDerived | float64 | 0 | 5.37k |
| CountDeclInstanceMethod | float64 | 0 | 4.2k |
| CountDeclInstanceVariable | float64 | 0 | 299 |
| CountDeclMethod | float64 | 0 | 4.2k |
| CountDeclMethodAll | float64 | 0 | 4.2k |
| CountLine | float64 | 1 | 115k |
| CountLineBlank | float64 | 0 | 9.01k |
| CountLineCode | float64 | 0 | 94.4k |
| CountLineCodeDecl | float64 | 0 | 46.1k |
| CountLineCodeExe | float64 | 0 | 91.3k |
| CountLineComment | float64 | 0 | 27k |
| CountStmt | float64 | 1 | 93.2k |
| CountStmtDecl | float64 | 0 | 46.1k |
| CountStmtExe | float64 | 0 | 90.2k |
| MaxCyclomatic | float64 | 0 | 759 |
| MaxInheritanceTree | float64 | 0 | 16 |
| MaxNesting | float64 | 0 | 34 |
| SumCyclomatic | float64 | 0 | 6k |
145,748 |
Linaro/squad
|
Linaro_squad/squad/ci/migrations/0003_backend_name.py
|
squad.ci.migrations.0003_backend_name.Migration
|
class Migration(migrations.Migration):
    dependencies = [
        ('ci', '0002_auto_20170406_1252'),
    ]
    operations = [
        migrations.AddField(
            model_name='backend',
            name='name',
            field=models.CharField(default='unnamed', max_length=128, unique=True),
            preserve_default=False,
        ),
    ]
|
class Migration(migrations.Migration):
    pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 14 | 2 | 12 | 3 | 11 | 0 | 3 | 3 | 2 | 0 | 1 | 0 | 0 |
145,749 |
Linaro/squad
|
Linaro_squad/squad/ci/migrations/0001_initial.py
|
squad.ci.migrations.0001_initial.Migration
|
class Migration(migrations.Migration):
    initial = True
    dependencies = [
        ('core', '0020_build_ordering'),
    ]
    operations = [
        migrations.CreateModel(
            name='Backend',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('url', models.URLField()),
                ('username', models.CharField(max_length=128)),
                ('token', models.CharField(max_length=1024)),
                ('implementation_type', models.CharField(choices=[('lava', 'LAVA'), ('null', 'None')], default='null', max_length=64)),
                ('poll_interval', models.IntegerField(default=60)),
            ],
        ),
        migrations.CreateModel(
            name='TestJob',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('build', models.TextField()),
                ('environment', models.CharField(max_length=100, validators=[django.core.validators.RegexValidator(regex='^[a-zA-Z0-9][a-zA-Z0-9_-]+')])),
                ('definition', models.TextField()),
                ('submitted', models.BooleanField(default=False)),
                ('fetched', models.BooleanField(default=False)),
                ('last_fetch_attempt', models.DateTimeField(blank=True, default=None, null=True)),
                ('job_id', models.CharField(blank=True, max_length=128, null=True)),
                ('job_status', models.CharField(blank=True, max_length=128, null=True)),
                ('backend', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='test_jobs', to='ci.Backend')),
                ('target', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Project')),
            ],
        ),
    ]
|
class Migration(migrations.Migration):
    pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 37 | 3 | 34 | 4 | 33 | 0 | 4 | 4 | 3 | 0 | 1 | 0 | 0 |
145,750 |
Linaro/squad
|
Linaro_squad/squad/api/utils.py
|
squad.api.utils.DisabledHTMLFilterBackend
|
class DisabledHTMLFilterBackend(RestFrameworkFilterBackend):
    def to_html(self, request, queryset, view):
        return ""
|
class DisabledHTMLFilterBackend(RestFrameworkFilterBackend):
    def to_html(self, request, queryset, view):
        pass
| 2 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 4 | 1 | 3 | 2 | 1 | 0 | 3 | 2 | 1 | 1 | 1 | 0 | 1 |
145,751 |
Linaro/squad
|
Linaro_squad/squad/api/utils.py
|
squad.api.utils.CursorPaginationWithPageSize
|
class CursorPaginationWithPageSize(CursorPagination):
    page_size_query_param = 'limit'
    ordering = '-id'
|
class CursorPaginationWithPageSize(CursorPagination):
    pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0 | 3 | 3 | 2 | 0 | 3 | 3 | 2 | 0 | 1 | 0 | 0 |
145,752 |
Linaro/squad
|
Linaro_squad/squad/core/management/commands/fill_test_metadata.py
|
squad.core.management.commands.fill_test_metadata.Command
|
class Command(BaseCommand):
    help = """Get or create SuiteMetadata objects to fill tests that have metadata == NULL"""
    def add_arguments(self, parser):
        parser.add_argument('--batch-size', type=int, help='How many tests to process at once. Use this to prevent OOM errors')
        parser.add_argument('--show-progress', action='store_true', help='Prints out one dot every 1000 (one thousand) tests processed')
    def handle(self, *args, **options):
        show_progress = options['show_progress']
        batch_size = options['batch_size']
        logger.info("Filling metadata for %s tests" % (batch_size if batch_size else 'all'))
        tests = Test.objects.filter(metadata__isnull=True).prefetch_related('suite')
        if batch_size:
            tests = tests[:batch_size]
        count_processed = 0
        for test in tests:
            metadata, _ = SuiteMetadata.objects.get_or_create(suite=test.suite.slug, name=test.name, kind='test')
            test.metadata = metadata
            test.save()
            count_processed += 1
            if count_processed % 1000 == 0 and show_progress:
                print('.', end='', flush=True)
|
class Command(BaseCommand):
    def add_arguments(self, parser):
        pass
    def handle(self, *args, **options):
        pass
| 3 | 0 | 11 | 2 | 9 | 0 | 3 | 0 | 1 | 3 | 2 | 0 | 2 | 0 | 2 | 2 | 26 | 6 | 20 | 10 | 17 | 0 | 20 | 10 | 17 | 5 | 1 | 2 | 6 |
145,753 |
Linaro/squad
|
Linaro_squad/squad/core/management/commands/fix_squadplugin_data.py
|
squad.core.management.commands.fix_squadplugin_data.Command
|
class Command(BaseCommand):
    help = """helper that fixes buggy SuiteMetadata objects"""
    def add_arguments(self, parser):
        parser.add_argument(
            '--show-progress',
            action='store_true',
            help='Prints out one dot every 1000 (one thousand) metadata processed'
        )
        parser.add_argument(
            '--num-threads',
            type=int,
            default=2,
            help='Number of simultaneous parallel threads to work'
        )
    def handle(self, *args, **options):
        show_progress = options['show_progress']
        num_threads = options['num_threads']
        logger.info('Discovering number of metadata that need work...')
        count = int(SuiteMetadata.objects.filter(buggy_ones).count())
        if count == 0:
            logger.info('Nothing to do!')
            return
        logger.info('Working on %d metadatas' % count)
        metadata_ids = SuiteMetadata.objects.filter(buggy_ones).order_by('-id').values_list('id', flat=True)
        chunk_size = math.floor(len(metadata_ids) / num_threads) + 1
        chunks = split_list(metadata_ids, chunk_size=chunk_size)
        threads = []
        for chunk in chunks:
            thread_id = len(threads)
            thread = SuiteMetadataFixThread(thread_id, chunk, show_progress=show_progress)
            thread.start()
            threads.append(thread)
        for thread in threads:
            thread.join()
        logger.info('Done updating')
        # Check that everything worked as expected
        count = int(SuiteMetadata.objects.filter(buggy_ones).count())
        if count > 0:
            logger.error('Something went wrong! %d metadata are still buggy' % count)
            return
        logger.info('Done!')
|
class Command(BaseCommand):
    def add_arguments(self, parser):
        pass
    def handle(self, *args, **options):
        pass
| 3 | 0 | 24 | 5 | 19 | 1 | 3 | 0.03 | 1 | 3 | 2 | 0 | 2 | 0 | 2 | 2 | 53 | 12 | 40 | 14 | 37 | 1 | 31 | 14 | 28 | 5 | 1 | 1 | 6 |
145,754 |
Linaro/squad
|
Linaro_squad/squad/api/utils.py
|
squad.api.utils.BrowsableAPIRendererWithoutForms
|
class BrowsableAPIRendererWithoutForms(BrowsableAPIRenderer):
    """Renders the browsable api, but excludes the forms."""
    def get_context(self, *args, **kwargs):
        ctx = super().get_context(*args, **kwargs)
        ctx['display_edit_forms'] = False
        return ctx
    def show_form_for_method(self, view, method, request, obj):
        """We never want to do this! So just return False."""
        return False
    def get_rendered_html_form(self, data, view, method, request):
        """Why render _any_ forms at all. This method should return
        rendered HTML, so let's simply return an empty string.
        """
        return ""
|
class BrowsableAPIRendererWithoutForms(BrowsableAPIRenderer):
    '''Renders the browsable api, but excludes the forms.'''
    def get_context(self, *args, **kwargs):
        pass
    def show_form_for_method(self, view, method, request, obj):
        '''We never want to do this! So just return False.'''
        pass
    def get_rendered_html_form(self, data, view, method, request):
        '''Why render _any_ forms at all. This method should return
        rendered HTML, so let's simply return an empty string.
        '''
        pass
| 4 | 3 | 4 | 0 | 3 | 1 | 1 | 0.56 | 1 | 1 | 0 | 0 | 3 | 0 | 3 | 3 | 17 | 3 | 9 | 5 | 5 | 5 | 9 | 5 | 5 | 1 | 1 | 0 | 3 |
145,755 |
Linaro/squad
|
Linaro_squad/squad/api/rest.py
|
squad.api.rest.TestViewSet
|
class TestViewSet(NestedViewSetMixin, ModelViewSet):
    queryset = Test.objects.prefetch_related('metadata').all()
    project_lookup_key = 'build__project__in'
    serializer_class = TestSerializer
    filterset_class = TestFilter
    filter_class = filterset_class  # TODO: remove when django-filters 1.x is not supported anymore
    pagination_class = CursorPaginationWithPageSize
    ordering = ('-id',)
    def get_queryset(self):
        # Squeeze a few ms from this query if user wants less fields
        fields = self.request.query_params.get('fields')
        queryset = super().get_queryset()
        if fields:
            fields = fields.split(',')
            basic_fields = ['build', 'environment', 'test_run', 'suite', 'log']
            for field in basic_fields:
                if field not in fields:
                    queryset = queryset.defer(field)
            if 'known_issues' in fields:
                queryset = queryset.prefetch_related('known_issues')
            # 'status' depends on 'result' and 'has_known_issues'
            if 'status' not in fields:
                if 'result' not in fields:
                    queryset = queryset.defer('result')
                if 'has_known_issues' not in fields:
                    queryset = queryset.defer('has_known_issues')
            else:
                queryset = queryset.prefetch_related('known_issues')
        return queryset
|
class TestViewSet(NestedViewSetMixin, ModelViewSet):
    def get_queryset(self):
        pass
| 2 | 0 | 26 | 5 | 19 | 2 | 8 | 0.11 | 2 | 1 | 0 | 0 | 1 | 0 | 1 | 3 | 36 | 7 | 27 | 13 | 25 | 3 | 26 | 13 | 24 | 8 | 2 | 3 | 8 |
145,756 |
Linaro/squad
|
Linaro_squad/squad/api/rest.py
|
squad.api.rest.TestSerializer
|
class TestSerializer(DynamicFieldsModelSerializer, serializers.HyperlinkedModelSerializer):
    id = serializers.IntegerField(read_only=True)
    def __init__(self, *args, **kwargs):
        remove_fields = kwargs.pop('remove_fields', None)
        super(TestSerializer, self).__init__(*args, **kwargs)
        if remove_fields:
            # for multiple fields in a list
            for field_name in remove_fields:
                self.fields.pop(field_name)
    name = serializers.CharField(source='full_name', read_only=True)
    short_name = serializers.CharField(source='name')
    status = serializers.CharField(read_only=True)
    class Meta:
        model = Test
        fields = '__all__'
|
class TestSerializer(DynamicFieldsModelSerializer, serializers.HyperlinkedModelSerializer):
    def __init__(self, *args, **kwargs):
        pass
    class Meta:
| 3 | 0 | 7 | 0 | 6 | 1 | 3 | 0.07 | 2 | 1 | 0 | 1 | 1 | 0 | 1 | 2 | 19 | 4 | 14 | 10 | 11 | 1 | 14 | 10 | 11 | 3 | 2 | 2 | 3 |
145,757 |
Linaro/squad
|
Linaro_squad/squad/api/rest.py
|
squad.api.rest.TestNameSerializer
|
class TestNameSerializer(serializers.BaseSerializer):
    name = serializers.CharField(read_only=True)
|
class TestNameSerializer(serializers.BaseSerializer):
    pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 1 | 0 | 0 |
145,758 |
Linaro/squad
|
Linaro_squad/squad/api/rest.py
|
squad.api.rest.SuiteMetadataViewset
|
class SuiteMetadataViewset(viewsets.ModelViewSet):
    queryset = SuiteMetadata.objects
    serializer_class = SuiteMetadataSerializer
    filterset_fields = ('suite', 'kind', 'name')
    filter_fields = filterset_fields  # TODO: remove when django-filters 1.x is not supported anymore
    filterset_class = SuiteMetadataFilter
    filter_class = filterset_class  # TODO: remove when django-filters 1.x is not supported anymore
    ordering_fields = ('name', 'suite', 'id')
    pagination_class = CursorPaginationWithPageSize
    ordering = ('id',)
    def get_queryset(self):
        request = self.request
        suites_qs = self.queryset
        project_ids = request.query_params.get("project", None)
        project_qs = Project.objects.all()
        try:
            if project_ids:
                projects = project_ids.split(",")
                project_qs = project_qs.filter(id__in=projects)
                suites_names = Suite.objects.filter(project__in=project_qs).values_list('slug')
                suites_qs = suites_qs.filter(suite__in=suites_names)
        except ValueError as e:
            logger.warning(e)
        return suites_qs
|
class SuiteMetadataViewset(viewsets.ModelViewSet):
    def get_queryset(self):
        pass
| 2 | 0 | 15 | 1 | 14 | 0 | 3 | 0.08 | 1 | 3 | 2 | 0 | 1 | 0 | 1 | 1 | 26 | 2 | 24 | 18 | 22 | 2 | 24 | 17 | 22 | 3 | 1 | 2 | 3 |
145,759 |
Linaro/squad
|
Linaro_squad/squad/api/rest.py
|
squad.api.rest.StatusViewSet
|
class StatusViewSet(NestedViewSetMixin, ModelViewSet):
    queryset = Status.objects.all()
    serializer_class = StatusSerializer
    filterset_class = StatusFilter
    filter_class = filterset_class  # TODO: remove when django-filters 1.x is not supported anymore
    pagination_class = CursorPaginationWithPageSize
    ordering = ('id',)
|
class StatusViewSet(NestedViewSetMixin, ModelViewSet):
    pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0.14 | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 8 | 1 | 7 | 7 | 6 | 1 | 7 | 7 | 6 | 0 | 2 | 0 | 0 |
145,760 |
Linaro/squad
|
Linaro_squad/squad/celery.py
|
squad.celery.MemoryUseLoggingTask
|
class MemoryUseLoggingTask(Task):
    def __call__(self, *args, **kwargs):
        ram0 = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss  # in kB
        try:
            return super(MemoryUseLoggingTask, self).__call__(*args, **kwargs)
        finally:
            ram = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss  # in kB
            diff = ram - ram0
            if diff >= 1024:  # 1024kB = 1048576 (1MB)
                logger = logging.getLogger()
                mb = diff / 1024
                logger.warning('Task %s%r consumed %dMB of memory', self.name, args, mb)
|
class MemoryUseLoggingTask(Task):
    def __call__(self, *args, **kwargs):
        pass
| 2 | 0 | 11 | 0 | 11 | 3 | 2 | 0.25 | 1 | 1 | 0 | 0 | 1 | 0 | 1 | 1 | 13 | 1 | 12 | 7 | 10 | 3 | 11 | 7 | 9 | 2 | 1 | 2 | 2 |
145,761 |
Linaro/squad
|
Linaro_squad/squad/api/rest.py
|
squad.api.rest.ProjectStatusViewSet
|
class ProjectStatusViewSet(viewsets.ModelViewSet):
    queryset = ProjectStatus.objects
    serializer_class = ProjectStatusSerializer
    filterset_fields = ('build',)
    filter_fields = filterset_fields  # TODO: remove when django-filters 1.x is not supported anymore
    filterset_class = ProjectStatusFilter
    filter_class = filterset_class  # TODO: remove when django-filters 1.x is not supported anymore
    ordering_fields = ('created_at', 'last_updated')
|
class ProjectStatusViewSet(viewsets.ModelViewSet):
    pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 | 1 | 8 | 8 | 7 | 2 | 8 | 8 | 7 | 0 | 1 | 0 | 0 |
145,762 |
Linaro/squad
|
Linaro_squad/squad/core/management/commands/fix_squadplugin_data.py
|
squad.core.management.commands.fix_squadplugin_data.SuiteMetadataFixThread
|
class SuiteMetadataFixThread(threading.Thread):
    def __init__(self, thread_id, suitemetadata_ids, show_progress=False):
        threading.Thread.__init__(self)
        self.thread_id = thread_id
        self.suitemetadata_ids = suitemetadata_ids
        self.show_progress = show_progress
    def run(self):
        count = len(self.suitemetadata_ids)
        logger.info('[thread-%s] processing %d suitemetadata' % (self.thread_id, count))
        orphan_metadata = []
        for offset in range(0, count, STEP):
            ids = self.suitemetadata_ids[offset:offset + STEP]
            for metadata in SuiteMetadata.objects.filter(id__in=ids).annotate(**annotations).all():
                # It means there's no SuiteMetadata with fixed suite, so it's safe to change it in place
                if metadata.correct_metadata_id is None:
                    if metadata.correct_suite_slug is None:
                        orphan_metadata.append(metadata.id)
                    else:
                        try:
                            metadata.suite = metadata.correct_suite_slug
                            metadata.save()
                        except IntegrityError:
                            logger.error('There appears to be a fixed suite metadata already')
                            logger.error('This was not supposed to happen though, check these cases carefully')
                            logger.error('SuiteMetadata (id: %d, kind=test, suite="%s", name="%s")' % (metadata.id, metadata.suite, metadata.name))
                            return
                # It means there's a correct one, so just update tests
                else:
                    Test.objects.order_by().filter(metadata=metadata).update(metadata_id=metadata.correct_metadata_id)
                    # It's safe to delete buggy metadata now
                    orphan_metadata.append(metadata.id)
            if self.show_progress:
                print('.', end='', flush=True)
        if len(orphan_metadata) > 0:
            logger.info('Deleting %d orphan metadata objects' % len(orphan_metadata))
            chunks = split_list(orphan_metadata, chunk_size=10000)
            for chunk in chunks:
                SuiteMetadata.objects.filter(id__in=chunk).delete()
        logger.info('[thread-%s] done updating' % self.thread_id)
|
class SuiteMetadataFixThread(threading.Thread):
    def __init__(self, thread_id, suitemetadata_ids, show_progress=False):
        pass
    def run(self):
        pass
| 3 | 0 | 21 | 2 | 18 | 2 | 5 | 0.08 | 1 | 3 | 2 | 0 | 2 | 3 | 2 | 27 | 43 | 4 | 36 | 13 | 33 | 3 | 34 | 13 | 31 | 9 | 1 | 5 | 10 |
145,763 |
Linaro/squad
|
Linaro_squad/squad/core/management/commands/import_data.py
|
squad.core.management.commands.import_data.Command
|
class Command(BaseCommand):
    help = """Import data from DIRECTORY into PROJECT. See
    squad/core/management/commands/import_data.rst for documentation on the
    expected format"""
    def add_arguments(self, parser):
        parser.add_argument(
            '--dry-run', '-d',
            action='store_true',
            dest='dry_run',
            help='dry run (i.e. don\'t really do the importing)',
        )
        parser.add_argument(
            '--silent', '-s',
            action='store_true',
            dest='silent',
            help='operate silently (i.e. don\'t output anything)',
        )
        parser.add_argument(
            'PROJECT',
            help='Target project, on the form $group/$project',
        )
        parser.add_argument(
            'DIRECTORY',
            help='Input directory with the files to import',
        )
    def handle(self, *args, **options):
        self.options = options
        if not self.options['dry_run']:
            group_id, project_id = options['PROJECT'].split('/')
            self.group, _ = Group.objects.get_or_create(slug=group_id)
            self.project, _ = self.group.projects.get_or_create(slug=project_id)
            self.receive_test_run = ReceiveTestRun(self.project)
        if not self.options['silent']:
            print()
            msg = "Importing project: %s" % options["DIRECTORY"]
            print(msg)
            print('-' * len(msg))
            print()
        builds = sorted(glob(os.path.join(options['DIRECTORY'], '*')), key=build_key)
        total = len(builds)
        i = 0
        for directory in builds:
            i += 1
            if not self.options['silent']:
                print("I: importing build %d/%d" % (i, total))
            self.import_build(directory)
    def import_build(self, directory):
        build_id = os.path.basename(directory)
        for envdir in glob(os.path.join(directory, '*')):
            self.import_environment(build_id, envdir)
        if not self.options['dry_run']:
            try:
                build = self.project.builds.get(version=build_id)
                build.created_at = build.datetime
                build.save()
                status = build.status
                status.created_at = build.datetime
                status.last_updated = build.datetime
                status.save()
            except Build.DoesNotExist:
                # build may not exist if all test runs were missing metadata
                pass
    def import_environment(self, build_id, directory):
        environment_slug = os.path.basename(directory)
        for testrundir in glob(os.path.join(directory, '*')):
            self.import_testrun(build_id, environment_slug, testrundir)
    def import_testrun(self, build_id, environment_slug, directory):
        # mandatory
        metadata_path = os.path.join(directory, 'metadata.json')
        if not os.path.exists(metadata_path):
            if not self.options['silent']:
                print('W: test run has no metadata, ignoring: %s' % directory)
            return
        metadata = open(metadata_path).read()
        try:
            metrics = open(os.path.join(directory, 'metrics.json')).read()
        except FileNotFoundError:
            metrics = None
        try:
            tests = open(os.path.join(directory, 'tests.json')).read()
        except FileNotFoundError:
            tests = None
        attachments = {}
        for f in glob(os.path.join(directory, '*')):
            name = os.path.basename(f)
            if name not in ['metrics.json', 'metadata.json', 'tests.json']:
                attachments[name] = open(f, 'rb').read()
        if not self.options['silent']:
            print("Importing test run: %s" % directory)
        if self.options['dry_run']:
            return
        self.receive_test_run(
            version=build_id,
            environment_slug=environment_slug,
            metadata_file=metadata,
            metrics_file=metrics,
            tests_file=tests,
            attachments=attachments,
        )
|
class Command(BaseCommand):
    def add_arguments(self, parser):
        pass
    def handle(self, *args, **options):
        pass
    def import_build(self, directory):
        pass
    def import_environment(self, build_id, directory):
        pass
    def import_testrun(self, build_id, environment_slug, directory):
        pass
| 6 | 0 | 22 | 3 | 19 | 0 | 4 | 0.02 | 1 | 3 | 2 | 0 | 5 | 4 | 5 | 5 | 118 | 19 | 97 | 30 | 91 | 2 | 72 | 30 | 66 | 9 | 1 | 2 | 21 |
145,764 |
Linaro/squad
|
Linaro_squad/squad/core/management/commands/migrate_test_runs.py
|
squad.core.management.commands.migrate_test_runs.Command
|
class Command(BaseCommand):
    help = """Move test runs identified by environment slug
    from one project to another. This action preserves
    datetime of the objects and statuses."""
    def add_arguments(self, parser):
        parser.add_argument(
            '--old-project-slug',
            dest="old_project_slug",
            help="Slug of the project from which to migrate test runs"
        )
        parser.add_argument(
            '--new-project-slug',
            dest="new_project_slug",
            help="Slug of the project to which to migrate test runs"
        )
        parser.add_argument(
            '--env-slug',
            dest="env_slug",
            help="Slug of the environment to migrate to new project"
        )
    def handle(self, *args, **options):
        self.options = options
        if not self.options['old_project_slug']:
            print("ERROR: old_project_slug missing")
            sys.exit(1)
        if not self.options['new_project_slug']:
            print("ERROR: new_project_slug missing")
            sys.exit(1)
        if not self.options['env_slug']:
            print("ERROR: env_slug missing")
            sys.exit(1)
        old_project = None
        new_project = None
        env = None
        try:
            old_project = Project.objects.get(slug=self.options['old_project_slug'])
        except ObjectDoesNotExist:
            print("Project: %s not found. Exiting" % self.options['old_project_slug'])
            sys.exit(0)
        try:
            new_project = Project.objects.get(slug=self.options['new_project_slug'])
        except ObjectDoesNotExist:
            print("Project: %s not found. Exiting" % self.options['new_project_slug'])
            sys.exit(0)
        try:
            env = Environment.objects.get(project=old_project, slug=self.options['env_slug'])
        except ObjectDoesNotExist:
            print("Environment: %s not found. Exiting" % self.options['env_slug'])
            sys.exit(0)
        print("Migrating testruns from project %s to %s" % (old_project.slug, new_project.slug))
        print("All test runs with environment name: %s will be migrated" % env.slug)
        self.__handle__(old_project, new_project, env)
    @transaction.atomic
    def __handle__(self, old_project, new_project, env):
        for build in old_project.builds.all():
            if build.test_runs.filter(environment=env):
                print("moving build: %s" % build)
                new_build, _ = Build.objects.get_or_create(
                    version=build.version,
                    datetime=build.datetime,
                    project=new_project,
                    created_at=build.created_at)
                for testrun in build.test_runs.filter(environment=env):
                    testrun.build = new_build
                    testrun.save()
                    testrun.environment.project = new_project
                    testrun.environment.save()
                    for testjob in testrun.test_jobs.all():
                        testjob.target = new_project
                        testjob.save()
                    UpdateProjectStatus()(testrun)
                new_build.status.created_at = build.status.created_at
                new_build.status.last_updated = build.status.last_updated
                new_build.status.save()
            else:
                print("No matching test runs found in build: %s" % build)
        env.project = new_project
        env.save()
        for suite in old_project.suites.all():
            new_suite, _ = new_project.suites.get_or_create(
                slug=suite.slug,
                defaults={'name': suite.name}
            )
            for model in [Status, Test, Metric]:
                model.objects.filter(
                    suite=suite,
                    test_run__build__project_id=new_project.id,
                ).update(suite=new_suite)
|
class Command(BaseCommand):
    def add_arguments(self, parser):
        pass
    def handle(self, *args, **options):
        pass
    @transaction.atomic
    def __handle__(self, old_project, new_project, env):
        pass
| 5 | 0 | 30 | 2 | 28 | 0 | 5 | 0 | 1 | 7 | 7 | 0 | 3 | 1 | 3 | 3 | 98 | 10 | 88 | 17 | 83 | 0 | 62 | 16 | 58 | 7 | 1 | 4 | 15 |
145,765 |
Linaro/squad
|
Linaro_squad/squad/core/management/commands/populate_metric_build_and_environment.py
|
squad.core.management.commands.populate_metric_build_and_environment.Command
|
class Command(BaseCommand):
    help = """helper that populates build and environment columns in metric table"""
    def add_arguments(self, parser):
        parser.add_argument(
            '--show-progress',
            action='store_true',
            help='Prints out one dot every 1000 (one thousand) testruns processed'
        )
        parser.add_argument(
            '--num-threads',
            type=int,
            default=2,
            help='Number of simultaneous parallel threads to work'
        )
    def handle(self, *args, **options):
        show_progress = options['show_progress']
        num_threads = options['num_threads']
        logger.info('Discovering number of metrics that need work...')
        count = Metric.objects.filter(build__isnull=True, environment__isnull=True).count()
        if count == 0:
            logger.info('Nothing to do!')
            return
        logger.info('Working on %d metrics' % count)
        testrun_ids = TestRun.objects.order_by('-id').values_list('id', flat=True)
        chunk_size = math.floor(len(testrun_ids) / num_threads) + 1
        chunks = split_list(testrun_ids, chunk_size=chunk_size)
        threads = []
        for chunk in chunks:
            thread_id = len(threads)
            total_updates[thread_id] = 0
            thread = DataFillerThread(thread_id, chunk, show_progress=show_progress)
            thread.start()
            threads.append(thread)
        for thread in threads:
            thread.join()
        logger.info('Done updating %d metrics' % sum(total_updates.values()))
        # Check that everything worked as expected
        count = Metric.objects.filter(build__isnull=True, environment__isnull=True).count()
        if count > 0:
            logger.error('Something went wrong! %d metrics still do not have build and environment filled out' % count)
            return
        logger.info('Done!')
|
class Command(BaseCommand):
    def add_arguments(self, parser):
        pass
    def handle(self, *args, **options):
        pass
| 3 | 0 | 25 | 5 | 20 | 1 | 3 | 0.02 | 1 | 4 | 3 | 0 | 2 | 0 | 2 | 2 | 54 | 12 | 41 | 14 | 38 | 1 | 32 | 14 | 29 | 5 | 1 | 1 | 6 |
145,766 |
Linaro/squad
|
Linaro_squad/squad/core/management/commands/populate_metric_build_and_environment.py
|
squad.core.management.commands.populate_metric_build_and_environment.DataFillerThread
|
class DataFillerThread(threading.Thread):
    def __init__(self, thread_id, testrun_ids, show_progress=False):
        threading.Thread.__init__(self)
        self.thread_id = thread_id
        self.testrun_ids = testrun_ids
        self.show_progress = show_progress
    def run(self):
        count = len(self.testrun_ids)
        logger.info('[thread-%s] processing %d testruns' % (self.thread_id, count))
        count_updates = 0
        for offset in range(0, count, STEP):
            ids = self.testrun_ids[offset:offset + STEP]
            for testrun in TestRun.objects.filter(id__in=ids).only('build_id', 'environment_id').all():
                count_updates += testrun.metrics.update(build_id=testrun.build_id, environment_id=testrun.environment_id)
            if self.show_progress:
                print('.', end='', flush=True)
        total_updates[self.thread_id] = count_updates
        logger.info('[thread-%s] done updating %d metrics' % (self.thread_id, count_updates))
|
class DataFillerThread(threading.Thread):
    def __init__(self, thread_id, testrun_ids, show_progress=False):
        pass
    def run(self):
        pass
| 3 | 0 | 10 | 1 | 9 | 0 | 3 | 0 | 1 | 2 | 1 | 0 | 2 | 3 | 2 | 27 | 21 | 3 | 18 | 11 | 15 | 0 | 18 | 11 | 15 | 4 | 1 | 2 | 5 |
145,767 |
Linaro/squad
|
Linaro_squad/squad/core/management/commands/populate_test_build_and_environment.py
|
squad.core.management.commands.populate_test_build_and_environment.Command
|
class Command(BaseCommand):
    help = """helper that populates build and environment columns in test table"""
    def add_arguments(self, parser):
        parser.add_argument(
            '--show-progress',
            action='store_true',
            help='Prints out one dot every 1000 (one thousand) testruns processed'
        )
        parser.add_argument(
            '--num-threads',
            type=int,
            default=2,
            help='Number of simultaneous parallel threads to work'
        )
    def handle(self, *args, **options):
        show_progress = options['show_progress']
        num_threads = options['num_threads']
        logger.info('Discovering number of tests that need work...')
        count = Test.objects.filter(build__isnull=True, environment__isnull=True).count()
        if count == 0:
            logger.info('Nothing to do!')
            return
        logger.info('Working on %d tests' % count)
        testrun_ids = TestRun.objects.order_by('-id').values_list('id', flat=True)
        chunk_size = math.floor(len(testrun_ids) / num_threads) + 1
        chunks = split_list(testrun_ids, chunk_size=chunk_size)
        threads = []
        for chunk in chunks:
            thread_id = len(threads)
            total_updates[thread_id] = 0
            thread = DataFillerThread(thread_id, chunk, show_progress=show_progress)
            thread.start()
            threads.append(thread)
        for thread in threads:
            thread.join()
        logger.info('Done updating %d tests' % sum(total_updates.values()))
        # Check that everything worked as expected
        count = Test.objects.filter(build__isnull=True, environment__isnull=True).count()
        if count > 0:
            logger.error('Something went wrong! %d tests still do not have build and environment filled out' % count)
            return
        logger.info('Done!')
|
class Command(BaseCommand):
    def add_arguments(self, parser):
        pass
    def handle(self, *args, **options):
        pass
| 3 | 0 | 25 | 5 | 20 | 1 | 3 | 0.02 | 1 | 4 | 3 | 0 | 2 | 0 | 2 | 2 | 54 | 12 | 41 | 14 | 38 | 1 | 32 | 14 | 29 | 5 | 1 | 1 | 6 |
145,768 |
Linaro/squad
|
Linaro_squad/squad/core/management/commands/populate_test_build_and_environment.py
|
squad.core.management.commands.populate_test_build_and_environment.DataFillerThread
|
class DataFillerThread(threading.Thread):
    def __init__(self, thread_id, testrun_ids, show_progress=False):
        threading.Thread.__init__(self)
        self.thread_id = thread_id
        self.testrun_ids = testrun_ids
        self.show_progress = show_progress
    def run(self):
        count = len(self.testrun_ids)
        logger.info('[thread-%s] processing %d testruns' % (self.thread_id, count))
        count_updates = 0
        for offset in range(0, count, STEP):
            ids = self.testrun_ids[offset:offset + STEP]
            for testrun in TestRun.objects.filter(id__in=ids).only('build_id', 'environment_id').all():
                count_updates += testrun.tests.update(build_id=testrun.build_id, environment_id=testrun.environment_id)
            if self.show_progress:
                print('.', end='', flush=True)
        total_updates[self.thread_id] = count_updates
        logger.info('[thread-%s] done updating %d tests' % (self.thread_id, count_updates))
|
class DataFillerThread(threading.Thread):
    def __init__(self, thread_id, testrun_ids, show_progress=False):
        pass
    def run(self):
        pass
| 3 | 0 | 10 | 1 | 9 | 0 | 3 | 0 | 1 | 2 | 1 | 0 | 2 | 3 | 2 | 27 | 21 | 3 | 18 | 11 | 15 | 0 | 18 | 11 | 15 | 4 | 1 | 2 | 5 |
145,769 |
Linaro/squad
|
Linaro_squad/squad/core/management/commands/prepdump.py
|
squad.core.management.commands.prepdump.Command
|
class Command(BaseCommand):
    def handle_subscriptions(self, collection):
        username = os.getenv('USER')
        email = '%s@localhost' % username
        for sub in collection:
            sub.email = email
            sub.save()
    def handle(self, *args, **options):
        self.handle_subscriptions(models.Subscription.objects.all())
        self.handle_subscriptions(models.AdminSubscription.objects.all())
|
class Command(BaseCommand):
    def handle_subscriptions(self, collection):
        pass
    def handle(self, *args, **options):
        pass
| 3 | 0 | 5 | 0 | 5 | 0 | 2 | 0 | 1 | 2 | 2 | 0 | 2 | 0 | 2 | 2 | 12 | 2 | 10 | 6 | 7 | 0 | 10 | 6 | 7 | 2 | 1 | 1 | 3 |
145,770 |
Linaro/squad
|
Linaro_squad/squad/core/management/commands/send-email.py
|
squad.core.management.commands.send-email.Command
|
class Command(BaseCommand):
    def add_arguments(self, parser):
        parser.add_argument(
            '--dry-run', '-d',
            action='store_true',
            dest='dry_run',
            help='dry run (i.e. don\'t really do the importing)',
        )
        parser.add_argument(
            '--silent', '-s',
            action='store_true',
            dest='silent',
            help='operate silently (i.e. don\'t output anything)',
        )
        parser.add_argument(
            'PROJECT',
            help='Target project, on the form $group/$project',
        )
        parser.add_argument(
            'BUILD',
            help='Build id (version)',
        )
    def handle(self, *args, **options):
        g, p = options['PROJECT'].split('/')
        group = Group.objects.get(slug=g)
        project = group.projects.get(slug=p)
        build = project.builds.get(version=options['BUILD'])
        send_status_notification(build.status)
|
class Command(BaseCommand):
    def add_arguments(self, parser):
        pass
    def handle(self, *args, **options):
        pass
| 3 | 0 | 16 | 2 | 14 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 2 | 0 | 2 | 2 | 34 | 6 | 28 | 7 | 25 | 0 | 12 | 7 | 9 | 1 | 1 | 0 | 2 |
145,771 |
Linaro/squad
|
Linaro_squad/squad/core/management/commands/update_project_statuses.py
|
squad.core.management.commands.update_project_statuses.Command
|
class Command(BaseCommand):
    help = """Update project status records in order to fix a bug which compared current builds with the wrong ones."""
    def add_arguments(self, parser):
        parser.add_argument(
            '--date-start',
            dest="date_start",
            default=(datetime.now() - timedelta(days=180)),
            type=valid_date,
            help="Start date for project status updates (default: 6 months before current date, format: YYYY-MM-DD)."
        )
        parser.add_argument(
            '--date-end',
            dest="date_end",
            default=datetime.now(),
            type=valid_date,
            help="End date for project status updates (default: today, format: YYYY-MM-DD)."
        )
    def handle(self, *args, **options):
        self.options = options
        builds = Build.objects.filter(
            datetime__range=(
                timezone.make_aware(self.options['date_start']),
                timezone.make_aware(self.options['date_end'])),
            status__finished=True
        )
        total = builds.count()
        logger.info("Updating ProjectStatus objects from builds...")
        logger.info("Total build count in selected date range: %s" % total)
        for index, build in enumerate(builds.iterator()):
            ProjectStatus.create_or_update(build)
            if index % 100 == 0:
                logger.info('Progress: {1:>2}%[{0:10}]'.format(
                    '#' * int((index + 1) * 10 / total),
                    int((index + 1) * 100 / total)))
        logger.info('Progress: {1:>2}%[{0:10}]'.format('#' * 10, 100))
|
class Command(BaseCommand):
    def add_arguments(self, parser):
        pass
    def handle(self, *args, **options):
        pass
| 3 | 0 | 18 | 2 | 17 | 1 | 2 | 0.06 | 1 | 6 | 2 | 0 | 2 | 1 | 2 | 2 | 41 | 6 | 35 | 8 | 32 | 2 | 16 | 8 | 13 | 3 | 1 | 2 | 4 |
145,772 |
Linaro/squad
|
Linaro_squad/squad/api/rest.py
|
squad.api.rest.ProjectStatusSerializer
|
class ProjectStatusSerializer(DynamicFieldsModelSerializer, serializers.HyperlinkedModelSerializer):
    def to_representation(self, instance):
        ret = super().to_representation(instance)
        enriched_details = self.context.get('enriched_details', None)
        if instance.regressions is not None:
            regressions = instance.get_regressions()
            if enriched_details:
                for env in enriched_details.keys():
                    env_regressions = regressions.get(env, None)
                    if env_regressions:
                        enriched_details[env].update({'regressions': env_regressions})
            ret['regressions'] = json.dumps(regressions)
        if instance.fixes is not None:
            fixes = instance.get_fixes()
            if enriched_details:
                for env in enriched_details.keys():
                    env_fixes = fixes.get(env, None)
                    if env_fixes:
                        enriched_details[env].update({'fixes': env_fixes})
            ret['fixes'] = json.dumps(instance.get_fixes())
        if instance.metric_regressions is not None:
            metric_regressions = instance.get_metric_regressions()
            if enriched_details:
                for env in enriched_details.keys():
                    env_regressions = metric_regressions.get(env, None)
                    if env_regressions:
                        enriched_details[env].update({'metric_regressions': env_regressions})
            ret['metric_regressions'] = json.dumps(metric_regressions)
        if instance.metric_fixes is not None:
            metric_fixes = instance.get_metric_fixes()
            if enriched_details:
                for env in enriched_details.keys():
                    env_fixes = metric_fixes.get(env, None)
                    if env_fixes:
                        enriched_details[env].update({'metric_fixes': env_fixes})
            ret['metric_fixes'] = json.dumps(metric_fixes)
        ret['details'] = enriched_details
        return ret
    class Meta:
        model = ProjectStatus
        fields = ('last_updated',
                  'finished',
                  'notified',
                  'notified_on_timeout',
                  'approved',
                  'tests_pass',
                  'tests_fail',
                  'tests_skip',
                  'tests_xfail',
                  'tests_total',
                  'pass_percentage',
                  'fail_percentage',
                  'skip_percentage',
                  'test_runs_total',
                  'test_runs_completed',
                  'test_runs_incomplete',
                  'has_metrics',
                  'has_tests',
                  'metrics_summary',
                  'build',
                  'baseline',
                  'created_at',
                  'regressions',
                  'fixes',
                  'metric_regressions',
                  'metric_fixes')
|
class ProjectStatusSerializer(DynamicFieldsModelSerializer, serializers.HyperlinkedModelSerializer):
    def to_representation(self, instance):
        pass
    class Meta:
| 3 | 0 | 38 | 1 | 37 | 0 | 17 | 0 | 2 | 1 | 0 | 0 | 1 | 0 | 1 | 2 | 69 | 3 | 66 | 14 | 63 | 0 | 41 | 14 | 38 | 17 | 2 | 4 | 17 |
145,773 |
Linaro/squad
|
Linaro_squad/squad/core/management/commands/compute_project_statuses.py
|
squad.core.management.commands.compute_project_statuses.Command
|
class Command(BaseCommand):
    help = """Compute project statuses and set the baseline field"""
    def add_arguments(self, parser):
        parser.add_argument('--project', help='Optionally, specify a project to compute, on the form $group/$project')
        parser.add_argument('--show-progress', action='store_true', help='Prints out one dot per build in stdout')
        parser.add_argument(
            '--start-date',
            dest="start_date",
            default=(datetime.now() - timedelta(days=180)),
            type=valid_date,
            help="Start date for project status updates (default: 6 months before current date, format: YYYY-MM-DD)."
        )
        parser.add_argument(
            '--end-date',
            dest="end_date",
            default=datetime.now(),
            type=valid_date,
            help="End date for project status updates (default: today, format: YYYY-MM-DD)."
        )
    def __progress__(self, show):
        if show:
            self.stdout.write(".", ending="")
            self.stdout._out.flush()
    def handle(self, *args, **options):
        start_date = timezone.make_aware(options['start_date'])
        end_date = timezone.make_aware(options['end_date'])
        project_name = options['project'] or False
        show_progress = options['show_progress']
        logger.info("Filtering builds from %s to %s" % (start_date, end_date))
        builds = Build.objects.filter(
            datetime__range=(start_date, end_date)
        )
        project = None
        if project_name:
            slugs = project_name.split('/')
            if len(slugs) != 2:
                logger.error('Project "%s" is malformed (should be group_slug/project_slug). Exiting...' % (project_name))
                return
            try:
                group_slug, project_slug = slugs
                project = Project.objects.get(group__slug=group_slug, slug=project_slug)
            except Project.DoesNotExist:
                logger.error('Project "%s" does not exist. Exiting...' % (project_name))
                return
            logger.info('Filtering builds from project "%s"' % (project_name))
            builds = builds.filter(project=project)
        logger.info('Computing metrics summary for %d builds' % (builds.count()))
        if show_progress:
            logger.info('Showing progress, one dot means one processed build')
        for build in builds.all():
            self.__progress__(show_progress)
            ProjectStatus.create_or_update(build)
        if show_progress:
            self.stdout.write("")
            self.stdout._out.flush()
|
class Command(BaseCommand):
    def add_arguments(self, parser):
        pass
    def __progress__(self, show):
        pass
    def handle(self, *args, **options):
        pass
| 4 | 0 | 20 | 2 | 18 | 0 | 3 | 0 | 1 | 5 | 3 | 0 | 3 | 0 | 3 | 3 | 65 | 10 | 55 | 14 | 51 | 0 | 41 | 14 | 37 | 7 | 1 | 2 | 10 |
145,774 |
Linaro/squad
|
Linaro_squad/squad/core/management/commands/compute_build_summaries.py
|
squad.core.management.commands.compute_build_summaries.Command
|
class Command(BaseCommand):
    help = """Compute metric summaries per build per environment"""
    def add_arguments(self, parser):
        parser.add_argument('--project', help='Optionally, specify a project to compute, on the form $group/$project')
        parser.add_argument('--show-progress', action='store_true', help='Prints out one dot per build in stdout')
        parser.add_argument(
            '--start-date',
            dest="start_date",
            default=(datetime.now() - timedelta(days=180)),
            type=valid_date,
            help="Start date for project status updates (default: 6 months before current date, format: YYYY-MM-DD)."
        )
        parser.add_argument(
            '--end-date',
            dest="end_date",
            default=datetime.now(),
            type=valid_date,
            help="End date for project status updates (default: today, format: YYYY-MM-DD)."
        )
    def __progress__(self, show):
        if show:
            self.stdout.write(".", ending="")
            self.stdout._out.flush()
    def handle(self, *args, **options):
        start_date = timezone.make_aware(options['start_date'])
        end_date = timezone.make_aware(options['end_date'])
        project_name = options['project'] or False
        show_progress = options['show_progress']
        builds = Build.objects.filter(
            datetime__range=(start_date, end_date),
            status__finished=True
        )
        project = None
        if project_name:
            slugs = project_name.split('/')
            if len(slugs) != 2:
                logger.error('Project "%s" is malformed (should be group_slug/project_slug). Exiting...' % (project_name))
                return
            try:
                group_slug, project_slug = slugs
                project = Project.objects.get(group__slug=group_slug, slug=project_slug)
            except Project.DoesNotExist:
                logger.error('Project "%s" does not exist. Exiting...' % (project_name))
                return
            logger.info('Filtering builds from project "%s"' % (project_name))
            builds = builds.filter(project=project)
        logger.info('Computing metrics summary for %d builds' % (builds.count()))
        if show_progress:
            logger.info('Showing progress, one dot means one processed build')
        for build in builds.all():
            self.__progress__(show_progress)
            for environment in __get_environments__(build):
                BuildSummary.create_or_update(build, environment)
        if show_progress:
            self.stdout.write("")
            self.stdout._out.flush()
|
class Command(BaseCommand):
    def add_arguments(self, parser):
        pass
    def __progress__(self, show):
        pass
    def handle(self, *args, **options):
        pass
| 4 | 0 | 20 | 2 | 18 | 0 | 4 | 0 | 1 | 5 | 3 | 0 | 3 | 0 | 3 | 3 | 67 | 11 | 56 | 15 | 52 | 0 | 41 | 15 | 37 | 8 | 1 | 2 | 11 |
145,775 |
Linaro/squad
|
Linaro_squad/squad/celery.py
|
squad.celery.SquadCelery
|
class SquadCelery(Celery):
    def task(self, *args, **kwargs):
        kw = {'base': MemoryUseLoggingTask}
        kw.update(kwargs)
        return super(SquadCelery, self).task(*args, **kw)
    def send_task(self, *args, **options):
        if settings.CELERY_BROKER_URL and settings.CELERY_BROKER_URL.startswith('sqs'):
            options['MessageGroupId'] = str(time.time())
        return super(SquadCelery, self).send_task(*args, **options)
|
class SquadCelery(Celery):
    def task(self, *args, **kwargs):
        pass
    def send_task(self, *args, **options):
        pass
| 3 | 0 | 5 | 1 | 4 | 0 | 2 | 0 | 1 | 3 | 1 | 0 | 2 | 0 | 2 | 2 | 13 | 4 | 9 | 4 | 6 | 0 | 9 | 4 | 6 | 2 | 1 | 1 | 3 |
145,776 |
Linaro/squad
|
Linaro_squad/squad/ci/management/commands/testfetch.py
|
squad.ci.management.commands.testfetch.Command
|
class Command(BaseCommand):
    help = """Listens for "live" test results from CI backends"""
    def add_arguments(self, parser):
        parser.add_argument(
            '--background', '-b',
            action='store_true',
            dest="background",
            help="Fetch in the background (requires a running worker)",
        )
        parser.add_argument(
            'BACKEND',
            type=str,
            help='Backend name to use to fetch',
        )
        parser.add_argument(
            'JOBID',
            type=str,
            help='Job id to fetch',
        )
        parser.add_argument(
            'PROJECT',
            type=str,
            help='Project to fetch the data into (Format: foo/bar)',
        )
    def handle(self, *args, **options):
        backend_name = options.get("BACKEND")
        job_id = options.get("JOBID")
        group_slug, project_slug = options.get("PROJECT").split('/')
        backend = Backend.objects.get(name=backend_name)
        group, _ = Group.objects.get_or_create(slug=group_slug)
        project, _ = group.projects.get_or_create(slug=project_slug)
        build = project.builds.create(version=str(time.time()))
        testjob = backend.test_jobs.create(target=project, job_id=job_id, target_build=build)
        if options.get("background"):
            fetch.apply_async(args=(testjob.id,), task_id=task_id(testjob))
        else:
            backend.fetch(testjob.id)
|
class Command(BaseCommand):
    def add_arguments(self, parser):
        pass
    def handle(self, *args, **options):
        pass
| 3 | 0 | 20 | 2 | 18 | 0 | 2 | 0 | 1 | 2 | 1 | 0 | 2 | 0 | 2 | 2 | 43 | 6 | 37 | 12 | 34 | 0 | 19 | 12 | 16 | 2 | 1 | 1 | 3 |
145,777 |
Linaro/squad
|
Linaro_squad/squad/ci/management/commands/listen.py
|
squad.ci.management.commands.listen.ListenerManager
|
class ListenerManager(object):
    def __init__(self):
        self.__processes__ = {}
        self.__fields__ = {}
    def run(self):
        self.setup_signals()
        self.wait_for_setup()
        self.loop()
        self.cleanup()
    def setup_signals(self):
        # make SIGTERM equivalent to SIGINT (e.g. control-c)
        signal.signal(signal.SIGTERM, signal.getsignal(signal.SIGINT))
    def wait_for_setup(self):
        n = 0
        while n < 24:  # wait up to 2 min
            try:
                Backend.objects.count()
                logger.info("listener manager started")
                return
            except OperationalError:
                logger.info("Waiting for database to be up; will retry in 5s ...")
                time.sleep(5)
                n += 1
        logger.error("Timed out waiting for database to be up")
        sys.exit(1)
    def keep_listeners_running(self):
        ids = list(self.__processes__.keys())
        for backend in Backend.objects.all():
            process = self.__processes__.get(backend.id)
            if process:
                # listen disabled; stop
                if not backend.listen_enabled:
                    self.stop(backend.id)
                # already running: restart if needed; or if process is dead: restart
                elif fields(backend) != self.__fields__[backend.id] or process.poll() is not None:
                    self.stop(backend.id)
                    self.start(backend)
            else:
                # not running, just start
                if backend.listen_enabled:
                    self.start(backend)
            if backend.id in ids:
                ids.remove(backend.id)
        # remaining backends were removed from the database, stop them
        for backend_id in ids:
            self.stop(backend_id)
    def start(self, backend):
        argv = [sys.executable, '-m', 'squad.manage', 'listen', backend.name]
        listener = subprocess.Popen(argv)
        self.__processes__[backend.id] = listener
        self.__fields__[backend.id] = fields(backend)
    def loop(self):
        try:
            while True:
                self.keep_listeners_running()
                # FIXME: ideally we should have a blocking call here that waits
                # for a change to happen in the database, but we didn't find a
                # simple/portable way of doing that yet. Let's just sleep for a
                # few seconds instead, for now.
                time.sleep(60)
        except KeyboardInterrupt:
            pass  # cleanup() will terminate sub-processes
    def cleanup(self):
        for backend_id in list(self.__processes__.keys()):
            self.stop(backend_id)
    def stop(self, backend_id):
        process = self.__processes__[backend_id]
        if not process.poll():
            process.terminate()
            process.wait()
        self.__processes__.pop(backend_id)
|
class ListenerManager(object):
    def __init__(self):
        pass
    def run(self):
        pass
    def setup_signals(self):
        pass
    def wait_for_setup(self):
        pass
    def keep_listeners_running(self):
        pass
    def start(self, backend):
        pass
    def loop(self):
        pass
    def cleanup(self):
        pass
    def stop(self, backend_id):
        pass
| 10 | 0 | 8 | 0 | 7 | 1 | 2 | 0.18 | 1 | 4 | 1 | 0 | 9 | 2 | 9 | 9 | 82 | 11 | 62 | 21 | 52 | 11 | 60 | 21 | 50 | 8 | 1 | 3 | 22 |
145,778 |
Linaro/squad
|
Linaro_squad/squad/ci/management/commands/listen.py
|
squad.ci.management.commands.listen.Listener
|
class Listener(object):
    def __init__(self, backend):
        self.backend = backend
        self.implementation = backend.get_implementation()
    def run(self):
        backend = self.backend
        impl = self.implementation
        logger.info("Backend %s starting" % backend.name)
        signal.signal(signal.SIGINT, self.stop)
        signal.signal(signal.SIGTERM, self.stop)
        impl.listen()
        logger.info("Backend %s exited on its own" % backend.name)
    def stop(self, signal, stack_frame):
        logger.info("Backend %s finishing ..." % self.backend.name)
        sys.exit()
|
class Listener(object):
    def __init__(self, backend):
        pass
    def run(self):
        pass
    def stop(self, signal, stack_frame):
        pass
| 4 | 0 | 5 | 0 | 5 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 3 | 2 | 3 | 3 | 19 | 4 | 15 | 8 | 11 | 0 | 15 | 8 | 11 | 1 | 1 | 0 | 3 |
145,779 |
Linaro/squad
|
Linaro_squad/squad/ci/management/commands/listen.py
|
squad.ci.management.commands.listen.Command
|
class Command(BaseCommand):
    help = """Listens for "live" test results from CI backends"""
    def add_arguments(self, parser):
        parser.add_argument(
            'BACKEND',
            nargs='?',
            type=str,
            help='Backend name to listen to. If omitted, start the master process.',
        )
    def handle(self, *args, **options):
        backend_name = options.get("BACKEND")
        if backend_name:
            backend = Backend.objects.get(name=backend_name)
            Listener(backend).run()
        else:
            ListenerManager().run()
|
class Command(BaseCommand):
    def add_arguments(self, parser):
        pass
    def handle(self, *args, **options):
        pass
| 3 | 0 | 7 | 0 | 7 | 0 | 2 | 0 | 1 | 4 | 3 | 0 | 2 | 0 | 2 | 2 | 18 | 2 | 16 | 6 | 13 | 0 | 10 | 6 | 7 | 2 | 1 | 1 | 3 |
145,780 |
Linaro/squad
|
Linaro_squad/squad/ci/management/commands/create_tuxsuite_boot_tests.py
|
squad.ci.management.commands.create_tuxsuite_boot_tests.Command
|
class Command(BaseCommand):
    help = """Create boot retroactive tests for TuxSuite backends"""
    def add_arguments(self, parser):
        parser.add_argument(
            "--project",
            required=True,
            help="Project to fetch the data into (Format: foo/bar)",
        )
        parser.add_argument(
            "--days",
            required=True,
            help="How many days going backwards tests should be checked on TuxSuite. Maximum is 90 days",
        )
    def get_build_name(self, testjob):
        test_url = testjob.url
        if test_url not in cache:
            cache[test_url] = requests.get(test_url).json()
        build_ksuid = cache[test_url].get("waiting_for")
        if build_ksuid in [None, ""]:
            logger.warning(f"No 'waiting_for' in {test_url}: {cache[test_url]}")
            return None
        _, _, test_ksuid = testjob.backend.get_implementation().parse_job_id(testjob.job_id)
        build_url = test_url.replace(test_ksuid, build_ksuid).replace("tests", "builds")
        if build_url not in cache:
            cache[build_url] = requests.get(build_url).json()
        build_metadata = cache[build_url]
        if "toolchain" not in build_metadata or "kconfig" not in build_metadata:
            logger.warning(f"No 'toolchain' or 'kconfig' in {build_url}: {cache[build_url]}")
            return None
        return testjob.backend.get_implementation().generate_test_name(build_metadata)
    def get_boot_result(self, testjob):
        test_url = testjob.url
        if test_url not in cache:
            cache[test_url] = requests.get(test_url).json()
        return cache[test_url]["results"]["boot"]
    def handle(self, *args, **options):
        """
        Background
        TuxSuite backends run builds and tests. For tests, by default we did not
        keep a boot test. That is being implemented now (date of this commit), so
        we needed a way to retroactively add such boot tests.
        """
        group_slug, project_slug = options.get("project").split("/")
        group = Group.objects.get(slug=group_slug)
        project = group.projects.get(slug=project_slug)
        testjobs = TestJob.objects.filter(job_id__startswith="TEST", backend__implementation_type="tuxsuite", target=project).prefetch_related("backend")
        logger.info(f"Working on {testjobs.count()} testjobs")
        tests_created = 0
        tests_existing = 0
        bad_jobs = 0
        suite, created = project.suites.get_or_create(slug="boot")
        for testjob in testjobs:
            testrun = testjob.testrun
            if testrun is None:
                bad_jobs += 1
                continue
            print(".", end="")
            build_name = testrun.metadata.get("build_name")
            if build_name is None:
                build_name = self.get_build_name(testjob)
                if build_name is None:
                    logger.info(f"Seems like Tuxsuite no longer keeps {testjob.url}, aborting now")
                    break
            boot_test_name = build_name
            metadata, _ = SuiteMetadata.objects.get_or_create(kind="test", name=boot_test_name, suite="boot")
            if testrun.tests.filter(metadata=metadata).exists():
                print(":", end="")
                tests_existing += 1
                continue
            boot_result = self.get_boot_result(testjob)
            testrun.tests.create(
                build=testrun.build,
                environment=testrun.environment,
                metadata=metadata,
                result=(boot_result == "pass"),
                suite=suite,
            )
            tests_created += 1
        logger.info(f"Done: {tests_created} tests created, {tests_existing} tests exist already and {bad_jobs} jobs did not generate testruns")
|
class Command(BaseCommand):
    def add_arguments(self, parser):
        pass
    def get_build_name(self, testjob):
        pass
    def get_boot_result(self, testjob):
        pass
    def handle(self, *args, **options):
        '''
        Background
        TuxSuite backends run builds and tests. For tests, by default we did not
        keep a boot test. That is being implemented now (date of this commit), so
        we needed a way to retroactively add such boot tests.
        '''
        pass
| 5 | 1 | 24 | 5 | 18 | 2 | 4 | 0.08 | 1 | 2 | 2 | 0 | 4 | 0 | 4 | 4 | 102 | 23 | 73 | 26 | 68 | 6 | 59 | 26 | 54 | 6 | 1 | 2 | 14 |
145,781 |
Linaro/squad
|
Linaro_squad/test/core/test_import_data.py
|
test.core.test_import_data.ImportTest
|
class ImportTest(TestCase):
    def setUp(self):
        d = os.path.join(os.path.dirname(__file__), 'test_import_data_input')
        call_command('import_data', '--silent', 'foo/bar', d)
    def test_import_basics(self):
        group = Group.objects.get(slug='foo')
        project = group.projects.get(slug='bar')
        self.assertEqual(2, project.builds.count())
        builds = [row['version'] for row in project.builds.values('version')]
        self.assertEqual(['1', '2'], sorted(builds))
        self.assertEqual(1, project.builds.all()[0].test_runs.count())
        self.assertEqual(1, project.builds.all()[1].test_runs.count())
    def test_import_dates(self):
        dates = [t.datetime for t in TestRun.objects.all()]
        self.assertIsNotNone(dates[0])
        self.assertEqual(dates[0], dates[1])
    def test_import_metrics(self):
        self.assertEqual(2, Metric.objects.count())
    def test_import_tests(self):
        self.assertEqual(1, Test.objects.count())
    def test_import_attachments(self):
        t = Build.objects.get(version='2').test_runs.last()
        self.assertIsNotNone(t.attachments.get(filename='screenshot.png'))
|
class ImportTest(TestCase):
    def setUp(self):
        pass
    def test_import_basics(self):
        pass
    def test_import_dates(self):
        pass
    def test_import_metrics(self):
        pass
    def test_import_tests(self):
        pass
    def test_import_attachments(self):
        pass
| 7 | 0 | 4 | 0 | 4 | 0 | 1 | 0 | 1 | 4 | 4 | 0 | 6 | 0 | 6 | 6 | 31 | 8 | 23 | 13 | 16 | 0 | 23 | 13 | 16 | 1 | 1 | 0 | 6 |
145,782 |
Linaro/squad
|
Linaro_squad/squad/core/comparison.py
|
squad.core.comparison.TestComparison
|
class TestComparison(BaseComparison):
__test__ = False
def __init__(self, *builds, regressions_and_fixes_only=False):
self.__intermittent__ = {}
self.tests_with_issues = {}
self.__failures__ = None
self.regressions_and_fixes_only = regressions_and_fixes_only
BaseComparison.__init__(self, *builds)
def __extract_results__(self):
# New implementation below is only stable for getting regressions and fixes
# that is used for receiving tests and generating ProjectStatus.regressions and fixes
# It is still not good for applying transitions and getting a comparison
# results table, for that, use legacy code, which is slow and eats up lots
# of memory
if self.regressions_and_fixes_only:
self.__new_extract_results__()
return
test_runs = models.TestRun.objects.filter(
build__in=self.builds,
).prefetch_related(
'build',
'environment',
).only('build', 'environment')
test_runs_ids = {}
for test_run in test_runs:
build = test_run.build
env = test_run.environment.slug
self.all_environments.add(env)
self.environments[build].add(env)
if test_runs_ids.get(test_run.id, None) is None:
test_runs_ids[test_run.id] = (build, env)
for ids in split_dict(test_runs_ids, chunk_size=100):
self.__extract_test_results__(ids)
self.__resolve_intermittent_tests__()
self.results = OrderedDict(sorted(self.results.items()))
for build in self.builds:
self.environments[build] = sorted(self.environments[build])
def __extract_test_results__(self, test_runs_ids):
self.__failures__ = OrderedDict()
tests = models.Test.objects.filter(test_run_id__in=test_runs_ids.keys()).annotate(
suite_slug=F('suite__slug'),
).prefetch_related('metadata').defer('log').order_by()
for test in tests:
build, env = test_runs_ids.get(test.test_run_id)
full_name = join_name(test.suite_slug, test.name)
if full_name not in self.results:
self.results[full_name] = OrderedDict()
key = (build, env)
if key in self.results[full_name]: # Duplicate found.
if not isinstance(self.results[full_name][key], tuple):
# Test confidence is NOT already caclulated.
self.results[full_name][key] = test_confidence(test)
else:
self.results[full_name][key] = test.status
if test.has_known_issues:
self.tests_with_issues[test.id] = (full_name, env)
if test.status == 'fail' and build.id == self.builds[-1].id:
if env not in self.__failures__:
self.__failures__[env] = []
self.__failures__[env].append(test)
def __resolve_intermittent_tests__(self):
if len(self.tests_with_issues) == 0:
return
for chunk in split_dict(self.tests_with_issues, chunk_size=100):
tests = models.Test.objects.filter(id__in=chunk.keys()).prefetch_related(
'known_issues'
).only('known_issues')
for test in tests.all():
for issue in test.known_issues.all():
if issue.intermittent:
self.__intermittent__[chunk[test.id]] = True
break
__regressions__ = None
__fixes__ = None
@property
def regressions(self):
if self.__regressions__ is None:
self.__regressions__ = self.__status_changes__(('pass', 'fail'))
return self.__regressions__
@property
def fixes(self):
if self.__fixes__ is None:
self.__fixes__ = self.__status_changes__(
('fail', 'pass'),
('xfail', 'pass'),
predicate=lambda test, env: (test, env) not in self.__intermittent__
)
return self.__fixes__
@property
def failures(self):
if self.__failures__ is None:
self.__failures__ = OrderedDict()
build = self.builds[-1]
failures = build.tests.filter(result=False, has_known_issues=False).prefetch_related('environment')
for failure in failures.all():
env = failure.environment.slug
if env not in self.__failures__:
self.__failures__[env] = []
self.__failures__[env].append(failure)
return self.__failures__
def apply_transitions(self, transitions):
if transitions is None or len(transitions) == 0:
return
filtered = self.__status_changes__(*transitions)
self.all_environments = set(filtered.keys())
self.environments = OrderedDict({build: self.all_environments for build in self.builds})
self.__regressions__ = None
self.__fixes__ = None
self.__diff__ = None
if len(filtered) == 0:
self.results = OrderedDict()
return
# filter results
all_tests = set(reduce(lambda a, b: a + b, filtered.values()))
self.results = OrderedDict({full_name: self.results[full_name] for full_name in all_tests})
self.results = OrderedDict(sorted(self.results.items()))
def __status_changes__(self, *transitions, predicate=lambda test, env: True):
if len(self.builds) < 2:
return {}
comparisons = OrderedDict()
after = self.builds[-1] # last
before = self.builds[-2] # second to last
for env in self.all_environments:
comparison_list = []
# Let's try to avoid using .diff; it's only used here
# and in core/notification.py to determine whether there is a change
# between builds
for test, results in self.diff.items():
results_after = results.get((after, env), 'n/a')
results_before = results.get((before, env), 'n/a')
if (results_before, results_after) in transitions:
if predicate(test, env):
comparison_list.append(test)
if comparison_list:
comparisons[env] = comparison_list
return comparisons
def __new_extract_results__(self):
"""
The target build should be the most recent one and the baseline
an older build.
If a test in the target build has result=False and its match in the
baseline build has result=True, it's considered to be a regression.
If a test in the target build has result=True and its match in the
baseline build has result=False, it's considered to be a fix.
| baseline / target | test.result == False | test.result == True |
|----------------------|----------------------|---------------------|
| test.result == False | - | fix |
| test.result == True | regression | - |
We added a reference to Build and Environment to the Test model so that
regressions and fixes are easy and cheap to compute in the database
"""
self.__diff__ = defaultdict(lambda: defaultdict(lambda: defaultdict()))
self.__regressions__ = OrderedDict()
self.__fixes__ = OrderedDict()
if self.builds[0] is None:
# No baseline is present, then no comparison is needed
return
baseline = self.builds[0]
target = self.builds[1]
query = self.base_sql.copy()
query['select'].append('target.result')
query['select'].append('target.has_known_issues')
query['from'].append('core_test baseline')
query['from'].append('core_test target')
sql = self.__render_sql__(query)
tests = [t for t in models.Test.objects.raw(sql)]
prefetch_related_objects(tests, 'metadata', 'suite')
env_ids = [t.environment_id for t in tests]
envs = {e.id: e for e in models.Environment.objects.filter(id__in=env_ids).all()}
envs_slugs = sorted({e.slug for e in envs.values()})
for build in self.builds:
self.environments[build] = envs_slugs
fixed_tests = defaultdict(set)
regressions = defaultdict(set)
fixes = defaultdict(set)
for test in tests:
env_id = test.environment_id
full_name = test.full_name
if full_name not in self.results:
self.results[full_name] = OrderedDict()
baseline_key = (baseline, envs[env_id].slug)
target_key = (target, envs[env_id].slug)
if test.status == 'fail':
regressions[env_id].add(full_name)
self.results[full_name][target_key] = 'fail'
self.results[full_name][baseline_key] = 'pass'
elif test.status == 'pass':
fixes[env_id].add(full_name)
fixed_tests[env_id].add(test.metadata_id)
self.results[full_name][target_key] = 'pass'
self.results[full_name][baseline_key] = 'fail'
self.results = OrderedDict(sorted(self.results.items()))
for env_id in regressions.keys():
self.__regressions__[envs[env_id].slug] = list(regressions[env_id])
# It's not a fix if baseline test is intermittent for a given environment:
# - test.has_known_issues == True and
# - test.known_issues[env].intermittent == True
fixed_tests_environment_slugs = [envs[env_id].slug for env_id in fixed_tests.keys()]
intermittent_fixed_tests = self.__intermittent_fixed_tests__(fixed_tests, fixed_tests_environment_slugs)
for env_id in fixes.keys():
env_slug = envs[env_id].slug
test_list = [test for test in fixes[env_id] if (test, env_slug) not in intermittent_fixed_tests]
if len(test_list):
self.__fixes__[env_slug] = test_list
for env in envs.values():
if env.slug in self.__regressions__:
for test in self.__regressions__[env.slug]:
self.__diff__[test][target][env.slug] = False
self.__diff__[test][baseline][env.slug] = True
if env.slug in self.__fixes__:
for test in self.__fixes__[env.slug]:
self.__diff__[test][target][env.slug] = True
self.__diff__[test][baseline][env.slug] = False
def __intermittent_fixed_tests__(self, fixed_tests, environment_slugs):
intermittent_fixed_tests = {}
if len(fixed_tests) == 0:
return intermittent_fixed_tests
metadata_ids = []
for env_id in fixed_tests.keys():
metadata_ids += list(fixed_tests[env_id])
baseline = self.builds[0]
baseline_tests = models.Test.objects.filter(
build=baseline,
metadata_id__in=metadata_ids,
result=False,
has_known_issues=True
).prefetch_related('known_issues', 'suite', 'metadata', 'environment').defer('log').order_by()
if self.__same_projects__():
environment_ids = list(fixed_tests.keys())
baseline_tests = baseline_tests.filter(environment_id__in=environment_ids)
else:
baseline_tests = baseline_tests.filter(environment__slug__in=environment_slugs)
for test in baseline_tests.all():
for issue in test.known_issues.all():
if issue.intermittent:
key = (test.full_name, test.environment.slug)
intermittent_fixed_tests[key] = True
return intermittent_fixed_tests
|
class TestComparison(BaseComparison):
def __init__(self, *builds, regressions_and_fixes_only=False):
pass
def __extract_results__(self):
pass
def __extract_test_results__(self, test_runs_ids):
pass
def __resolve_intermittent_tests__(self):
pass
@property
def regressions(self):
pass
@property
def fixes(self):
pass
@property
def failures(self):
pass
def apply_transitions(self, transitions):
pass
def __status_changes__(self, *transitions, predicate=lambda test, env: True):
pass
def __new_extract_results__(self):
'''
The target build should be the most recent one and the baseline
an older build.
If a test in the target build has result=False and its match in the
baseline build has result=True, it's considered to be a regression.
If a test in the target build has result=True and its match in the
baseline build has result=False, it's considered to be a fix.
| baseline / target | test.result == False | test.result == True |
|----------------------|----------------------|---------------------|
| test.result == False | - | fix |
| test.result == True | regression | - |
We added a reference to Build and Environment to the Test model so that
regressions and fixes are easy and cheap to compute in the database
'''
pass
def __intermittent_fixed_tests__(self, fixed_tests, environment_slugs):
pass
| 15 | 1 | 26 | 5 | 18 | 3 | 6 | 0.15 | 1 | 7 | 3 | 0 | 11 | 8 | 11 | 21 | 302 | 64 | 210 | 86 | 195 | 31 | 186 | 83 | 174 | 15 | 2 | 4 | 61 |
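A minimal sketch of the (before, after) transition logic implemented by __status_changes__ above; the nested dict stands in for TestComparison.results, and builds/environments are plain strings here instead of SQUAD's Django objects.

from collections import OrderedDict

REGRESSION = ('pass', 'fail')
FIXES = (('fail', 'pass'), ('xfail', 'pass'))

def status_changes(results, before, after, envs, transitions):
    changes = OrderedDict()
    for env in envs:
        # a test transitions when its (before, after) status pair matches
        hits = [
            test for test, r in results.items()
            if (r.get((before, env), 'n/a'), r.get((after, env), 'n/a')) in transitions
        ]
        if hits:
            changes[env] = hits
    return changes

results = {
    'suite/a': {('b1', 'x86'): 'pass', ('b2', 'x86'): 'fail'},  # regression
    'suite/b': {('b1', 'x86'): 'fail', ('b2', 'x86'): 'pass'},  # fix
}
print(status_changes(results, 'b1', 'b2', ['x86'], [REGRESSION]))  # OrderedDict([('x86', ['suite/a'])])
print(status_changes(results, 'b1', 'b2', ['x86'], list(FIXES)))   # OrderedDict([('x86', ['suite/b'])])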
145,783 |
Linaro/squad
|
Linaro_squad/squad/core/data.py
|
squad.core.data.JSONMetricDataParser
|
class JSONMetricDataParser(object):
"""
Parser for JSON metric data
"""
@staticmethod
def __call__(json_text):
if json_text is None or json_text == '':
return []
input_data = json.loads(json_text)
data = []
for metric, value_dict in input_data.items():
unit = None
if type(value_dict) is dict:
unit = value_dict.get('unit', None)
value = value_dict.get('value', None)
else:
value = value_dict
group_name, name = parse_name(metric)
result, measurements = parse_metric(value)
if result is not None and not (math.isnan(result) or math.isinf(result)):
data.append({
"name": name,
"group_name": group_name,
"result": result,
"measurements": measurements,
"unit": unit,
})
return data
|
class JSONMetricDataParser(object):
'''
Parser for JSON metric data
'''
@staticmethod
def __call__(json_text):
pass
| 3 | 1 | 24 | 1 | 23 | 0 | 5 | 0.12 | 1 | 2 | 0 | 0 | 0 | 0 | 1 | 1 | 30 | 2 | 25 | 10 | 22 | 3 | 17 | 9 | 15 | 5 | 1 | 2 | 5 |
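For reference, a sketch of the two value shapes the parser above accepts (a bare number, or a dict carrying "value" and an optional "unit"); the output keys come straight from the data.append call, while the exact splitting done by parse_name/parse_metric is assumed from context.

# Hypothetical input illustrating both accepted shapes
metrics_json = '''
{
    "benchmark/dhrystone": 1234.5,
    "benchmark/latency": {"value": 42, "unit": "ms"}
}
'''
# JSONMetricDataParser()(metrics_json) would yield entries shaped like:
# {"name": "latency", "group_name": "benchmark", "result": 42,
#  "measurements": ..., "unit": "ms"}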
145,784 |
Linaro/squad
|
Linaro_squad/squad/core/data.py
|
squad.core.data.JSONTestDataParser
|
class JSONTestDataParser(object):
"""
Parser for test data as JSON string
"""
@staticmethod
def __call__(test_data):
if test_data is None or test_data == '':
return []
input_data = json.loads(test_data)
data = []
for key, value in input_data.items():
group_name, test_name = parse_name(key)
result = value
log = None
if isinstance(value, dict):
result = value.get('result', None)
log = value.get('log', None)
data.append({
"group_name": group_name,
"test_name": test_name,
"pass": parse_test_result(result),
"log": log
})
return data
|
class JSONTestDataParser(object):
'''
Parser for test data as JSON string
'''
@staticmethod
def __call__(test_data):
pass
| 3 | 1 | 20 | 1 | 19 | 0 | 4 | 0.14 | 1 | 1 | 0 | 0 | 0 | 0 | 1 | 1 | 26 | 2 | 21 | 9 | 18 | 3 | 15 | 8 | 13 | 4 | 1 | 2 | 4 |
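Similarly, a sketch of the input shapes the test parser above accepts; parse_test_result (assumed from context) normalizes values such as "pass"/"fail" to a boolean.

# Hypothetical input: a bare result value, or a dict with "result"
# and an optional "log"
tests_json = '''
{
    "kselftest/timers": "pass",
    "kselftest/rtc": {"result": "fail", "log": "rtc0: device not found"}
}
'''
# JSONTestDataParser()(tests_json) would yield entries shaped like:
# {"group_name": "kselftest", "test_name": "rtc", "pass": False,
#  "log": "rtc0: device not found"}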
145,785 |
Linaro/squad
|
Linaro_squad/squad/core/history.py
|
squad.core.history.TestHistory
|
class TestHistory(object):
__test__ = False
def __init__(self, project, full_test_name, top=None, page=1, per_page=20):
suite_slug, test_name = parse_name(full_test_name)
self.test = full_test_name
self.paginator = Paginator(project.builds.reverse(), per_page)
if top:
self.number = 0
builds = project.builds.filter(datetime__lte=top.datetime).reverse()[0:per_page - 1]
else:
self.number = page
builds = self.paginator.page(page)
if len(builds) == 0:
raise Build.DoesNotExist
self.top = builds[0]
issues_by_env = {}
for issue in KnownIssue.active_by_project_and_test(project, full_test_name).all():
for env in issue.environments.all():
if env.id not in issues_by_env:
issues_by_env[env.id] = []
issues_by_env[env.id].append(issue)
suite = project.suites.prefetch_related('metadata').get(slug=suite_slug)
metadata = SuiteMetadata.objects.get(kind='test', suite=suite_slug, name=test_name)
results = defaultdict()
environments_ids = set()
for build in builds:
results[build] = defaultdict(list)
for test in build.tests.filter(metadata=metadata).order_by():
test.metadata = metadata
test.suite = suite
results[build][test.environment_id].append(test)
environments_ids.add(test.environment_id)
results_without_duplicates = defaultdict()
for build in results:
results_without_duplicates[build] = defaultdict()
for env in results[build]:
tests = results[build][env]
is_duplicate = len(tests) > 1
known_issues = issues_by_env.get(tests[0].environment_id)
result = TestResult(tests[0], suite, metadata, known_issues, is_duplicate, list_of_duplicates=tests)
results_without_duplicates[build][env] = result
self.environments = Environment.objects.filter(id__in=environments_ids).order_by('slug')
self.results = results_without_duplicates
|
class TestHistory(object):
def __init__(self, project, full_test_name, top=None, page=1, per_page=20):
pass
| 2 | 0 | 50 | 9 | 41 | 0 | 10 | 0 | 1 | 7 | 5 | 0 | 1 | 6 | 1 | 1 | 54 | 11 | 43 | 25 | 41 | 0 | 42 | 25 | 40 | 10 | 1 | 3 | 10 |
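A tiny self-contained sketch of the de-duplication step above: when one (build, environment) cell holds several runs of the same test, the cell is collapsed to a single entry and flagged for confidence scoring (test_confidence in SQUAD); builds and environments are plain strings here.

from collections import defaultdict

runs = [('build1', 'arm64', 'pass'), ('build1', 'arm64', 'fail'),
        ('build2', 'arm64', 'pass')]

cells = defaultdict(list)
for build, env, status in runs:
    cells[(build, env)].append(status)

for key, statuses in cells.items():
    if len(statuses) > 1:
        print(key, 'duplicate: needs confidence score over', statuses)
    else:
        print(key, statuses[0])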
145,786 |
Linaro/squad
|
Linaro_squad/squad/core/history.py
|
squad.core.history.TestResult
|
class TestResult(object):
__test__ = False
class TestRunStatus(object):
def __init__(self, test_run_id, suite):
self.test_run_id = test_run_id
self.suite = suite
def __init__(self, test, suite, metadata, known_issues, is_duplicate=False, list_of_duplicates=None):
self.test = test
self.suite = suite
self.known_issues = known_issues
if is_duplicate:
self.status, self.confidence_score = test_confidence(None, list_of_duplicates=list_of_duplicates)
else:
self.status, self.confidence_score = (test.status, None)
self.test_run_id = test.test_run_id
self.test_run_status = self.TestRunStatus(self.test_run_id, self.suite)
self.info = {
"test_description": metadata.description if metadata else '',
"test_instructions": metadata.instructions_to_reproduce if metadata else '',
"suite_instructions": self.suite.metadata.instructions_to_reproduce if self.suite.metadata else '',
"test_log": test.log
}
|
class TestResult(object):
class TestRunStatus(object):
def __init__(self, test_run_id, suite):
pass
def __init__(self, test, suite, metadata, known_issues, is_duplicate=False, list_of_duplicates=None):
pass
| 4 | 0 | 10 | 0 | 10 | 0 | 3 | 0 | 1 | 1 | 1 | 0 | 1 | 8 | 1 | 1 | 25 | 3 | 22 | 14 | 18 | 0 | 16 | 14 | 12 | 5 | 1 | 1 | 6 |
145,787 |
Linaro/squad
|
Linaro_squad/squad/ci/exceptions.py
|
squad.ci.exceptions.TemporarySubmissionIssue
|
class TemporarySubmissionIssue(SubmissionIssue):
retry = True
|
class TemporarySubmissionIssue(SubmissionIssue):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 4 | 0 | 0 |
145,788 |
Linaro/squad
|
Linaro_squad/squad/ci/exceptions.py
|
squad.ci.exceptions.TemporaryFetchIssue
|
class TemporaryFetchIssue(FetchIssue):
retry = True
|
class TemporaryFetchIssue(FetchIssue):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 4 | 0 | 0 |
145,789 |
Linaro/squad
|
Linaro_squad/squad/ci/exceptions.py
|
squad.ci.exceptions.SubmissionIssue
|
class SubmissionIssue(Exception):
retry = False
|
class SubmissionIssue(Exception):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 10 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 3 | 0 | 0 |
145,790 |
Linaro/squad
|
Linaro_squad/squad/ci/exceptions.py
|
squad.ci.exceptions.FetchIssue
|
class FetchIssue(Exception):
retry = False
|
class FetchIssue(Exception):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 10 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 3 | 0 | 0 |
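The four exception classes above encode a small retry protocol: the class names the failing stage (submission vs. fetch) and the retry flag says whether the failure is transient. A sketch of how a worker might honour it, relying on the FetchIssue hierarchy defined above; do_fetch and requeue are hypothetical stand-ins.

def handle(do_fetch, requeue):
    try:
        do_fetch()
    except FetchIssue as issue:
        if issue.retry:
            requeue()   # TemporaryFetchIssue: try again later
        else:
            raise       # permanent FetchIssue: surface the error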
145,791 |
Linaro/squad
|
Linaro_squad/squad/ci/backend/tuxsuite.py
|
squad.ci.backend.tuxsuite.Backend
|
class Backend(BaseBackend):
"""
TuxSuite backend is intended for processing data coming from TuxTest
"""
def has_resubmit(self):
return False
def has_cancel(self):
return True
@staticmethod
def get_session():
global requests_session
if requests_session is None:
retry_strategy = Retry(
total=5,
backoff_factor=1,
status_forcelist=[429, 500, 502, 503, 504])
adapter = HTTPAdapter(max_retries=retry_strategy)
requests_session = requests.Session()
requests_session.mount('http://', adapter)
requests_session.mount('https://', adapter)
return requests_session
"""
TuxSuite backend is intended for processing data coming from TuxTest
"""
def generate_test_name(self, results):
"""
Generates a name based on toolchain and config. Here are a few examples:
1) toolchain: gcc-9, kconfig: ['defconfig']
-> returns 'gcc-9-defconfig'
2) toolchain: gcc-9, kconfig: ['defconfig', 'CONFIG_LALA=y']
-> returns 'gcc-9-defconfig-6bbfee93'
-> hashlib.sha1('CONFIG_LALA=y')[0:8]
3) toolchain: gcc-9, kconfig: ['defconfig', 'CONFIG_LALA=y', 'https://some.com/kconfig']
-> returns 'gcc-9-defconfig-12345678'
-> hashlib.sha1(
sorted(
'CONFIG_LALA=y',
'https://some.com/kconfig',
)
)
"""
name = results['toolchain']
# If there is more than one kconfig entry (fragments or URLs),
# hash the extra ones and append a short digest to the name so
# that different configurations produce distinct names
configs = results['kconfig']
name += f'-{configs[0]}'
configs = configs[1:]
if len(configs):
sha = hashlib.sha1()
for config in configs:
sha.update(f'{config}'.encode())
name += '-' + sha.hexdigest()[0:8]
return name
def parse_job_id(self, job_id):
"""
Parsing the job id means getting back specific TuxSuite information
from job_id. Ex:
Given a job_id = "BUILD:linaro@anders#1yPYGaOEPNwr2pCqBgONY43zORq",
the return value should be a tuple like
('BUILD', 'linaro@anders', '1yPYGaOEPNwr2pCqBgONY43zORq')
The leading string determines the type of the tuxsuite object:
- BUILD
- OEBUILD
- TEST
"""
regex = r'^(OEBUILD|BUILD|TEST):([0-9a-z_\-.]+@[0-9a-z_\-.]+)#([a-zA-Z0-9]+)$'
matches = re.findall(regex, job_id)
if len(matches) == 0:
raise FetchIssue(f'Job id "{job_id}" does not match "{regex}"')
# The regex above is supposed to find only one match
return matches[0]
def generate_job_id(self, result_type, result):
"""
The job id for TuxSuite results is generated using 3 pieces of info:
1. If it's either "BUILD", "OEBUILD" or "TEST" result;
2. The TuxSuite project. Ex: "linaro/anders"
3. The ksuid of the object. Ex: "1yPYGaOEPNwr2pfqBgONY43zORp"
A couple examples for job_id are:
- BUILD:linaro@anders#1yPYGaOEPNwr2pCqBgONY43zORq
- OEBUILD:linaro@lkft#2Wetiz7Qs0TbtfPgPT7hUObWqDK
- TEST:arm@bob#1yPYGaOEPNwr2pCqBgONY43zORp
Then it's up to SQUAD's TuxSuite backend to parse the job_id
and fetch results properly.
"""
_type = result_type.upper()
project = result["project"].replace("/", "@")
uid = result["uid"]
return f"{_type}:{project}#{uid}"
def fetch_url(self, *urlbits):
url = reduce(urljoin, urlbits)
try:
response = Backend.get_session().request("GET", url)
except Exception as e:
raise TemporaryFetchIssue(f"Can't retrieve from {url}: {e}")
return response
def fetch_from_results_input(self, test_job):
try:
return json.loads(test_job.input)
except Exception as e:
logger.error(f"Can't parse results from job's input: {e}")
return None
def set_build_name(self, test_job, job_url, results, metadata, settings):
"""
Tuxsuite allows plans with builds and tests within.
Some of these plans also support "special tests", which are
kind a sanity test to run before spinning a heavy load of tests.
Here's the default plan hierarchy:
- build -> tests
Now with sanity tests in between:
- build -> sanity tests -> tests
SQUAD needs to get to the build level in
order to retrieve the build object and, finally, its
build name attribute
"""
build_id = results['waiting_for']
if build_id is None or build_id.startswith('OEBUILD'):
return
items = build_id.split('#')
if len(items) == 2:
_type = items[0]
_id = items[1]
else:
_type = "BUILD"
_id = items[0]
test_id = results['uid']
try:
# Check if the target build or sanity test is fetched
job_id = self.generate_job_id(_type.lower(), results)
job_id = job_id.replace(test_id, _id)
candidate = TestRun.objects.get(
build=test_job.target_build,
job_id=job_id
)
build_name = candidate.metadata.get('build_name')
if build_name:
metadata['build_name'] = build_name
return
except TestRun.DoesNotExist:
pass
# It is a sanity test; an extra request is needed to get the build id
if _type == 'TEST':
follow_test_url = job_url.replace(test_id, _id)
test_json = self.fetch_url(follow_test_url).json()
build_id = test_json.get('waiting_for')
build_id = build_id.replace('BUILD#', '')
build_url = job_url.replace(test_id, build_id).replace('/tests/', '/builds/')
build_metadata = self.fetch_url(build_url).json()
build_metadata_keys = settings.get('TEST_BUILD_METADATA_KEYS', [])
metadata.update({k: build_metadata.get(k) for k in build_metadata_keys})
if 'toolchain' in build_metadata_keys and 'kconfig' in build_metadata_keys and metadata['build_name'] in [None, '']:
metadata['build_name'] = self.generate_test_name(build_metadata)
def parse_build_results(self, test_job, job_url, results, settings):
required_keys = ['build_status', 'warnings_count', 'download_url', 'retry']
self.__check_required_keys__(required_keys, results)
# Generate generic test/metric name
test_name = results.get('build_name') or self.generate_test_name(results)
test_job.name = test_name
build_status = results['build_status']
if build_status == 'error' and results['retry'] < 2:
# SQUAD should retry fetching the build until retry == 2
raise TemporaryFetchIssue(results.get('status_message', 'TuxSuite Error'))
# Make metadata
metadata_keys = settings.get('BUILD_METADATA_KEYS', [])
metadata = {k: results.get(k) for k in metadata_keys}
# Add extra metadata from metadata file if it exists
self.update_metadata_from_file(results=results, metadata=metadata)
metadata['job_url'] = job_url
metadata['job_id'] = test_job.job_id
metadata['config'] = urljoin(results.get('download_url') + '/', 'config')
metadata['build_name'] = test_name
# Create tests and metrics
tests = {}
metrics = {}
completed = True
if results['retry'] >= 2:
# This indicates that TuxSuite gave up trying to work on this build
status = 'Incomplete'
tests[f'build/{test_name}'] = 'skip'
logs = ''
else:
status = 'Complete'
tests[f'build/{test_name}'] = build_status
metrics[f'build/{test_name}-warnings'] = results['warnings_count']
logs = self.fetch_url(results['download_url'], 'build.log').text
try:
metrics[f'build/{test_name}-duration'] = results['tuxmake_metadata']['results']['duration']['build']
except KeyError:
raise FetchIssue('Missing duration from build results')
return status, completed, metadata, tests, metrics, logs
def parse_oebuild_results(self, test_job, job_url, results, settings):
required_keys = ['download_url', 'result']
self.__check_required_keys__(required_keys, results)
# Make metadata
metadata_keys = settings.get('OEBUILD_METADATA_KEYS', [])
metadata = {k: results.get(k) for k in metadata_keys}
metadata['job_url'] = job_url
metadata['job_id'] = test_job.job_id
sources = results.get('sources')
if sources:
metadata['sources'] = sources
# Create tests and metrics
tests = {}
metrics = {}
completed = True
status = 'Complete'
tests['build/build'] = 'pass' if results['result'] == 'pass' else 'fail'
logs = self.fetch_url(results['download_url'], 'build.log').text
return status, completed, metadata, tests, metrics, logs
def update_metadata_from_file(self, results, metadata):
if "download_url" in results:
download_url = results["download_url"]
try:
metadata_response = self.fetch_url(download_url + '/' + 'metadata.json')
# If fetching the metadata file did not error, decode it as json
if metadata_response.ok:
metadata.update(metadata_response.json())
except TemporaryFetchIssue:
pass
def parse_test_results(self, test_job, job_url, results, settings):
status = 'Complete'
completed = True
tests = {}
metrics = {}
logs = ''
# Pick up some metadata from results
metadata_keys = settings.get('TEST_METADATA_KEYS', [])
metadata = {k: results.get(k) for k in metadata_keys}
# Add extra metadata from metadata file if it exists
self.update_metadata_from_file(results=results, metadata=metadata)
metadata['job_url'] = job_url
metadata['job_id'] = test_job.job_id
# Set job name
try:
results['tests'].remove('boot')
except ValueError:
pass
test_job.name = ','.join(results['tests'])
if results['results'] == {}:
waiting_for = results.get('waiting_for')
if waiting_for is None:
test_job.failure = 'no results'
elif 'BUILD' in waiting_for:
test_job.failure = 'build failed'
else:
test_job.failure = 'sanity test failed'
return status, completed, metadata, tests, metrics, logs
# Fetch results even if the job failed, provided it has results
if results['result'] == 'fail':
test_job.failure = str(results['results'])
elif results['result'] == 'error':
test_job.failure = 'tuxsuite infrastructure error'
return 'Incomplete', completed, metadata, tests, metrics, logs
# If the boot result is unknown, a retry is needed; otherwise, it either passed or failed
if 'unknown' == results['results']['boot']:
return None
# Retrieve TuxRun log
logs = self.fetch_url(job_url + '/', 'logs?format=txt').text
# Follow up the chain and retrieve build name
self.set_build_name(test_job, job_url, results, metadata, settings)
# Create a boot test
boot_test_name = 'boot/' + (metadata.get('build_name') or 'boot')
tests[boot_test_name] = results['results']['boot']
# Really fetch test results
tests_results = self.fetch_url(job_url + '/', 'results').json()
if tests_results.get('error', None) is None:
for suite, suite_tests in tests_results.items():
if suite == 'lava':
continue
suite_name = re.sub(r'^[0-9]+_', '', suite)
for name, test_data in suite_tests.items():
test_name = f'{suite_name}/{name}'
result = test_data['result']
# TODO: Log lines are off coming from TuxRun/LAVA
# test_log = self.get_test_log(log_dict, test)
tests[test_name] = result
return status, completed, metadata, tests, metrics, logs
def fetch(self, test_job):
url = self.job_url(test_job)
if test_job.input:
results = self.fetch_from_results_input(test_job)
test_job.input = None
else:
results = self.fetch_url(url).json()
if results.get('state') != 'finished':
return None
settings = self.__resolve_settings__(test_job)
result_type = self.parse_job_id(test_job.job_id)[0]
parse_results = getattr(self, f'parse_{result_type.lower()}_results')
return parse_results(test_job, url, results, settings)
def job_url(self, test_job):
result_type, tux_project, tux_uid = self.parse_job_id(test_job.job_id)
tux_group, tux_user = tux_project.split('@')
endpoint = f'groups/{tux_group}/projects/{tux_user}/{result_type.lower()}s/{tux_uid}'
return urljoin(self.data.url, endpoint)
def __check_required_keys__(self, required_keys, results):
missing_keys = []
for k in required_keys:
if k not in results:
missing_keys.append(k)
if len(missing_keys):
keys = ', '.join(missing_keys)
results_json = json.dumps(results)
raise FetchIssue(f'{keys} are required and missing from {results_json}')
def __resolve_settings__(self, test_job):
result_settings = self.settings
if getattr(test_job, 'target', None) is not None \
and test_job.target.project_settings is not None:
ps = yaml.safe_load(test_job.target.project_settings) or {}
result_settings.update(ps)
return result_settings
def cancel(self, testjob):
result_type, tux_project, tux_uid = self.parse_job_id(testjob.job_id)
tux_group, tux_user = tux_project.split('@')
endpoint = f'groups/{tux_group}/projects/{tux_user}/{result_type.lower()}s/{tux_uid}/cancel'
url = urljoin(self.data.url, endpoint)
response = requests.post(url)
testjob.fetched = True
testjob.submitted = True
testjob.job_status = "Canceled"
testjob.save()
return response.status_code == 200
def supports_callbacks(self):
return True
def validate_callback(self, request, project):
signature = request.headers.get("x-tux-payload-signature", None)
if signature is None:
raise Exception("tuxsuite request is missing signature headers")
public_key = project.get_setting("TUXSUITE_PUBLIC_KEY")
if public_key is None:
raise Exception("missing tuxsuite public key for this project")
payload = json.loads(request.body)
signature = base64.urlsafe_b64decode(signature)
key = serialization.load_ssh_public_key(public_key.encode("ascii"))
key.verify(
signature,
payload.encode("utf-8"),
ec.ECDSA(hashes.SHA256()),
)
def process_callback(self, json_payload, build, environment, backend):
# The payload coming from TuxSuite is double-encoded: after the
# first json.loads(request.body), the result is still a string
# containing the actual json document. We need to call json.loads()
# once more to get the python dict with all the information we need
json_payload = json.loads(json_payload)
if "kind" not in json_payload or "status" not in json_payload:
raise Exception("`kind` and `status` are required in the payload")
kind = json_payload["kind"]
status = json_payload["status"]
env = status.get("target_arch") or status.get("device") or environment
job_id = self.generate_job_id(kind, status)
try:
# TuxSuite job ids DO NOT repeat, like ever
testjob = TestJob.objects.get(job_id=job_id, target_build=build, environment=env)
except TestJob.DoesNotExist:
testjob = TestJob.objects.create(
backend=backend,
target=build.project,
target_build=build,
environment=env,
submitted=True,
job_id=job_id
)
# Saves the input so it can be processed by the queue
testjob.input = json.dumps(status)
return testjob
|
class Backend(BaseBackend):
def has_resubmit(self):
pass
def has_cancel(self):
pass
@staticmethod
def get_session():
pass
def generate_test_name(self, results):
'''
Generates a name based on toolchain and config. Here are a few examples:
1) toolchain: gcc-9, kconfig: ['defconfig']
-> returns 'gcc-9-defconfig'
2) toolchain: gcc-9, kconfig: ['defconfig', 'CONFIG_LALA=y']
-> returns 'gcc-9-defconfig-6bbfee93'
-> hashlib.sha1('CONFIG_LALA=y')[0:8]
3) toolchain: gcc-9, kconfig: ['defconfig', 'CONFIG_LALA=y', 'https://some.com/kconfig']
-> returns 'gcc-9-defconfig-12345678'
-> hashlib.sha1(
sorted(
'CONFIG_LALA=y',
'https://some.com/kconfig',
)
)
'''
pass
def parse_job_id(self, job_id):
'''
Parsing the job id means getting back specific TuxSuite information
from job_id. Ex:
Given a job_id = "BUILD:linaro@anders#1yPYGaOEPNwr2pCqBgONY43zORq",
the return value should be a tuple like
('BUILD', 'linaro@anders', '1yPYGaOEPNwr2pCqBgONY43zORq')
The leading string determines the type of the tuxsuite object:
- BUILD
- OEBUILD
- TEST
'''
pass
def generate_job_id(self, result_type, result):
'''
The job id for TuxSuite results is generated using 3 pieces of info:
1. If it's either "BUILD", "OEBUILD" or "TEST" result;
2. The TuxSuite project. Ex: "linaro/anders"
3. The ksuid of the object. Ex: "1yPYGaOEPNwr2pfqBgONY43zORp"
A couple examples for job_id are:
- BUILD:linaro@anders#1yPYGaOEPNwr2pCqBgONY43zORq
- OEBUILD:linaro@lkft#2Wetiz7Qs0TbtfPgPT7hUObWqDK
- TEST:arm@bob#1yPYGaOEPNwr2pCqBgONY43zORp
Then it's up to SQUAD's TuxSuite backend to parse the job_id
and fetch results properly.
'''
pass
def fetch_url(self, *urlbits):
pass
def fetch_from_results_input(self, test_job):
pass
def set_build_name(self, test_job, job_url, results, metadata, settings):
'''
Tuxsuite allows plans with builds and tests within.
Some of these plans also support "special tests", which are
kind a sanity test to run before spinning a heavy load of tests.
Here's the default plan hierarchy:
- build -> tests
Now with sanity tests in between:
- build -> sanity tests -> tests
SQUAD needs to get to the build level in
order to retrieve the build object and finally retrieve
its build name attribute
'''
pass
def parse_build_results(self, test_job, job_url, results, settings):
pass
def parse_oebuild_results(self, test_job, job_url, results, settings):
pass
def update_metadata_from_file(self, results, metadata):
pass
def parse_test_results(self, test_job, job_url, results, settings):
pass
def fetch(self, test_job):
pass
def job_url(self, test_job):
pass
def __check_required_keys__(self, required_keys, results):
pass
def __resolve_settings__(self, test_job):
pass
def cancel(self, testjob):
pass
def supports_callbacks(self):
pass
def validate_callback(self, request, project):
pass
def process_callback(self, json_payload, build, environment, backend):
pass
| 23 | 4 | 21 | 4 | 13 | 4 | 3 | 0.33 | 1 | 11 | 4 | 0 | 20 | 0 | 21 | 40 | 459 | 95 | 277 | 116 | 253 | 91 | 252 | 113 | 229 | 12 | 1 | 3 | 62 |
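A self-contained check of the job-id and test-name formats documented in parse_job_id and generate_test_name above; the regex and hashing below mirror those methods, and the sample project/uid values are made up.

import hashlib
import re

job_id = 'TEST:linaro@anders#1yPYGaOEPNwr2pCqBgONY43zORp'
regex = r'^(OEBUILD|BUILD|TEST):([0-9a-z_\-.]+@[0-9a-z_\-.]+)#([a-zA-Z0-9]+)$'
print(re.findall(regex, job_id)[0])
# ('TEST', 'linaro@anders', '1yPYGaOEPNwr2pCqBgONY43zORp')

results = {'toolchain': 'gcc-9', 'kconfig': ['defconfig', 'CONFIG_LALA=y']}
name = results['toolchain'] + '-' + results['kconfig'][0]
extra = results['kconfig'][1:]
if extra:
    # extra fragments are hashed and a short digest is appended
    sha = hashlib.sha1()
    for config in extra:
        sha.update(f'{config}'.encode())
    name += '-' + sha.hexdigest()[0:8]
print(name)  # gcc-9-defconfig- followed by 8 hex digits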
145,792 |
Linaro/squad
|
Linaro_squad/squad/ci/backend/lava.py
|
squad.ci.backend.lava.RequestsTransport
|
class RequestsTransport(xmlrpclib.SafeTransport):
"""
Drop-in Transport for xmlrpclib that uses Requests instead of http.client
"""
# change our user agent to reflect Requests
user_agent = "Python XMLRPC with Requests (python-requests.org)"
def __init__(self, use_https=True, cert=None, verify=None, *args, **kwargs):
self.cert = cert
self.verify = verify
self.use_https = use_https
self.timeout = socket._GLOBAL_DEFAULT_TIMEOUT
if 'timeout' in kwargs:
self.timeout = kwargs.pop('timeout')
xmlrpclib.Transport.__init__(self, *args, **kwargs)
def request(self, host, handler, request_body, verbose):
"""
Make an xmlrpc request.
"""
headers = {'User-Agent': self.user_agent}
url = self._build_url(host, handler)
try:
resp = requests.post(url, data=request_body, headers=headers,
stream=True,
cert=self.cert, verify=self.verify,
timeout=self.timeout)
except ValueError:
raise
except Exception:
raise # something went wrong
else:
try:
resp.raise_for_status()
except requests.RequestException as e:
raise xmlrpclib.ProtocolError(url, resp.status_code,
str(e), resp.headers)
else:
self.verbose = verbose
return self.parse_response(resp.raw)
def _build_url(self, host, handler):
"""
Build a url for our request based on the host, handler and use_https
property
"""
scheme = 'https' if self.use_https else 'http'
return '%s://%s/%s' % (scheme, host, handler)
|
class RequestsTransport(xmlrpclib.SafeTransport):
'''
Drop-in Transport for xmlrpclib that uses Requests instead of http.client
'''
def __init__(self, use_https=True, cert=None, verify=None, *args, **kwargs):
pass
def request(self, host, handler, request_body, verbose):
'''
Make an xmlrpc request.
'''
pass
def _build_url(self, host, handler):
'''
Build a url for our request based on the host, handler and use_https
property
'''
pass
| 4 | 3 | 13 | 0 | 11 | 3 | 3 | 0.35 | 1 | 5 | 0 | 0 | 3 | 5 | 3 | 16 | 51 | 6 | 34 | 15 | 30 | 12 | 30 | 14 | 26 | 4 | 2 | 2 | 8 |
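A hedged sketch of wiring the transport above into an XML-RPC proxy, mirroring how Backend.proxy does it below; the endpoint URL and credentials are placeholders, and RequestsTransport is the class defined above.

import xmlrpc.client as xmlrpclib

transport = RequestsTransport(use_https=True, timeout=30)
proxy = xmlrpclib.ServerProxy(
    'https://user:token@lava.example.com/RPC2',
    transport=transport,
    use_builtin_types=True,
)
# proxy.scheduler.job_details(12345) now goes through requests,
# gaining its cert/verify/timeout handling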
145,793 |
Linaro/squad
|
Linaro_squad/squad/ci/backend/lava.py
|
squad.ci.backend.lava.Backend
|
class Backend(BaseBackend):
# ------------------------------------------------------------------------
# API implementation
# ------------------------------------------------------------------------
def submit(self, test_job):
test_job.name = self.__lava_job_name(test_job.definition)
with self.handle_job_submission():
job_id = self.__submit__(test_job.definition)
# in case LAVA doesn't respond with 201 or any of the error
# codes, the job id list might be empty; raise an exception
# should such a condition happen.
if not job_id:
raise TemporarySubmissionIssue("LAVA returned empty job ID list")
if isinstance(job_id, list):
return job_id
return [job_id]
def has_cancel(self):
return True
def cancel(self, test_job):
if test_job.submitted and test_job.job_id is not None:
return self.__cancel_job__(test_job.job_id)
return False
def fetch(self, test_job):
try:
data = self.__get_job_details__(test_job.job_id)
status_key = 'status'
if not self.use_xml_rpc:
status_key = 'state'
if data[status_key] in self.complete_statuses:
# fill in start and end datetime for the job
start_time = data.get('start_time', None)
end_time = data.get('end_time', None)
# convert to datetime
if type(start_time) is str:
try:
start_time = isoparse(start_time)
except ValueError:
start_time = None
if type(end_time) is str:
try:
end_time = isoparse(end_time)
except ValueError:
end_time = None
test_job.started_at = start_time
test_job.ended_at = end_time
test_job.failure = None
test_job.save()
data['results'] = self.__get_testjob_results_yaml__(test_job.job_id)
# fetch logs
raw_logs = BytesIO()
try:
raw_logs = BytesIO(self.__download_full_log__(test_job.job_id))
except Exception:
self.log_warn(("Logs for job %s are not available" % test_job.job_id) + "\n" + traceback.format_exc())
return self.__parse_results__(data, test_job, raw_logs)
except xmlrpc.client.ProtocolError as error:
raise TemporaryFetchIssue(self.url_remove_token(str(error)))
except xmlrpc.client.Fault as fault:
if fault.faultCode // 100 == 5:
# assume HTTP errors 5xx are temporary issues
raise TemporaryFetchIssue(self.url_remove_token(str(fault)))
else:
raise FetchIssue(self.url_remove_token(str(fault)))
except ssl.SSLError as fault:
raise FetchIssue(self.url_remove_token(str(fault)))
except requests.exceptions.RequestException as fault:
if isinstance(fault, requests.exceptions.Timeout):
# assume request timeouts are temporary issues
raise TemporaryFetchIssue(self.url_remove_token(str(fault)))
else:
raise FetchIssue(self.url_remove_token(str(fault)))
def listen(self):
if not self.listen_websocket():
self.listen_zmq()
def listen_websocket(self):
async def handler():
url = urlparse(self.data.url)
ws_url = f"{url.scheme}://{self.data.username}:{self.data.token}@{url.netloc}/ws/"
try:
while True:
try:
async with aiohttp.ClientSession() as session:
self.log_debug(f"connecting to {url.scheme}://{url.netloc}/ws/")
async with session.ws_connect(ws_url, heartbeat=30) as ws:
async for msg in ws:
if msg.type == aiohttp.WSMsgType.TEXT:
try:
(topic, uuid, dt, username, data) = (m for m in msg.json()[:])
data = json.loads(data)
if "error" in data:
raise aiohttp.ClientError(data["error"])
except ValueError:
continue
await sync_to_async(self.receive_event)(topic, data)
await asyncio.sleep(1)
except aiohttp.ClientError as e:
self.log_warn(f"Failed to start client: {e}")
return False
except Exception as e:
# Fall back to ZMQ
self.log_warn(f"Failed to maintain websocket connection: {e}")
return False
asyncio.run(handler())
def listen_zmq(self):
listener_url = self.get_listener_url()
if not listener_url:
self.log_warn("Can't connect, no listener URL")
if self.data is not None and hasattr(self.data, "name"):
self.log_warn("Can't listen to %s backend" % self.data.name)
return
self.log_debug("connecting to %s" % listener_url)
self.context = zmq.Context()
self.socket = self.context.socket(zmq.SUB)
self.socket.setsockopt_string(zmq.SUBSCRIBE, "")
try:
# requires PyZMQ to be built against ZeroMQ 4.2+
self.socket.setsockopt(zmq.HEARTBEAT_IVL, 1000) # 1 s
self.socket.setsockopt(zmq.HEARTBEAT_TIMEOUT, 10000) # 10 s
except AttributeError:
self.log_warn('PyZMQ has no support for heartbeat (requires ZeroMQ library 4.2+), connection may be unstable')
self.socket.connect(listener_url)
self.log_debug("connected to %s" % listener_url)
while True:
try:
message = self.socket.recv_multipart()
(topic, uuid, dt, username, data) = (u(m) for m in message[:])
data = json.loads(data)
self.receive_event(topic, data)
except Exception as e:
self.log_error(str(e) + "\n" + traceback.format_exc())
def job_url(self, test_job):
url = urlsplit(self.data.url)
joburl = '%s://%s/scheduler/job/%s' % (
url.scheme,
url.netloc,
test_job.job_id
)
return joburl
# ------------------------------------------------------------------------
# implementation details
# ------------------------------------------------------------------------
def __init__(self, data):
super(Backend, self).__init__(data)
self.complete_statuses = ['Complete', 'Incomplete', 'Canceled', 'Finished']
self.__proxy__ = None
self.use_xml_rpc = True
url = None
self.authentication = None
if self.data:
url = urlsplit(self.data.url)
if url:
if url.path.find("RPC2") < 0 and url.path.find("api") > 0:
self.use_xml_rpc = False
self.api_url_base = '%s://%s%s' % (
url.scheme,
url.netloc,
url.path
)
# make sure URL ends with a trailing slash
if not self.api_url_base.endswith("/"):
self.api_url_base = self.api_url_base + "/"
self.authentication = {
"Authorization": "Token %s" % self.data.token,
}
@contextmanager
def handle_job_submission(self):
try:
yield
except xmlrpc.client.ProtocolError as error:
raise TemporarySubmissionIssue(self.url_remove_token(str(error)))
except xmlrpc.client.Fault as fault:
if fault.faultCode // 100 == 5 or fault.faultCode == 408:
# assume HTTP errors 5xx are temporary issues
# also treat 408 as a TemporarySubmissionIssue, since it indicates a timeout
raise TemporarySubmissionIssue(self.url_remove_token(str(fault)))
else:
raise SubmissionIssue(self.url_remove_token(str(fault)))
except ssl.SSLError as fault:
raise SubmissionIssue(self.url_remove_token(str(fault)))
except ConnectionRefusedError as fault:
raise TemporarySubmissionIssue(str(fault))
except requests.exceptions.HTTPError as fault:
raise TemporarySubmissionIssue(str(fault))
def url_remove_token(self, text):
if self.data is not None and self.data.token is not None:
return text.replace(self.data.token, "*****")
return text
@property
def proxy(self):
if self.__proxy__ is None:
url = urlsplit(self.data.url)
endpoint = '%s://%s:%s@%s%s' % (
url.scheme,
self.data.username,
self.data.token,
url.netloc,
url.path
)
use_https = True
if url.scheme == 'http':
use_https = False
proxy_timeout = self.settings.get(timeout_variable_name, DEFAULT_TIMEOUT)
self.__proxy__ = xmlrpclib.ServerProxy(
endpoint,
transport=RequestsTransport(timeout=proxy_timeout, use_https=use_https, use_builtin_types=True),
use_builtin_types=True
)
return self.__proxy__
def get_listener_url(self):
url = urlsplit(self.data.url)
hostname = url.netloc
# remove the port if it exists
hostname = hostname.split(":", 1)[0]
socket = self.__get_publisher_event_socket__()
if not socket:
return None
socket_url = urlsplit(socket)
port = socket_url.port
if socket_url.hostname != '*':
hostname = socket_url.hostname
scheme = socket_url.scheme
return '%s://%s:%s' % (scheme, hostname, port)
def has_resubmit(self):
return True
def resubmit(self, test_job):
with self.handle_job_submission():
new_job_id_list = self.__resubmit__(test_job.job_id)
# in case LAVA doesn't respond with 201 or any of the error
# codes, the job id list might be empty; raise an exception
# should such a condition happen.
if not new_job_id_list:
raise TemporarySubmissionIssue("LAVA returned empty job ID list")
if isinstance(new_job_id_list, list):
new_job_id = new_job_id_list[0]
else:
new_job_id = new_job_id_list
new_test_job_name = None
if test_job.definition is not None:
new_test_job_name = self.__lava_job_name(test_job.definition)
new_test_job = TestJob(
backend=self.data,
definition=test_job.definition,
target=test_job.target,
target_build=test_job.target_build,
environment=test_job.environment,
submitted=True,
job_id=new_job_id,
resubmitted_count=test_job.resubmitted_count + 1,
name=new_test_job_name,
parent_job=test_job,
)
test_job.can_resubmit = False
test_job.save()
new_test_job.save()
if isinstance(new_job_id_list, list) and len(new_job_id_list) > 1:
for job_id in new_job_id_list[1:]:
new_test_job.pk = None
new_test_job.job_id = job_id
new_test_job.save()
return new_test_job
def __cancel_job__(self, job_id):
if self.use_xml_rpc:
try:
self.proxy.scheduler.cancel_job(job_id)
return True
except (xmlrpc.client.ProtocolError,
xmlrpc.client.Fault,
ssl.SSLError):
return False
else:
response = requests.post(
urljoin(self.api_url_base, "jobs/%s/cancel/" % (job_id)),
headers=self.authentication,
timeout=self.settings.get(timeout_variable_name, DEFAULT_TIMEOUT)
)
if response.status_code == 200:
return True
return False
def __lava_job_name(self, definition):
yaml_definition = yaml.safe_load(definition)
if type(yaml_definition) is dict and 'job_name' in yaml_definition.keys():
job_name = yaml_definition['job_name']
# only return first 255 characters
return job_name[:255] if job_name else ''
return None
def __resubmit__(self, job_id):
if self.use_xml_rpc:
return self.proxy.scheduler.resubmit_job(job_id)
response = requests.post(
urljoin(self.api_url_base, "jobs/%s/resubmit/" % (job_id)),
headers=self.authentication,
timeout=self.settings.get(timeout_variable_name, DEFAULT_TIMEOUT)
)
# in case LAVA responds with one of the 4XX or 5XX codes
# raise issue with proper message sent to the UI
response.raise_for_status()
if response.status_code == 201:
return response.json()['job_ids']
return []
def __submit__(self, definition):
if self.use_xml_rpc:
return self.proxy.scheduler.submit_job(definition)
response = requests.post(
urljoin(self.api_url_base, "jobs/"),
headers=self.authentication,
data={"definition": definition},
timeout=self.settings.get(timeout_variable_name, DEFAULT_TIMEOUT)
)
if response.status_code == 201:
return response.json()['job_ids']
return []
def __get_job_details__(self, job_id):
if self.use_xml_rpc:
return self.proxy.scheduler.job_details(job_id)
response = requests.get(
urljoin(self.api_url_base, "jobs/%s" % (job_id)),
headers=self.authentication,
timeout=self.settings.get(timeout_variable_name, DEFAULT_TIMEOUT)
)
if response.status_code == 200:
return response.json()
raise FetchIssue(response.text)
def __download_full_log__(self, job_id):
response = None
if self.use_xml_rpc:
url = self.data.url.replace('/RPC2', '/scheduler/job/%s/log_file/plain' % job_id)
payload = {"user": self.data.username, "token": self.data.token}
try:
response = requests.get(
url,
params=payload,
timeout=self.settings.get(timeout_variable_name, DEFAULT_TIMEOUT)
)
except requests.exceptions.RequestException:
self.log_error("Unable to download log for {backend_name}/{job_id}".format(backend_name=self.data.name, job_id=job_id))
else:
try:
response = requests.get(
urljoin(self.api_url_base, "jobs/%s/logs/" % (job_id)),
headers=self.authentication,
timeout=self.settings.get(timeout_variable_name, DEFAULT_TIMEOUT)
)
except requests.exceptions.RequestException:
self.log_error("Unable to download log for {backend_name}/{job_id}".format(backend_name=self.data.name, job_id=job_id))
if response and response.status_code == 200:
return response.content
return b''
def __download_test_log__(self, raw_log, log_start, log_end):
if not log_start:
return ""
return_lines = StringIO()
log_start_line = int(log_start)
log_end_line = None
if log_end:
log_end_line = int(log_end)
else:
log_end_line = log_start_line + 2 # LAVA sometimes misses the signals
raw_log.seek(0)
counter = 0
for line in raw_log:
counter += 1
if counter < log_start_line:
continue
try:
return_lines.write(line.decode("utf-8"))
except UnicodeDecodeError:
return_lines.write(line.decode("iso-8859-1"))
return_lines.write("\n")
if counter >= log_end_line:
break
raw_log.seek(0)
return return_lines.getvalue()
def __parse_log__(self, log_data):
returned_log = StringIO()
start_dict = False
tmp_dict = None
tmp_key = None
is_value = False
self.log_debug("Length of log buffer: %s" % log_data.getbuffer().nbytes)
if log_data.getbuffer().nbytes == 0:
return ""
try:
for event in yaml.parse(log_data, Loader=yaml.CLoader):
if isinstance(event, yaml.MappingStartEvent):
start_dict = True
tmp_dict = {}
if isinstance(event, yaml.MappingEndEvent):
start_dict = False
if tmp_dict and tmp_dict.get('lvl') in ['target', 'feedback'] and 'msg' in tmp_dict.keys():
if isinstance(tmp_dict['msg'], bytes):
try:
# seems like latin-1 is the encoding used by serial
# this might not be true in all cases
returned_log.write(tmp_dict["msg"].decode('latin-1', 'ignore') + "\n")
except ValueError:
# despite ignoring errors, they are still raised sometimes
pass
else:
returned_log.write(tmp_dict['msg'] + "\n")
del tmp_dict
tmp_dict = None
is_value = False
if start_dict is True and isinstance(event, yaml.ScalarEvent):
if is_value is False:
# the event.value is a dict key
tmp_key = event.value
is_value = True
else:
# the event.value is a dict value
tmp_dict.update({tmp_key: event.value})
is_value = False
except (yaml.scanner.ScannerError, yaml.parser.ParserError):
log_data.seek(0)
wrapper = TextIOWrapper(log_data, encoding='utf-8')
self.log_error("Problem parsing LAVA log\n" + wrapper.read() + "\n" + traceback.format_exc())
return returned_log.getvalue()
def __get_testjob_results_yaml__(self, job_id):
self.log_debug("Retrieving result summary for job: %s" % job_id)
lava_job_results = []
if self.use_xml_rpc:
suites = self.proxy.results.get_testjob_suites_list_yaml(job_id)
y = yaml.safe_load(suites)
for suite in y:
limit = 500
offset = 0
while True:
self.log_debug(
"requesting results for %s with offset of %s"
% (suite['name'], offset)
)
results = self.proxy.results.get_testsuite_results_yaml(
job_id,
suite['name'],
limit,
offset)
yaml_results = yaml.load(results, Loader=yaml.CLoader)
lava_job_results = lava_job_results + yaml_results
if len(yaml_results) == limit:
offset = offset + limit
else:
break
else:
suites_resp = requests.get(
urljoin(self.api_url_base, "jobs/%s/suites/" % (job_id)),
headers=self.authentication,
timeout=self.settings.get(timeout_variable_name, DEFAULT_TIMEOUT)
)
while suites_resp.status_code == 200:
suites_content = suites_resp.json()
for suite in suites_content['results']:
tests_resp = requests.get(
urljoin(self.api_url_base, "jobs/%s/suites/%s/tests" % (job_id, suite['id'])),
headers=self.authentication,
timeout=self.settings.get(timeout_variable_name, DEFAULT_TIMEOUT)
)
while tests_resp.status_code == 200:
tests_content = tests_resp.json()
for test in tests_content['results']:
test['suite'] = suite['name']
lava_job_results = lava_job_results + tests_content['results']
if tests_content['next']:
tests_resp = requests.get(
tests_content['next'],
headers=self.authentication,
timeout=self.settings.get(timeout_variable_name, DEFAULT_TIMEOUT)
)
else:
break
if suites_content['next']:
suites_resp = requests.get(
suites_content['next'],
headers=self.authentication,
timeout=self.settings.get(timeout_variable_name, DEFAULT_TIMEOUT)
)
else:
break
return lava_job_results
def __get_publisher_event_socket__(self):
if self.use_xml_rpc:
return self.proxy.scheduler.get_publisher_event_socket()
lava_resp = requests.get(
urljoin(self.api_url_base, "system/master_config/"),
timeout=self.settings.get(timeout_variable_name, DEFAULT_TIMEOUT)
)
if lava_resp.status_code == 200:
return lava_resp.json()['EVENT_SOCKET']
# should there be an exception if status_code != 200?
return None
def __resolve_settings__(self, test_job):
result_settings = self.settings
if getattr(test_job, 'target', None) is not None \
and test_job.target.project_settings is not None:
ps = yaml.safe_load(test_job.target.project_settings) or {}
result_settings.update(ps)
return result_settings
def __parse_results__(self, data, test_job, raw_logs):
project_settings = self.__resolve_settings__(test_job)
handle_lava_suite = project_settings.get('CI_LAVA_HANDLE_SUITE', False)
handle_lava_boot = project_settings.get('CI_LAVA_HANDLE_BOOT', False)
clone_measurements_to_tests = project_settings.get('CI_LAVA_CLONE_MEASUREMENTS', False)
ignore_infra_errors = project_settings.get('CI_LAVA_WORK_AROUND_INFRA_ERRORS', False)
definition = yaml.safe_load(data['definition'])
test_job.name = definition['job_name'][:255]
job_metadata = definition.get('metadata', {})
suite_versions = {}
for key, value in job_metadata.items():
if key.endswith('__version'):
suite_versions[key.replace('__version', '')] = value
if suite_versions:
job_metadata['suite_versions'] = suite_versions
results = {}
metrics = {}
completed = True
status_key = 'status'
if not self.use_xml_rpc:
status_key = 'health'
if data[status_key] == 'Canceled':
# consider all canceled jobs as incomplete and discard any results
completed = False
else:
for result in data['results']:
if handle_lava_suite or result['suite'] != 'lava':
suite = result['suite'].split("_", 1)[-1]
res_name = "%s/%s" % (suite, result['name'])
res_log = None
if 'log_start_line' in result.keys():
log_url = f"{self.job_url(test_job)}#L{result['log_start_line']}"
res_log = f"Testcase log: <a href='{log_url}'>{log_url}</a>\n"
if 'log_end_line' in result.keys() and \
result['log_start_line'] is not None and \
result['log_end_line'] is not None:
res_log += self.__download_test_log__(raw_logs, result['log_start_line'], result['log_end_line'])
# YAML from LAVA has all values serialized to strings
if result['measurement'] == 'None' or result['measurement'] is None:
res_value = result['result']
results.update({res_name: {'result': res_value, 'log': res_log}})
else:
res_value = result['measurement']
try:
unit = result['unit']
except KeyError:
# work around the bug in LAVA
# https://git.lavasoftware.org/lava/lava/-/issues/449
unit = result.get('units', 'items')
metrics.update({res_name: {'value': float(res_value), 'unit': unit}})
if clone_measurements_to_tests:
res_value = result['result']
results.update({res_name: res_value})
elif result['name'] == 'auto-login-action' and handle_lava_boot:
# add artificial 'boot' test result for each test job
# by default the boot test is named after the device_type
boot = "boot-%s" % test_job.name
res_name = "%s/%s" % (boot, definition['device_type'])
res_time_name = "%s/time-%s" % (boot, definition['device_type'])
if 'testsuite' in job_metadata.keys():
# If 'testsuite' metadata key is present in the job
# it's appended to the test name. This way regressions can
# be found with more granularity
res_name = "%s-%s" % (res_name, job_metadata['testsuite'])
try:
unit = result['unit']
except KeyError:
# work around the bug in LAVA
# https://git.lavasoftware.org/lava/lava/-/issues/449
unit = result.get('units', 'items')
results.update({res_name: result['result']})
metrics.update({res_time_name: {'value': float(result['measurement']), 'unit': unit}})
# Handle failed lava jobs
if result['suite'] == 'lava' and result['name'] == 'job' and result['result'] == 'fail':
metadata = result['metadata']
if isinstance(metadata, str):
metadata = yaml.safe_load(metadata)
test_job.failure = str(metadata)
test_job.save()
error_type = metadata.get('error_type', None)
# detect jobs failed because of infrastructure issues
if error_type in ['Infrastructure', 'Job', 'Lava']:
if not ignore_infra_errors:
completed = False
# automatically resubmit in some cases
if error_type in ['Infrastructure', 'Job', 'Test']:
self.__resubmit_job__(test_job, metadata)
return (data[status_key], completed, job_metadata, results, metrics, self.__parse_log__(raw_logs))
def __resubmit_job__(self, test_job, metadata):
infra_messages_re_list = []
project_settings = self.__resolve_settings__(test_job)
error_messages_settings = project_settings.get('CI_LAVA_INFRA_ERROR_MESSAGES', [])
for message_re in error_messages_settings:
try:
r = re.compile(message_re, re.I)
infra_messages_re_list.append(r)
except re.error:
# ignore incorrect expressions
self.log_debug("'%s' is not a valid regex" % message_re)
for regex in infra_messages_re_list:
error_msg = metadata.get('error_msg')
if error_msg and regex.search(error_msg) is not None and \
test_job.resubmitted_count < 3:
resubmitted_job = self.resubmit(test_job)
if project_settings.get('CI_LAVA_SEND_ADMIN_EMAIL', True):
# delay sending email by 15 seconds to allow the database object to be saved
send_testjob_resubmit_admin_email.apply_async(args=[test_job.pk, resubmitted_job.pk], countdown=15)
# re-submit the job only once
# even if there are more matches
break
def receive_event(self, topic, data):
if topic.split('.')[-1] != "testjob":
return
lava_id = data.get('job')
if not lava_id:
return
if 'sub_id' in data.keys():
lava_id = data['sub_id']
lava_status = data.get('state', 'Unknown')
db_test_job_list = self.data.test_jobs.filter(
submitted=True,
fetched=False,
job_id=lava_id)
if db_test_job_list.exists() and \
len(db_test_job_list) == 1:
self.log_debug("interesting message received: %r" % data)
else:
return
job = db_test_job_list[0]
job.job_status = lava_status
if lava_status == 'Finished':
lava_health = data.get('health', 'Unknown')
job.job_status = lava_health
if job.name is None:
# fetch job name once
data = self.__get_job_details__(lava_id)
definition = yaml.safe_load(data['definition'])
job.name = definition['job_name'][:255]
job.save()
if job.job_status in self.complete_statuses:
self.log_info("scheduling fetch for job %s" % job.job_id)
fetch.apply_async(args=[job.id])
def check_job_definition(self, definition):
try:
yaml.safe_load(definition)
return True
except yaml.YAMLError as e:
return str(e)
def get_job_definition(self, job_id):
if self.use_xml_rpc:
return self.proxy.scheduler.jobs.definition(job_id)
job_resp = requests.get(
f'{self.api_url_base}/jobs/{job_id}/definition/',
headers=self.authentication,
timeout=self.settings.get(timeout_variable_name, DEFAULT_TIMEOUT)
)
if job_resp.status_code == 200:
return job_resp.json()
|
class Backend(BaseBackend):
def submit(self, test_job):
pass
def has_cancel(self):
pass
def cancel(self, test_job):
pass
def fetch(self, test_job):
pass
def listen(self):
pass
def listen_websocket(self):
pass
async def handler():
pass
def listen_zmq(self):
pass
def job_url(self, test_job):
pass
def __init__(self, data):
pass
@contextmanager
def handle_job_submission(self):
pass
def url_remove_token(self, text):
pass
@property
def proxy(self):
pass
def get_listener_url(self):
pass
def has_resubmit(self):
pass
def resubmit(self, test_job):
pass
def __cancel_job__(self, job_id):
pass
def __lava_job_name(self, definition):
pass
def __resubmit__(self, job_id):
pass
def __submit__(self, definition):
pass
def __get_job_details__(self, job_id):
pass
def __download_full_log__(self, job_id):
pass
def __download_test_log__(self, raw_log, log_start, log_end):
pass
def __parse_log__(self, log_data):
pass
def __get_testjob_results_yaml__(self, job_id):
pass
def __get_publisher_event_socket__(self):
pass
def __resolve_settings__(self, test_job):
pass
def __parse_results__(self, data, test_job, raw_logs):
pass
def __resubmit_job__(self, test_job, metadata):
pass
def receive_event(self, topic, data):
pass
def check_job_definition(self, definition):
pass
def get_job_definition(self, job_id):
pass
| 35 | 0 | 22 | 1 | 20 | 2 | 5 | 0.09 | 1 | 23 | 6 | 0 | 31 | 8 | 31 | 50 | 710 | 57 | 602 | 159 | 567 | 55 | 487 | 147 | 454 | 21 | 1 | 9 | 160 |
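A minimal sketch of the settings precedence implemented by __resolve_settings__ above: per-project YAML (project_settings) is layered over the backend's settings. The CI_LAVA_* key names appear in __parse_results__ and __resubmit_job__; the timeout default here is a placeholder.

import yaml

backend_settings = {'CI_LAVA_HANDLE_BOOT': False, 'fetch_timeout': 60}
project_yaml = '''
CI_LAVA_HANDLE_BOOT: true
CI_LAVA_INFRA_ERROR_MESSAGES:
  - 'auto-login-action timed out'
'''

# project settings override backend settings key by key
settings = dict(backend_settings)
settings.update(yaml.safe_load(project_yaml) or {})
print(settings['CI_LAVA_HANDLE_BOOT'])  # True: project settings win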
145,794 |
Linaro/squad
|
Linaro_squad/squad/ci/backend/fake.py
|
squad.ci.backend.fake.Backend
|
class Backend(object):
def __init__(self, data):
self.data = data
def submit(self, test_job):
return [str(test_job.id)]
def resubmit(self, test_job):
count = test_job.resubmitted_count + 1
new_jobid = '%s.%d' % (test_job.job_id, count)
new_job = TestJob.objects.create(
backend=self.data,
testrun=test_job.testrun,
target=test_job.target,
target_build=test_job.target_build,
environment=test_job.environment,
submitted=True,
job_id=new_jobid,
resubmitted_count=count,
definition=test_job.definition,
parent_job=test_job
)
return new_job
def fetch(self, test_job):
status = 'Finished'
completed = (random.randint(1, 20) <= 16) # 80% success rate
metadata = {"job_id": str(test_job.id), "foo": "bar"}
tests = {test: "pass" if random.randint(1, 10) <= 8 else "fail" for test in TESTS}
metrics = {metric: random.random() for metric in METRICS}
logs = "a fake log file\ndate: " + time.strftime('%c') + "\n"
return (status, completed, metadata, tests, metrics, logs)
def listen(self):
max_id = 0
while True:
time.sleep(random.randint(1, 5))
jobs = self.data.test_jobs.filter(
submitted=True,
fetched=False,
id__gt=max_id,
).order_by('id')
for job in jobs:
fetch.apply_async(args=[job.id])
max_id = job.id
def job_url(self, test_job):
return 'https://example.com/job/%s' % test_job.job_id
def has_resubmit(self):
return False
def has_cancel(self):
return True
def cancel(self, test_job):
return True
def check_job_definition(self, definition):
return True
def get_job_definition(self, test_job):
return "sample job definition"
def supports_callbacks(self):
return False
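A rough sketch of how a poller might consume the six-element tuple returned by fetch() above; FakeJob and poll_once are hypothetical stand-ins, not SQUAD code:

# Sketch: consuming the (status, completed, metadata, tests, metrics, logs)
# tuple returned by Backend.fetch(). FakeJob mimics the attribute used here.
class FakeJob:
    id = 42

def poll_once(backend, job):
    status, completed, metadata, tests, metrics, logs = backend.fetch(job)
    failed = [name for name, result in tests.items() if result == 'fail']
    print('job %s: %s, completed=%s, %d failed tests' % (
        job.id, status, completed, len(failed)))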
|
class Backend(object):
def __init__(self, data):
pass
def submit(self, test_job):
pass
def resubmit(self, test_job):
pass
def fetch(self, test_job):
pass
def listen(self):
pass
def job_url(self, test_job):
pass
def has_resubmit(self):
pass
def has_cancel(self):
pass
def cancel(self, test_job):
pass
def check_job_definition(self, definition):
pass
def get_job_definition(self, test_job):
pass
def supports_callbacks(self):
pass
| 13 | 0 | 5 | 0 | 5 | 0 | 1 | 0.02 | 1 | 2 | 1 | 0 | 12 | 1 | 12 | 12 | 67 | 12 | 55 | 26 | 42 | 1 | 40 | 26 | 27 | 3 | 1 | 2 | 14 |
145,795 |
Linaro/squad
|
Linaro_squad/squad/ci/apps.py
|
squad.ci.apps.CiConfig
|
class CiConfig(AppConfig):
default_auto_field = 'django.db.models.AutoField'
name = 'squad.ci'
|
class CiConfig(AppConfig):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0 | 3 | 3 | 2 | 0 | 3 | 3 | 2 | 0 | 1 | 0 | 0 |
145,796 |
Linaro/squad
|
Linaro_squad/squad/ci/admin.py
|
squad.ci.admin.TestJobFailureFilter
|
class TestJobFailureFilter(admin.SimpleListFilter):
title = _('Failed')
parameter_name = 'failed'
def lookups(self, request, model_admin):
return (
('yes', _('Yes')),
('no', _('No')),
)
def queryset(self, request, queryset):
if self.value() == 'yes':
return queryset.exclude(failure=None)
if self.value() == 'no':
return queryset.filter(failure=None)
return queryset
|
class TestJobFailureFilter(admin.SimpleListFilter):
def lookups(self, request, model_admin):
pass
def queryset(self, request, queryset):
pass
| 3 | 0 | 6 | 0 | 6 | 0 | 2 | 0 | 1 | 0 | 0 | 0 | 2 | 0 | 2 | 2 | 18 | 4 | 14 | 5 | 11 | 0 | 11 | 5 | 8 | 3 | 1 | 1 | 4 |
145,797 |
Linaro/squad
|
Linaro_squad/squad/ci/admin.py
|
squad.ci.admin.TestJobAdmin
|
class TestJobAdmin(admin.ModelAdmin):
list_display = ('backend', 'target', 'created_at', 'submitted', 'submitted_at', 'fetched', 'last_fetch_attempt', 'success', 'job_id_link',)
list_filter = ('backend', 'target', 'submitted', 'fetched', TestJobFailureFilter)
readonly_fields = ('testrun', 'target_build', 'parent_job')
actions = [submit_job, fetch_job]
def job_id_link(self, test_job):
if test_job.url:
return '<a href="%s">%s</a>' % (test_job.url, test_job.job_id)
else:
return test_job.job_id
job_id_link.allow_tags = True
job_id_link.short_description = 'Job ID'
|
class TestJobAdmin(admin.ModelAdmin):
def job_id_link(self, test_job):
pass
| 2 | 0 | 5 | 0 | 5 | 0 | 2 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 13 | 1 | 12 | 6 | 10 | 0 | 11 | 6 | 9 | 2 | 1 | 1 | 2 |
145,798 |
Linaro/squad
|
Linaro_squad/squad/ci/admin.py
|
squad.ci.admin.BackendAdmin
|
class BackendAdmin(NoDeleteListingModelAdmin):
list_display = ('name', 'url', 'implementation_type', 'listen_enabled', 'poll_enabled', 'poll_interval', 'max_fetch_attempts')
actions = [poll_backends]
|
class BackendAdmin(NoDeleteListingModelAdmin):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 3 | 0 | 3 | 3 | 2 | 0 | 3 | 3 | 2 | 0 | 2 | 0 | 0 |
145,799 |
Linaro/squad
|
Linaro_squad/squad/ci/migrations/0002_auto_20170406_1252.py
|
squad.ci.migrations.0002_auto_20170406_1252.Migration
|
class Migration(migrations.Migration):
dependencies = [
('ci', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='backend',
name='listener_filter',
field=models.CharField(blank=True, max_length=1024, null=True),
),
migrations.AddField(
model_name='backend',
name='listener_url',
field=models.URLField(blank=True, null=True),
),
]
|
class Migration(migrations.Migration):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 18 | 2 | 16 | 3 | 15 | 0 | 3 | 3 | 2 | 0 | 1 | 0 | 0 |
145,800 |
Linaro/squad
|
Linaro_squad/squad/core/migrations/0090_populate_test_has_known_issues.py
|
squad.core.migrations.0090_populate_test_has_known_issues.Migration
|
class Migration(migrations.Migration):
dependencies = [
('core', '0089_test_has_known_issues'),
]
if SQLITE:
operations = []
else:
operations = [
migrations.RunSQL(SQL, reverse_sql=migrations.RunSQL.noop),
]
|
class Migration(migrations.Migration):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 10 | 3 | 9 | 0 | 5 | 3 | 4 | 0 | 1 | 1 | 0 |
145,801 |
Linaro/squad
|
Linaro_squad/squad/core/migrations/0072_group_description.py
|
squad.core.migrations.0072_group_description.Migration
|
class Migration(migrations.Migration):
dependencies = [
('core', '0071_migrate_old_tokens'),
]
operations = [
migrations.AddField(
model_name='group',
name='description',
field=models.TextField(null=True),
),
]
|
class Migration(migrations.Migration):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 13 | 2 | 11 | 3 | 10 | 0 | 3 | 3 | 2 | 0 | 1 | 0 | 0 |
145,802 |
Linaro/squad
|
Linaro_squad/squad/core/migrations/0092_annotation.py
|
squad.core.migrations.0092_annotation.Migration
|
class Migration(migrations.Migration):
dependencies = [
('core', '0091_notification_delivery_remove_unique_status'),
]
operations = [
migrations.CreateModel(
name='Annotation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.CharField(max_length=1024)),
('build', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='core.Build')),
],
),
]
|
class Migration(migrations.Migration):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 16 | 2 | 14 | 3 | 13 | 0 | 3 | 3 | 2 | 0 | 1 | 0 | 0 |
145,803 |
Linaro/squad
|
Linaro_squad/squad/core/models.py
|
squad.core.models.BuildSummary
|
class BuildSummary(models.Model, TestSummaryBase):
build = models.ForeignKey(Build, related_name='metrics_summary', on_delete=models.CASCADE)
environment = models.ForeignKey(Environment, on_delete=models.CASCADE)
metrics_summary = models.FloatField(null=True)
has_metrics = models.BooleanField(default=False)
tests_pass = models.IntegerField(default=0)
tests_fail = models.IntegerField(default=0)
tests_xfail = models.IntegerField(default=0)
tests_skip = models.IntegerField(default=0)
test_runs_total = models.IntegerField(default=0)
test_runs_completed = models.IntegerField(default=0)
test_runs_incomplete = models.IntegerField(default=0)
class Meta:
unique_together = ('build', 'environment',)
@classmethod
def create_or_update(cls, build, environment):
"""
Creates (or updates) a BuildSummary given build/environment and
returns it.
"""
metrics_summary = MetricsSummary(build, environment)
test_summary = TestSummary(build, environment)
test_runs_total = build.test_runs.filter(environment=environment).count()
test_runs_completed = build.test_runs.filter(environment=environment, completed=True).count()
test_runs_incomplete = build.test_runs.filter(environment=environment, completed=False).count()
data = {
'metrics_summary': metrics_summary.value,
'has_metrics': metrics_summary.has_metrics,
'tests_pass': test_summary.tests_pass,
'tests_fail': test_summary.tests_fail,
'tests_xfail': test_summary.tests_xfail,
'tests_skip': test_summary.tests_skip,
'test_runs_total': test_runs_total,
'test_runs_completed': test_runs_completed,
'test_runs_incomplete': test_runs_incomplete,
}
try:
# Occasionally multiple threads call the line below concurrently, and
# get_or_create is not atomic: each thread's implicit `.get()` may find
# no object, leading to multiple creation attempts. A unique constraint
# on the build/environment combination prevents duplicate summaries, so
# when concurrent saves collide an IntegrityError is raised. It is safe
# to ignore it: another thread already created the object.
summary, created = cls.objects.get_or_create(build=build, environment=environment, defaults=data)
except IntegrityError:
return
if not created:
summary.metrics_summary = metrics_summary.value
summary.has_metrics = metrics_summary.has_metrics
summary.tests_pass = test_summary.tests_pass
summary.tests_fail = test_summary.tests_fail
summary.tests_xfail = test_summary.tests_xfail
summary.tests_skip = test_summary.tests_skip
summary.test_runs_total = test_runs_total
summary.test_runs_completed = test_runs_completed
summary.test_runs_incomplete = test_runs_incomplete
summary.save()
return summary
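The comment in create_or_update describes a classic get_or_create race. A standalone sketch of the same pattern, assuming only that the model has a unique constraint on (build, environment):

from django.db import IntegrityError

# Sketch: race-tolerant get_or_create. Two threads can both miss on the
# implicit .get() and both attempt a create; the unique constraint makes
# the loser raise IntegrityError, which is safe to swallow.
def race_tolerant_get_or_create(model, build, environment, defaults):
    try:
        obj, created = model.objects.get_or_create(
            build=build, environment=environment, defaults=defaults)
    except IntegrityError:
        return None  # another thread created the row first
    return obj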
|
class BuildSummary(models.Model, TestSummaryBase):
class Meta:
@classmethod
def create_or_update(cls, build, environment):
'''
Creates (or updates) a BuildSummary given build/environment and
returns it.
'''
pass
| 4 | 1 | 47 | 4 | 33 | 10 | 3 | 0.21 | 2 | 2 | 2 | 0 | 0 | 0 | 1 | 7 | 65 | 7 | 48 | 23 | 44 | 10 | 37 | 22 | 34 | 3 | 2 | 1 | 3 |
145,804 |
Linaro/squad
|
Linaro_squad/squad/core/models.py
|
squad.core.models.Callback
|
class Callback(models.Model):
"""
This class is responsible for handling callbacks in SQUAD
"""
url = models.URLField(max_length=1024, default=None, null=True, blank=True)
method = models.CharField(max_length=10, default=callback_methods.POST, validators=[callback_methods.validator])
event = models.CharField(max_length=64, validators=[callback_events.validator])
headers = models.TextField(
default=None,
null=True,
blank=True,
validators=[yaml_validator],
verbose_name=N_('HTTP headers (JSON-formatted) to be sent in this callback')
)
payload = models.TextField(
default=None,
null=True,
blank=True,
validators=[yaml_validator],
verbose_name=N_('Payload (JSON-formatted) to be sent in this callback')
)
payload_is_json = models.BooleanField(default=True)
is_sent = models.BooleanField(default=False)
record_response = models.BooleanField(default=False, verbose_name=N_('Should this callback response be recorded?'))
response_code = models.IntegerField(default=None, null=True, blank=True)
response_content = models.TextField(default=None, null=True, blank=True)
# Callbacks can belong to any model: Build, TestJob, TestRun, etc
# so it needs to have a generic foreign key
object_reference_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_reference_id = models.PositiveIntegerField()
object_reference = GenericForeignKey('object_reference_type', 'object_reference_id')
def dispatch(self):
dispatch_callback(self)
events = callback_events()
methods = callback_methods()
class Meta:
unique_together = ('object_reference_type', 'object_reference_id', 'url', 'event')
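Because Callback uses a GenericForeignKey, it can point at a Build, TestJob, TestRun, or any other model. A sketch of attaching one to a build (the url value and helper name are illustrative):

from django.contrib.contenttypes.models import ContentType

# Sketch: attaching a Callback to an arbitrary object through the
# object_reference_type/object_reference_id pair defined above.
def attach_build_callback(build, url):
    return Callback.objects.create(
        url=url,
        event=callback_events.ON_BUILD_FINISHED,
        object_reference_type=ContentType.objects.get_for_model(build),
        object_reference_id=build.id,
    )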
|
class Callback(models.Model):
'''
This class is responsible for handling callbacks in SQUAD
'''
def dispatch(self):
pass
class Meta:
| 3 | 1 | 2 | 0 | 2 | 0 | 1 | 0.16 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 46 | 9 | 32 | 19 | 29 | 5 | 20 | 19 | 17 | 1 | 1 | 0 | 1 |
145,805 |
Linaro/squad
|
Linaro_squad/squad/core/models.py
|
squad.core.models.CallbackForeignKey
|
class CallbackForeignKey(GenericRelation):
def __init__(self, **kwargs):
super().__init__(
Callback,
content_type_field='object_reference_type',
object_id_field='object_reference_id',
**kwargs)
|
class CallbackForeignKey(GenericRelation):
def __init__(self, **kwargs):
pass
| 2 | 0 | 6 | 0 | 6 | 0 | 1 | 0 | 1 | 2 | 1 | 0 | 1 | 0 | 1 | 1 | 7 | 0 | 7 | 2 | 5 | 0 | 3 | 2 | 1 | 1 | 1 | 0 | 1 |
145,806 |
Linaro/squad
|
Linaro_squad/squad/core/models.py
|
squad.core.models.CustomURLField
|
class CustomURLField(models.URLField):
default_validators = __default_url_validator__
def formfield(self, **kwargs):
return super(CustomURLField, self).formfield(**{
'form_class': CustomURLFormField,
})
|
class CustomURLField(models.URLField):
def formfield(self, **kwargs):
pass
| 2 | 0 | 4 | 0 | 4 | 0 | 1 | 0 | 1 | 2 | 1 | 0 | 1 | 0 | 1 | 1 | 7 | 1 | 6 | 3 | 4 | 0 | 4 | 3 | 2 | 1 | 1 | 0 | 1 |
145,807 |
Linaro/squad
|
Linaro_squad/squad/core/models.py
|
squad.core.models.CustomURLFormField
|
class CustomURLFormField(FormURLField):
default_validators = __default_url_validator__
|
class CustomURLFormField(FormURLField):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 1 | 0 | 0 |
145,808 |
Linaro/squad
|
Linaro_squad/squad/core/models.py
|
squad.core.models.DelayedReport
|
class DelayedReport(models.Model):
build = models.ForeignKey(Build, related_name="delayed_reports", on_delete=models.CASCADE)
baseline = models.ForeignKey('ProjectStatus', related_name="delayed_report_baselines", null=True, blank=True, on_delete=models.CASCADE)
output_format_choices = (('text/plain', 'text/plain'), ('text/html', 'text/html'))
output_format = models.CharField(max_length=32, choices=output_format_choices)
template = models.ForeignKey(EmailTemplate, null=True, blank=True, on_delete=models.CASCADE)
email_recipient = models.EmailField(null=True, blank=True)
email_recipient_notified = models.BooleanField(default=False)
callback = models.URLField(null=True, blank=True)
callback_token = models.CharField(max_length=128, null=True, blank=True)
callback_notified = models.BooleanField(default=False)
data_retention_days = models.PositiveSmallIntegerField(default=5, validators=[MaxValueValidator(30)])
output_subject = models.TextField(null=True, blank=True)
output_text = models.TextField(null=True, blank=True)
output_html = models.TextField(null=True, blank=True)
error_message = models.TextField(null=True, blank=True, validators=[yaml_validator])
status_code = models.PositiveSmallIntegerField(blank=True, null=True, validators=[MaxValueValidator(511), MinValueValidator(100)])
created_at = models.DateTimeField(auto_now_add=True)
def send(self):
# guard on the field itself: [None] would be a truthy, non-empty list
if not self.email_recipient:
return
recipients = [self.email_recipient]
sender = "%s <%s>" % (settings.SITE_NAME, settings.EMAIL_FROM)
subject = "Custom report %s" % self.pk
message = Message(subject, self.output_text, sender, recipients)
if self.output_html:
message.attach_alternative(self.output_html, "text/html")
message.send()
self.email_recipient_notified = True
self.save()
|
class DelayedReport(models.Model):
def send(self):
pass
| 2 | 0 | 14 | 2 | 12 | 0 | 3 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 1 | 1 | 33 | 3 | 30 | 23 | 28 | 0 | 30 | 23 | 28 | 3 | 1 | 1 | 3 |
145,809 |
Linaro/squad
|
Linaro_squad/squad/core/models.py
|
squad.core.models.DisplayName
|
class DisplayName(object):
@property
def display_name(self):
return self.name or self.slug
|
class DisplayName(object):
@property
def display_name(self):
pass
| 3 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 2 | 1 | 0 | 1 | 1 | 5 | 1 | 4 | 3 | 1 | 0 | 3 | 2 | 1 | 1 | 1 | 0 | 1 |
145,810 |
Linaro/squad
|
Linaro_squad/squad/core/models.py
|
squad.core.models.EmailTemplate
|
class EmailTemplate(models.Model):
name = models.CharField(max_length=100, unique=True)
subject = models.CharField(
max_length=1024,
null=True,
blank=True,
help_text=N_('Jinja2 template for subject (single line)'),
validators=[jinja2_validator])
plain_text = models.TextField(help_text=N_('Jinja2 template for text/plain content'), validators=[jinja2_validator])
html = models.TextField(blank=True, null=True, help_text=N_('Jinja2 template for text/html content'), validators=[jinja2_validator])
# If any attribute should not be tracked, pass excluded_fields=['attr']
history = HistoricalRecords(cascade_delete_history=True)
def __str__(self):
return self.name
|
class EmailTemplate(models.Model):
def __str__(self):
pass
| 2 | 0 | 2 | 0 | 2 | 0 | 1 | 0.08 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 16 | 2 | 13 | 7 | 11 | 1 | 8 | 7 | 6 | 1 | 1 | 0 | 1 |
145,811 |
Linaro/squad
|
Linaro_squad/squad/core/models.py
|
squad.core.models.Environment
|
class Environment(models.Model):
project = models.ForeignKey(Project, related_name='environments', on_delete=models.CASCADE)
slug = models.CharField(max_length=100, validators=[slug_validator], db_index=True)
name = models.CharField(max_length=100, null=True, blank=True)
expected_test_runs = models.IntegerField(default=0, null=True, blank=True)
description = models.TextField(null=True, blank=True)
class Meta:
unique_together = ('project', 'slug',)
def __str__(self):
return self.name or self.slug
|
class Environment(models.Model):
class Meta:
def __str__(self):
pass
| 3 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 12 | 2 | 10 | 9 | 7 | 0 | 10 | 9 | 7 | 1 | 1 | 0 | 1 |
145,812 |
Linaro/squad
|
Linaro_squad/squad/core/models.py
|
squad.core.models.Group
|
class Group(models.Model, DisplayName):
objects = GroupManager()
slug = models.CharField(max_length=100, unique=True, validators=[group_slug_validator], db_index=True, verbose_name=N_('Slug'))
valid_slug_pattern = slug_pattern
name = models.CharField(max_length=100, null=True, blank=True, verbose_name=N_('Name'))
description = models.TextField(null=True, blank=True, verbose_name=N_('Description'))
members = models.ManyToManyField(User, through='GroupMember', verbose_name=N_('Members'))
settings = models.TextField(null=True, blank=True, validators=[yaml_validator])
def add_user(self, user, access=None):
member = GroupMember(group=self, user=user)
if access:
member.access = access
member.save()
def add_admin(self, user):
self.add_user(user, 'admin')
def accessible_to(self, user):
return GroupMember.objects.filter(group=self, user=user.id).exists() or self.writable_by(user)
def can_submit_results(self, user):
return user.is_superuser or user.is_staff or self.has_access(user, 'admin', 'privileged', 'submitter')
def can_submit_testjobs(self, user):
return user.is_superuser or user.is_staff or self.has_access(user, 'admin', 'privileged')
def writable_by(self, user):
return user.is_superuser or user.is_staff or self.has_access(user, 'admin')
def has_access(self, user, *access_levels):
return GroupMember.objects.filter(
group=self,
user=user.id,
access__in=access_levels
).exists()
def __str__(self):
return self.slug
def full_clean(self, **kwargs):
errors = {}
try:
super().full_clean(**kwargs)
except ValidationError as e:
errors = e.update_error_dict(errors)
if self.slug and not re.match(self.valid_slug_pattern, self.slug):
errors['slug'] = [N_('Enter a valid value.')]
if errors:
raise ValidationError(errors)
__settings__ = None
def get_setting(self, key, default=None):
if self.__settings__ is None:
self.__settings__ = yaml.safe_load(self.settings or '') or {}
return self.__settings__.get(key, default)
class Meta:
ordering = ['slug']
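get_setting above parses the YAML settings blob lazily and memoizes the result. The same idiom, reduced to a runnable standalone sketch (the Settings class is illustrative):

import yaml

# Sketch of the lazy-parse-and-cache idiom used by Group.get_setting.
class Settings:
    def __init__(self, raw):
        self.raw = raw
        self._parsed = None  # filled on first access

    def get(self, key, default=None):
        if self._parsed is None:
            self._parsed = yaml.safe_load(self.raw or '') or {}
        return self._parsed.get(key, default)

assert Settings('plugins: [lava]').get('plugins') == ['lava']
assert Settings(None).get('missing', 'x') == 'x'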
|
class Group(models.Model, DisplayName):
def add_user(self, user, access=None):
pass
def add_admin(self, user):
pass
def accessible_to(self, user):
pass
def can_submit_results(self, user):
pass
def can_submit_testjobs(self, user):
pass
def writable_by(self, user):
pass
def has_access(self, user, *access_levels):
pass
def __str__(self):
pass
def full_clean(self, **kwargs):
pass
def get_setting(self, key, default=None):
pass
class Meta:
| 12 | 0 | 4 | 0 | 4 | 0 | 2 | 0 | 2 | 2 | 1 | 1 | 10 | 0 | 10 | 11 | 62 | 14 | 48 | 24 | 36 | 0 | 44 | 23 | 32 | 4 | 2 | 1 | 15 |
145,813 |
Linaro/squad
|
Linaro_squad/squad/core/models.py
|
squad.core.models.GroupManager
|
class GroupManager(models.Manager):
def accessible_to(self, user):
if user.is_superuser:
return self.all().order_by('slug').annotate(project_count=Count('projects', distinct=True))
projects = Project.objects.accessible_to(user)
project_ids = [p.id for p in projects]
group_ids = set([p.group_id for p in projects])
if not isinstance(user, AnonymousUser):
group_ids = group_ids | set([g.id for g in Group.objects.filter(members__in=[user])])
return self.filter(
id__in=group_ids
).order_by('slug').annotate(
project_count=Count(
'projects', distinct=True, filter=Q(projects__id__in=project_ids)
)
)
|
class GroupManager(models.Manager):
def accessible_to(self, user):
pass
| 2 | 0 | 15 | 0 | 15 | 0 | 3 | 0 | 1 | 2 | 1 | 0 | 1 | 0 | 1 | 1 | 17 | 1 | 16 | 5 | 14 | 0 | 10 | 5 | 8 | 3 | 1 | 1 | 3 |
145,814 |
Linaro/squad
|
Linaro_squad/squad/core/models.py
|
squad.core.models.Build
|
class Build(models.Model):
project = models.ForeignKey(Project, related_name='builds', on_delete=models.CASCADE)
version = models.CharField(max_length=100)
created_at = models.DateTimeField(auto_now_add=True)
datetime = models.DateTimeField()
patch_source = models.ForeignKey(PatchSource, null=True, blank=True, on_delete=models.CASCADE)
patch_baseline = models.ForeignKey('Build', null=True, blank=True, on_delete=models.CASCADE)
patch_id = models.CharField(max_length=1024, null=True, blank=True)
patch_url = models.URLField(null=True, blank=True)
patch_notified = models.BooleanField(default=False)
keep_data = models.BooleanField(
default=False,
help_text="Keep this build data even after the project data retention period has passed"
)
is_release = models.BooleanField(
default=False,
help_text="Indication whether the build is considered a release"
)
release_label = models.CharField(
max_length=64,
null=True,
blank=True,
help_text="Name or label applied to the release build"
)
callbacks = CallbackForeignKey()
class Meta:
unique_together = ('project', 'version',)
ordering = ['datetime']
def save(self, *args, **kwargs):
if not self.datetime:
self.datetime = timezone.now()
with transaction.atomic():
super(Build, self).save(*args, **kwargs)
ProjectStatus.objects.get_or_create(build=self)
def __str__(self):
return '%s (%s)' % (self.version, self.datetime)
def prefetch(self, *related):
prefetch_related_objects([self], *related)
def reset_events(self):
"""
It might be useful for some projects to "reset" build events like
sending out reports or triggering callbacks.
"""
# Reset notifications
self.status.finished = False
self.status.notified = False
self.status.notified_on_timeout = None
self.status.approved = False
self.status.save()
# Reset patch notifications
self.patch_notified = False
self.save()
# Reset callbacks
self.callbacks.filter(event=callback_events.ON_BUILD_FINISHED, is_sent=True).update(is_sent=False)
@property
def test_summary(self):
return TestSummary(self)
__metadata__ = None
@property
def metadata(self):
"""
The build metadata is the union of the metadata in its test runs.
Common keys with different values are transformed into a list with each
of the different values.
"""
if self.__metadata__ is None:
metadata = {}
for test_run in self.test_runs.only('metadata_file', 'build_id'):
for key, value in test_run.metadata.items():
metadata.setdefault(key, [])
if value not in metadata[key]:
metadata[key].append(value)
for key in metadata.keys():
if len(metadata[key]) == 1:
metadata[key] = metadata[key][0]
else:
metadata[key] = sorted(metadata[key], key=str)
self.__metadata__ = metadata
return self.__metadata__
__metadata_by_testrun__ = None
@property
def metadata_by_testrun(self):
"""
The metadata of each test run in this build, keyed by test run id.
"""
if self.__metadata_by_testrun__ is None:
metadata = {}
for test_run in self.test_runs.only('metadata_file', 'build_id'):
metadata[test_run.id] = test_run.metadata
self.__metadata_by_testrun__ = metadata
return self.__metadata_by_testrun__
__attachments__ = None
@property
def attachments(self):
"""
List of attachments from all testruns
"""
if self.__attachments__ is None:
attachments = {}
for test_run in self.test_runs.all():
attachments[test_run.pk] = []
for attachment in test_run.attachments.all():
attachments[test_run.pk].append(attachment.filename)
self.__attachments__ = attachments
return self.__attachments__
@property
def important_metadata(self):
wanted = (self.project.important_metadata_keys or '').splitlines()
metadata = self.metadata
if wanted:
metadata = {k: metadata[k] for k in wanted if k in metadata}
return metadata
@property
def has_extra_metadata(self):
if set(self.important_metadata.keys()) == set(self.metadata.keys()):
return False
return True
@property
def finished(self):
"""
A finished build is a build that satisfies one of the following conditions:
* it has no pending CI test jobs.
* it has no submitted CI test jobs, and has at least N test runs for each of
the project environments, where N is configured in
Environment.expected_test_runs. Environment.expected_test_runs is
interpreted as follows:
* None (empty): there must be at least one test run for that
environment.
* 0: the environment is ignored, i.e. any amount of test runs will
be ok, including 0.
* N > 0: at least N test runs are expected for that environment
"""
reasons = []
# XXX note that by using test_jobs here, we are adding an implicit
# dependency on squad.ci, which in theory violates our architecture.
testjobs = self.test_jobs
if testjobs.count() > 0:
if testjobs.pending().count() > 0:
# a build that has pending CI jobs is NOT finished
reasons.append("There are unfinished CI jobs")
else:
# carry on, and check whether the number of expected test runs
# per environment is satisfied.
pass
elif self.test_runs.count() == 0:
reasons.append("There are no testjobs or testruns for the build")
# builds with no CI jobs are finished when each environment has
# received the expected amount of test runs
testruns = {
e.id: {
'name': str(e),
'expected': e.expected_test_runs,
'received': 0
}
for e in self.project.environments.all()
}
for t in self.test_runs.filter(completed=True).only('build_id', 'environment_id').all():
testruns[t.environment_id]['received'] += 1
for env, count in testruns.items():
expected = count['expected']
received = count['received']
env_name = count['name']
if expected and expected > 0:
if received == 0:
reasons.append("No test runs for %s received so far" % env_name)
elif received < expected:
reasons.append(
"%d test runs expected for %s, but only %d received so far" % (
expected,
env_name,
received,
)
)
return (len(reasons) == 0, reasons)
@property
def test_suites_by_environment(self):
test_runs = self.test_runs.prefetch_related(
'tests',
'tests__suite',
'environment',
)
template = OrderedDict((
('fail', 0),
('pass', 0),
('skip', 0),
('xfail', 0),
))
result = OrderedDict()
envlist = set([t.environment for t in test_runs])
for env in sorted(envlist, key=lambda env: env.slug):
result[env] = dict()
for tr in test_runs:
for t in tr.tests.all():
if t.suite not in result[tr.environment]:
result[tr.environment][t.suite] = template.copy()
result[tr.environment][t.suite][t.status] += 1
for env in result.keys():
# there should only be one key in the most nested dict
result[env] = sorted(
result[env].items(),
key=lambda suite_dict: suite_dict[0].slug)
return result
def test_jobs_summary(self, per_environment=False):
testjobs = self.test_jobs.only('environment', 'job_status', 'target_build').order_by('environment')
summary = {}
if per_environment:
for env, jobs in groupby(testjobs, lambda tj: tj.environment):
if env is None:
continue
summary[env] = Counter([tj.job_status for tj in jobs])
else:
summary = Counter([tj.job_status for tj in testjobs])
return summary
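The expected_test_runs rules documented in the finished property can be distilled into a tiny standalone check mirroring the loop above (unfinished_reason is an illustrative helper, not SQUAD API):

# Sketch mirroring the per-environment loop in Build.finished: an
# environment only blocks completion when it expects N > 0 runs and has
# received fewer than N; None and 0 expectations never add a reason here.
def unfinished_reason(env_name, expected, received):
    if expected and expected > 0:
        if received == 0:
            return 'No test runs for %s received so far' % env_name
        if received < expected:
            return '%d test runs expected for %s, but only %d received so far' % (
                expected, env_name, received)
    return None

assert unfinished_reason('qemu', 3, 1).startswith('3 test runs expected')
assert unfinished_reason('qemu', 0, 0) is None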
|
class Build(models.Model):
class Meta:
def save(self, *args, **kwargs):
pass
def __str__(self):
pass
def prefetch(self, *related):
pass
def reset_events(self):
'''
It might be useful for some projects to "reset" build events like
sending out reports or triggering callbacks.
'''
pass
@property
def test_summary(self):
pass
@property
def metadata(self):
'''
The build metadata is the union of the metadata in its test runs.
Common keys with different values are transformed into a list with each
of the different values.
'''
pass
@property
def metadata_by_testrun(self):
'''
The metadata of each test run in this build, keyed by test run id.
'''
pass
@property
def attachments(self):
'''
List of attachments from all testruns
'''
pass
@property
def important_metadata(self):
pass
@property
def has_extra_metadata(self):
pass
@property
def finished(self):
'''
A finished build is a build that satisfies one of the following conditions:
* it has no pending CI test jobs.
* it has no submitted CI test jobs, and has at least N test runs for each of
the project environments, where N is configured in
Environment.expected_test_runs. Environment.expected_test_runs is
interpreted as follows:
* None (empty): there must be at least one test run for that
environment.
* 0: the environment is ignored, i.e. any amount of test runs will
be ok, including 0.
* N > 0: at least N test runs are expected for that environment
'''
pass
@property
def test_suites_by_environment(self):
pass
def test_jobs_summary(self, per_environment=False):
pass
| 23 | 5 | 15 | 1 | 11 | 3 | 3 | 0.23 | 1 | 10 | 3 | 0 | 13 | 0 | 13 | 13 | 252 | 33 | 178 | 69 | 155 | 41 | 129 | 61 | 114 | 9 | 1 | 4 | 44 |
145,815 |
Linaro/squad
|
Linaro_squad/squad/core/models.py
|
squad.core.models.KnownIssue
|
class KnownIssue(models.Model):
title = models.CharField(max_length=1024)
test_name = models.CharField(max_length=1024)
url = models.URLField(null=True, blank=True)
notes = models.TextField(null=True, blank=True)
active = models.BooleanField(default=True)
intermittent = models.BooleanField(default=False)
environments = models.ManyToManyField(Environment)
@classmethod
def active_by_environment(cls, environment):
return cls.objects.filter(active=True, environments=environment)
@classmethod
def active_by_project_and_test(cls, project, test_name=None):
qs = cls.objects.filter(active=True, environments__project=project).prefetch_related('environments')
if test_name:
qs = qs.filter(test_name=test_name)
return qs.distinct()
|
class KnownIssue(models.Model):
@classmethod
def active_by_environment(cls, environment):
pass
@classmethod
def active_by_project_and_test(cls, project, test_name=None):
pass
| 5 | 0 | 4 | 0 | 4 | 0 | 2 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 2 | 2 | 21 | 4 | 17 | 13 | 12 | 0 | 15 | 11 | 12 | 2 | 1 | 1 | 3 |
145,816 |
Linaro/squad
|
Linaro_squad/squad/core/models.py
|
squad.core.models.MetricManager
|
class MetricManager(models.Manager):
def by_full_name(self, name):
(suite, metric) = parse_name(name)
return self.filter(metadata__suite=suite, metadata__name=metric)
|
class MetricManager(models.Manager):
def by_full_name(self, name):
pass
| 2 | 0 | 3 | 0 | 3 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 5 | 1 | 4 | 3 | 2 | 0 | 4 | 3 | 2 | 1 | 1 | 0 | 1 |
145,817 |
Linaro/squad
|
Linaro_squad/squad/frontend/tests.py
|
squad.frontend.tests.TestResultTable
|
class TestResultTable(list):
"""
A plain list with a few extra attributes. Each list item represents one row
of the table, and should be an instance of TestResult.
This class also mimics a Django Paginator so that it can be used with our
pagination UI.
"""
def __init__(self):
self.environments = None
self.filters = {
'environment': None,
'suite': None,
}
self.paginator = self
self.paginator.num_pages = 0
self.number = 0
self.all_tests = []
def __get_all_tests__(self, build, search, env=None, suite=None):
queryset = Test.objects.filter(build=build)
if search:
queryset = queryset.filter(metadata__name__icontains=search)
if env:
environment = Environment.objects.filter(project=build.project, slug=env)
if environment.exists():
self.filters['environment'] = environment.first()
queryset = queryset.filter(environment=self.filters['environment'])
if suite:
suite_ = Suite.objects.filter(project=build.project, slug=suite)
if suite_.exists():
self.filters['suite'] = suite_.first()
queryset = queryset.filter(suite=self.filters['suite'])
self.all_tests = queryset.only('result', 'has_known_issues', 'metadata_id').order_by()
# counts how many unique tests are represented in the given build and
# sets pagination data
def __count_pages__(self, per_page):
distinct_tests = set([test.metadata_id for test in self.all_tests])
count = len(distinct_tests)
self.num_pages = count // per_page
if count % per_page > 0:
self.num_pages += 1
def __get_page_filter__(self, page, per_page):
"""
Query to obtain one page of test results. It is used to know which tests
should be on the page. It is ordered so that tests with more failures
come first, then tests with fewer failures, then tests with more skips.
After the tests on the page have been determined here, a new query is
needed to obtain the per-environment test results.
"""
offset = (page - 1) * per_page
stats = defaultdict(lambda: {'pass': 0, 'fail': 0, 'xfail': 0, 'skip': 0})
for test in self.all_tests:
stats[test.metadata_id][test.status] += 1
def keyfunc(item):
metadata_id = item[0]
statuses = item[1]
return tuple((-statuses[k] for k in ['fail', 'xfail', 'skip', 'pass'])) + (metadata_id,)
ordered = sorted(stats.items(), key=keyfunc)
tests_in_page = ordered[offset:offset + per_page]
metadata_ids = [t[0] for t in tests_in_page]
return metadata_ids
@classmethod
def get(cls, build, page, search, per_page=50, env=None, suite=None):
table = cls()
table.__get_all_tests__(build, search, env=env, suite=suite)
table.number = page
table.__count_pages__(per_page)
if table.filters['environment']:
table.environments = {table.filters['environment']}
else:
table.environments = set([t.environment for t in build.test_runs.prefetch_related('environment').all()])
queryset = build.tests
if table.filters['environment']:
queryset = queryset.filter(environment=table.filters['environment'])
if table.filters['suite']:
queryset = queryset.filter(suite=table.filters['suite'])
tests = queryset.filter(
metadata_id__in=table.__get_page_filter__(page, per_page),
).prefetch_related(
'suite__metadata',
'metadata',
)
memo = defaultdict(lambda: defaultdict(list))
for test in tests:
memo[test.full_name][test.environment_id].append(test)
# handle duplicates
for full_name in memo.keys():
env_ids = memo[full_name].keys()
for env_id in env_ids:
test = memo[full_name][env_id][0]
if len(memo[full_name][env_id]) == 1:
memo[full_name][env_id] = [test.status, None]
else:
duplicates = memo[full_name][env_id]
memo[full_name][env_id] = list(test_confidence(None, list_of_duplicates=duplicates))
error_info = {
"test_description": test.metadata.description if test.metadata else '',
"suite_instructions": test.suite.metadata.instructions_to_reproduce if test.suite.metadata else '',
"test_instructions": test.metadata.instructions_to_reproduce if test.metadata else '',
"test_log": test.log or '',
}
info = json.dumps(error_info) if any(error_info.values()) else None
memo[full_name][env_id].append(info)
if 'test_metadata' not in memo[full_name].keys():
memo[full_name]['test_metadata'] = (test.test_run_id, test.suite, test.name)
for test_full_name, results in memo.items():
test_result = TestResult(test_full_name)
test_result.test_run, test_result.suite, test_result.short_name = results.get('test_metadata', None)
for env in table.environments:
test_result.append(results.get(env.id, ["n/a", None]))
table.append(test_result)
table.sort()
return table
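The page ordering comes from keyfunc in __get_page_filter__: tuples of negated status counts sort tests with the most failures first, with metadata_id as the tiebreaker. A runnable sketch with illustrative data:

# Sketch of the ordering used by __get_page_filter__.
stats = {
    1: {'fail': 0, 'xfail': 0, 'skip': 1, 'pass': 9},
    2: {'fail': 3, 'xfail': 0, 'skip': 0, 'pass': 7},
}

def keyfunc(item):
    metadata_id, statuses = item
    return tuple(-statuses[k] for k in ('fail', 'xfail', 'skip', 'pass')) + (metadata_id,)

ordered = sorted(stats.items(), key=keyfunc)
assert [metadata_id for metadata_id, _ in ordered] == [2, 1]  # failures first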
|
class TestResultTable(list):
'''
A plain list with a few extra attributes. Each list item represents one row
of the table, and should be an instance of TestResult.
This class also mimics a Django Paginator so that it can be used with our
pagination UI.
'''
def __init__(self):
pass
def __get_all_tests__(self, build, search, env=None, suite=None):
pass
def __count_pages__(self, per_page):
pass
def __get_page_filter__(self, page, per_page):
'''
Query to obtain one page of test results. It is used to know which tests
should be on the page. It is ordered so that tests with more failures
come first, then tests with fewer failures, then tests with more skips.
After the tests on the page have been determined here, a new query is
needed to obtain the per-environment test results.
'''
pass
def keyfunc(item):
pass
@classmethod
def get(cls, build, page, search, per_page=50, env=None, suite=None):
pass
| 8 | 2 | 21 | 4 | 16 | 1 | 5 | 0.17 | 1 | 6 | 4 | 0 | 4 | 6 | 5 | 38 | 140 | 28 | 96 | 40 | 88 | 16 | 80 | 39 | 73 | 15 | 2 | 3 | 27 |
145,818 |
Linaro/squad
|
Linaro_squad/squad/frontend/tests.py
|
squad.frontend.tests.TestResult
|
class TestResult(list):
"""
A list of pass/fail/skip statuses, one per environment. Represents one row
of the test results table.
"""
def __init__(self, name, test_run=None, suite=None, short_name=None):
self.name = name
self.short_name = short_name
self.test_run = test_run
self.suite = suite
self.totals = {"pass": 0, "fail": 0, "xfail": 0, "skip": 0, "n/a": 0}
def append(self, item):
self.totals[item[0]] += 1
return super(TestResult, self).append(item)
def ordering(self):
return tuple((-self.totals[k] for k in ("fail", "xfail", "skip", "pass", "n/a"))) + (self.name,)
def __lt__(self, other):
return self.ordering() < other.ordering()
|
class TestResult(list):
'''
A list of pass/fail/skip statuses, one per environment. Represents one row
of the test results table.
'''
def __init__(self, name, test_run=None, suite=None, short_name=None):
pass
def append(self, item):
pass
def ordering(self):
pass
def __lt__(self, other):
pass
| 5 | 1 | 3 | 0 | 3 | 0 | 1 | 0.29 | 1 | 2 | 0 | 0 | 4 | 5 | 4 | 37 | 22 | 4 | 14 | 10 | 9 | 4 | 14 | 10 | 9 | 1 | 2 | 0 | 4 |
145,819 |
Linaro/squad
|
Linaro_squad/squad/frontend/project_settings.py
|
squad.frontend.project_settings.DeleteProjectForm
|
class DeleteProjectForm(DeleteConfirmationForm):
label = N_('Type the project slug (the name used in URLs) to confirm')
no_match_message = N_('The confirmation does not match the project slug')
|
class DeleteProjectForm(DeleteConfirmationForm):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 3 | 0 | 3 | 3 | 2 | 0 | 3 | 3 | 2 | 0 | 2 | 0 | 0 |
145,820 |
Linaro/squad
|
Linaro_squad/squad/frontend/management/commands/get_token.py
|
squad.frontend.management.commands.get_token.Command
|
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument(
'PROJECT',
help='Target project, on the form $group/$project',
)
def handle(self, *args, **options):
groupname, projectname = options['PROJECT'].split('/')
group, _ = Group.objects.get_or_create(slug=groupname, defaults={'name': groupname})
project, _ = group.projects.get_or_create(slug=projectname, defaults={'name': projectname})
user, _ = User.objects.get_or_create(username='%s-%s-submitter' % (groupname, projectname))
GroupMember.objects.get_or_create(group=group, user=user, defaults={'access': 'submitter'})
token, _ = Token.objects.get_or_create(user=user)
self.output(token.key)
def output(self, msg):
print(msg)
|
class Command(BaseCommand):
def add_arguments(self, parser):
pass
def handle(self, *args, **options):
pass
def output(self, msg):
pass
| 4 | 0 | 6 | 1 | 5 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 3 | 0 | 3 | 3 | 21 | 5 | 16 | 9 | 12 | 0 | 13 | 9 | 9 | 1 | 1 | 0 | 3 |
145,821 |
Linaro/squad
|
Linaro_squad/squad/frontend/group_settings.py
|
squad.frontend.group_settings.NewProjectView
|
class NewProjectView(GroupViewMixin, CreateView):
template_name = 'squad/group_settings/new_project.jinja2'
form_class = NewProjectForm
def get_extra_form_kwargs(self):
return {'instance': Project(group=self.group)}
def form_valid(self, form):
project = form.save()
return redirect(reverse('project-settings', args=[self.group.slug, project.slug]))
|
class NewProjectView(GroupViewMixin, CreateView):
def get_extra_form_kwargs(self):
pass
def form_valid(self, form):
pass
| 3 | 0 | 3 | 0 | 3 | 0 | 1 | 0 | 2 | 1 | 1 | 0 | 2 | 0 | 2 | 8 | 11 | 3 | 8 | 6 | 5 | 0 | 8 | 6 | 5 | 1 | 2 | 0 | 2 |
145,822 |
Linaro/squad
|
Linaro_squad/squad/frontend/group_settings.py
|
squad.frontend.group_settings.NewGroupView
|
class NewGroupView(CreateView):
template_name = 'squad/group_settings/new_group.jinja2'
form_class = NewGroupForm
def form_valid(self, form):
group = form.save()
group.add_admin(self.request.user)
return redirect(reverse('group-settings', args=[group.slug]))
|
class NewGroupView(CreateView):
def form_valid(self, form):
pass
| 2 | 0 | 4 | 0 | 4 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 9 | 2 | 7 | 5 | 5 | 0 | 7 | 5 | 5 | 1 | 1 | 0 | 1 |
145,823 |
Linaro/squad
|
Linaro_squad/squad/core/models.py
|
squad.core.models.MetricThreshold
|
class MetricThreshold(models.Model):
class Meta:
unique_together = ('project', 'environment', 'name',)
project = models.ForeignKey(Project, related_name='thresholds', on_delete=models.CASCADE)
environment = models.ForeignKey(Environment, null=True, blank=True, default=None, on_delete=models.CASCADE)
name = models.CharField(max_length=1024)
value = models.FloatField(null=True, blank=True)
is_higher_better = models.BooleanField(default=False)
def _check_duplicates(self):
"""
We have to make sure of the following:
- a project-wide threshold CANNOT collide with an environment-specific one:
we need to check if there's any environment threshold that matches
- an environment-specific threshold CANNOT collide with a project-wide one:
we need to check if there's already a matching project-wide threshold
"""
project_wide = self.environment is None
existingThresholds = MetricThreshold.objects.filter(
name=self.name,
value=self.value,
is_higher_better=self.is_higher_better,
project=self.project,
environment__isnull=not project_wide)
if existingThresholds.count() > 0:
threshold = existingThresholds.first()
if threshold.environment is not None:
raise ValidationError("Found a threshold for environment '%s' with the exact same attributes" % threshold.environment)
else:
raise ValidationError("Found a threshold that already applies to the whole project")
def save(self, *args, **kwargs):
self._check_duplicates()
super().save(*args, **kwargs)
__regex__ = None
@property
def name_regex(self):
return r'^%s$' % re.escape(self.name).replace('\\*', '.*?')
def match(self, metric_fullname):
if self.__regex__ is None:
self.__regex__ = re.compile(self.name_regex)
return self.__regex__.match(metric_fullname)
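name_regex escapes the threshold name and then turns the escaped '*' into a non-greedy wildcard, so one threshold can cover a family of metrics. A standalone sketch of the same construction:

import re

# Sketch of MetricThreshold.name_regex: escape everything, then convert
# the escaped '*' back into a '.*?' wildcard, anchored at both ends.
def threshold_regex(name):
    return r'^%s$' % re.escape(name).replace('\\*', '.*?')

pattern = re.compile(threshold_regex('boot/*-time'))
assert pattern.match('boot/kernel-time')
assert pattern.match('boot/-time')  # '*' may also match nothing
assert not pattern.match('reboot/kernel-time')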
|
class MetricThreshold(models.Model):
class Meta:
def _check_duplicates(self):
'''
We have to make sure of the following:
- a project-wide threshold CANNOT collide with an environment-specific one:
we need to check if there's any environment threshold that matches
- an environment-specific threshold CANNOT collide with a project-wide one:
we need to check if there's already a matching project-wide threshold
'''
pass
def save(self, *args, **kwargs):
pass
@property
def name_regex(self):
pass
def match(self, metric_fullname):
pass
| 7 | 1 | 8 | 1 | 6 | 2 | 2 | 0.21 | 1 | 1 | 0 | 0 | 4 | 0 | 4 | 4 | 49 | 9 | 33 | 17 | 26 | 7 | 26 | 16 | 20 | 3 | 1 | 2 | 7 |
145,824 |
Linaro/squad
|
Linaro_squad/squad/core/models.py
|
squad.core.models.MetricsSummary
|
class MetricsSummary(object):
def __init__(self, build, environment=None):
queryset = Metric.objects.filter(build=build)
if environment:
queryset = queryset.filter(environment=environment)
metrics = queryset.all()
values = [m.result for m in metrics]
self.value = geomean(values)
self.has_metrics = len(values) > 0
|
class MetricsSummary(object):
def __init__(self, build, environment=None):
pass
| 2 | 0 | 8 | 0 | 8 | 0 | 2 | 0 | 1 | 1 | 1 | 0 | 1 | 2 | 1 | 1 | 10 | 1 | 9 | 7 | 7 | 0 | 9 | 7 | 7 | 2 | 1 | 1 | 2 |
145,825 |
Linaro/squad
|
Linaro_squad/squad/core/models.py
|
squad.core.models.NotificationDelivery
|
class NotificationDelivery(models.Model):
status = models.ForeignKey('ProjectStatus', related_name='deliveries', on_delete=models.CASCADE)
subject = models.CharField(max_length=40, null=True, blank=True)
txt = models.CharField(max_length=40, null=True, blank=True)
html = models.CharField(max_length=40, null=True, blank=True)
class Meta:
unique_together = ('status', 'subject', 'txt', 'html')
@classmethod
def exists(cls, status, subject, txt, html):
subject_hash = sha1(subject.encode()).hexdigest()
txt_hash = sha1(txt.encode()).hexdigest()
html_hash = sha1(html.encode()).hexdigest()
obj, created = cls.objects.get_or_create(
status=status,
subject=subject_hash,
txt=txt_hash,
html=html_hash,
)
return (not created)
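exists() above stores sha1 digests of the subject and bodies rather than the full text, so a repeated delivery can be detected cheaply. A standalone sketch of the idea (seen and already_delivered are illustrative):

from hashlib import sha1

# Sketch: detect repeated notifications by hashing their parts.
seen = set()

def already_delivered(subject, txt, html):
    key = tuple(sha1(part.encode()).hexdigest() for part in (subject, txt, html))
    if key in seen:
        return True
    seen.add(key)
    return False

assert already_delivered('s', 'body', '<p>body</p>') is False
assert already_delivered('s', 'body', '<p>body</p>') is True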
|
class NotificationDelivery(models.Model):
class Meta:
@classmethod
def exists(cls, status, subject, txt, html):
pass
| 4 | 0 | 11 | 0 | 11 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 22 | 3 | 19 | 13 | 15 | 0 | 13 | 12 | 10 | 1 | 1 | 0 | 1 |
145,826 |
Linaro/squad
|
Linaro_squad/squad/core/models.py
|
squad.core.models.PatchSource
|
class PatchSource(models.Model):
"""
A patch source is a platform that a patch comes from, e.g. github,
a gitlab instance, a gerrit instance. The *implementation* field specifies
which plugin should handle the implementation details for that given patch
source.
"""
name = models.CharField(max_length=256, unique=True)
username = models.CharField(max_length=128)
_password = models.CharField(max_length=256, null=True, blank=True, db_column="password")
url = CustomURLField(help_text="scheme://host, ex: 'http://github.com', 'ssh://gerrit.host, etc'")
token = models.CharField(max_length=1024, blank=True, null=True)
implementation = PluginField(
default='null',
features=[
Plugin.notify_patch_build_created,
Plugin.notify_patch_build_finished,
],
)
def get_password(self):
if self._password:
return decrypt(self._password)
return None
def set_password(self, new_password):
self._password = encrypt(new_password)
password = property(get_password, set_password)
def get_implementation(self):
return get_plugin_instance(self.implementation)
def get_url(self, patch_id):
return self.get_implementation().get_url(patch_id)
def __str__(self):
return 'PatchSource %s (%s)' % (self.name, self.implementation)
|
class PatchSource(models.Model):
'''
A patch source is a platform that a patch comes from, e.g. github,
a gitlab instance, a gerrit instance. The *implementation* field specifies
which plugin should handle the implementation details for that given patch
source.
'''
def get_password(self):
pass
def set_password(self, new_password):
pass
def get_implementation(self):
pass
def get_url(self, patch_id):
pass
def __str__(self):
pass
| 6 | 1 | 2 | 0 | 2 | 0 | 1 | 0.23 | 1 | 0 | 0 | 0 | 5 | 0 | 5 | 5 | 38 | 6 | 26 | 13 | 20 | 6 | 20 | 13 | 14 | 2 | 1 | 1 | 6 |
145,827 |
Linaro/squad
|
Linaro_squad/squad/core/models.py
|
squad.core.models.Metric
|
class Metric(models.Model):
build = models.ForeignKey(Build, related_name='metrics', on_delete=models.CASCADE, null=True)
environment = models.ForeignKey(Environment, related_name='metrics', on_delete=models.CASCADE, null=True)
test_run = models.ForeignKey(TestRun, related_name='metrics', on_delete=models.CASCADE)
suite = models.ForeignKey(Suite, on_delete=models.CASCADE)
metadata = models.ForeignKey(
SuiteMetadata,
null=True,
related_name='+',
limit_choices_to={'kind': 'metric'},
on_delete=models.CASCADE,
)
result = models.FloatField()
unit = models.CharField(null=True, max_length=30)
measurements = models.TextField() # comma-separated float numbers
is_outlier = models.BooleanField(default=False)
objects = MetricManager()
@property
def measurement_list(self):
if self.measurements:
return [float(n) for n in self.measurements.split(',')]
else:
return []
@property
def full_name(self):
return join_name(self.metadata.suite, self.name)
@property
def name(self):
if self.metadata is None:
return 'missing metric name'
return self.metadata.name
def __str__(self):
return '%s: %f' % (self.name, self.result)
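measurement_list above decodes the comma-separated measurements field; a one-function standalone sketch:

# Sketch of Metric.measurement_list: comma-separated floats, with the
# empty string (or None) mapping to an empty list.
def measurement_list(measurements):
    if measurements:
        return [float(n) for n in measurements.split(',')]
    return []

assert measurement_list('1.5,2.0') == [1.5, 2.0]
assert measurement_list('') == []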
|
class Metric(models.Model):
@property
def measurement_list(self):
pass
@property
def full_name(self):
pass
@property
def name(self):
pass
def __str__(self):
pass
| 8 | 0 | 3 | 0 | 3 | 0 | 2 | 0.03 | 1 | 1 | 0 | 0 | 4 | 0 | 4 | 4 | 38 | 5 | 33 | 18 | 25 | 1 | 23 | 15 | 18 | 2 | 1 | 1 | 6 |
145,828 |
Linaro/squad
|
Linaro_squad/squad/core/models.py
|
squad.core.models.Attachment
|
class Attachment(models.Model):
test_run = models.ForeignKey(TestRun, related_name='attachments', on_delete=models.CASCADE)
filename = models.CharField(null=False, max_length=1024)
mimetype = models.CharField(null=False, max_length=128, default="application/octet-stream")
storage = models.FileField(null=True)
length = models.IntegerField(default=None)
__data__ = None
@property
def data(self):
if self.__data__ is None:
if self.storage:
self.__data__ = self.storage.read()
self.storage.seek(0)
else:
self.__data__ = b''
return self.__data__
def save_file(self, filename, contents):
storage_save(self, self.storage, filename, contents)
|
class Attachment(models.Model):
@property
def data(self):
pass
def save_file(self, filename, contents):
pass
| 4 | 0 | 5 | 0 | 5 | 0 | 2 | 0 | 1 | 0 | 0 | 0 | 2 | 0 | 2 | 2 | 21 | 3 | 18 | 10 | 14 | 0 | 16 | 9 | 13 | 3 | 1 | 2 | 4 |
145,829 |
Linaro/squad
|
Linaro_squad/squad/core/models.py
|
squad.core.models.Annotation
|
class Annotation(models.Model):
description = models.CharField(max_length=1024, null=True, blank=True)
build = models.OneToOneField(Build, on_delete=models.CASCADE)
def __str__(self):
return '%s' % self.description
|
class Annotation(models.Model):
def __str__(self):
pass
| 2 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 6 | 1 | 5 | 4 | 3 | 0 | 5 | 4 | 3 | 1 | 1 | 0 | 1 |
145,830 |
Linaro/squad
|
Linaro_squad/squad/core/models.py
|
squad.core.models.AdminSubscription
|
class AdminSubscription(models.Model):
project = models.ForeignKey(Project, related_name='admin_subscriptions', on_delete=models.CASCADE)
email = models.CharField(max_length=1024, validators=[EmailValidator()])
def __str__(self):
return '%s on %s' % (self.email, self.project)
|
class AdminSubscription(models.Model):
def __str__(self):
pass
| 2 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 6 | 1 | 5 | 4 | 3 | 0 | 5 | 4 | 3 | 1 | 1 | 0 | 1 |
145,831 |
Linaro/squad
|
Linaro_squad/squad/core/migrations/0161_add_metricthreshold_perm_to_squad_group.py
|
squad.core.migrations.0161_add_metricthreshold_perm_to_squad_group.Migration
|
class Migration(migrations.Migration):
dependencies = [
('core', '0160_add_project_to_metricthreshold'),
]
operations = [
migrations.RunPython(
create_squad_group_and_add_users,
reverse_code=migrations.RunPython.noop
)
]
|
class Migration(migrations.Migration):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 10 | 3 | 9 | 0 | 3 | 3 | 2 | 0 | 1 | 0 | 0 |
145,832 |
Linaro/squad
|
Linaro_squad/squad/core/migrations/0162_project_add_build_confidence_settings.py
|
squad.core.migrations.0162_project_add_build_confidence_settings.Migration
|
class Migration(migrations.Migration):
dependencies = [
('core', '0161_add_metricthreshold_perm_to_squad_group'),
]
operations = [
migrations.AddField(
model_name='project',
name='build_confidence_count',
field=models.IntegerField(default=20, help_text='Number of previous builds to compare to'),
),
migrations.AddField(
model_name='project',
name='build_confidence_threshold',
field=models.IntegerField(default=90, help_text='Percentage of previous builds that built successfully'),
),
]
|
class Migration(migrations.Migration):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 18 | 2 | 16 | 3 | 15 | 0 | 3 | 3 | 2 | 0 | 1 | 0 | 0 |
145,833 |
Linaro/squad
|
Linaro_squad/squad/core/migrations/0163_hirtoricalemailtemplate_update.py
|
squad.core.migrations.0163_hirtoricalemailtemplate_update.Migration
|
class Migration(migrations.Migration):
dependencies = [
('core', '0162_project_add_build_confidence_settings'),
]
operations = [
migrations.AlterModelOptions(
name='historicalemailtemplate',
options={'get_latest_by': ('history_date', 'history_id'), 'ordering': ('-history_date', '-history_id'), 'verbose_name': 'historical email template', 'verbose_name_plural': 'historical email templates'},
),
migrations.AlterField(
model_name='historicalemailtemplate',
name='history_date',
field=models.DateTimeField(db_index=True),
),
]
|
class Migration(migrations.Migration):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 17 | 2 | 15 | 3 | 14 | 0 | 3 | 3 | 2 | 0 | 1 | 0 | 0 |
145,834 |
Linaro/squad
|
Linaro_squad/squad/core/migrations/0164_django_update.py
|
squad.core.migrations.0164_django_update.Migration
|
class Migration(migrations.Migration):
dependencies = [
('core', '0163_hirtoricalemailtemplate_update'),
]
operations = [
migrations.AlterField(
model_name='projectstatus',
name='notified_on_timeout',
field=models.BooleanField(default=None, null=True),
),
migrations.AlterField(
model_name='test',
name='has_known_issues',
field=models.BooleanField(null=True),
),
migrations.AlterField(
model_name='test',
name='result',
field=models.BooleanField(null=True),
),
]
|
class Migration(migrations.Migration):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 23 | 2 | 21 | 3 | 20 | 0 | 3 | 3 | 2 | 0 | 1 | 0 | 0 |
145,835 |
Linaro/squad
|
Linaro_squad/squad/core/migrations/0165_buildsummary_uniqueness.py
|
squad.core.migrations.0165_buildsummary_uniqueness.Migration
|
class Migration(migrations.Migration):
dependencies = [
('core', '0164_django_update'),
]
operations = [
migrations.RunPython(
remove_duplicates,
reverse_code=migrations.RunPython.noop
),
migrations.AlterUniqueTogether(
name='buildsummary',
unique_together={('build', 'environment')},
),
]
|
class Migration(migrations.Migration):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 16 | 2 | 14 | 3 | 13 | 0 | 3 | 3 | 2 | 0 | 1 | 0 | 0 |
145,836 |
Linaro/squad
|
Linaro_squad/squad/api/rest.py
|
squad.api.rest.EnvironmentViewSet
|
class EnvironmentViewSet(ModelViewSet):
"""
List of environments. Only environments belonging to public projects and
projects you have access to are available.
"""
queryset = Environment.objects
project_lookup_key = 'project__in'
serializer_class = EnvironmentSerializer
filterset_fields = ('project', 'slug', 'name')
filter_fields = filterset_fields # TODO: remove when django-filters 1.x is not supported anymore
filterset_class = EnvironmentFilter
filter_class = filterset_class # TODO: remove when django-filters 1.x is not supported anymore
search_fields = ('slug', 'name')
ordering_fields = ('id', 'slug', 'name')
|
class EnvironmentViewSet(ModelViewSet):
'''
List of environments. Only environments belonging to public projects and
projects you have access to are available.
'''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0.6 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 14 | 0 | 10 | 10 | 9 | 6 | 10 | 10 | 9 | 0 | 2 | 0 | 0 |
145,837 |
Linaro/squad
|
Linaro_squad/squad/api/rest.py
|
squad.api.rest.EmailTemplateViewSet
|
class EmailTemplateViewSet(viewsets.ModelViewSet):
"""
List of email templates used.
"""
queryset = EmailTemplate.objects.all()
serializer_class = EmailTemplateSerializer
filterset_fields = ('name',)
filter_fields = filterset_fields # TODO: remove when django-filters 1.x is not supported anymore
ordering_fields = ('name', 'id')
|
class EmailTemplateViewSet(viewsets.ModelViewSet):
'''
List of email templates used.
'''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0.67 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 | 0 | 6 | 6 | 5 | 4 | 6 | 6 | 5 | 0 | 1 | 0 | 0 |
145,838 |
Linaro/squad
|
Linaro_squad/squad/api/rest.py
|
squad.api.rest.DynamicFieldsModelSerializer
|
class DynamicFieldsModelSerializer(serializers.ModelSerializer):
"""
A ModelSerializer that takes an additional `fields` argument that
controls which fields should be displayed.
"""
def __init__(self, *args, **kwargs):
# Instantiate the superclass normally
super(DynamicFieldsModelSerializer, self).__init__(*args, **kwargs)
fields = self.context['request'].query_params.get('fields')
if fields:
fields = fields.split(',')
# Drop any fields that are not specified in the `fields` argument.
allowed = set(fields)
existing = set(self.fields.keys())
for field_name in existing - allowed:
self.fields.pop(field_name)
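In practice a client selects fields with a query string such as ?fields=id,version (illustrative); the serializer then pops everything else. The trimming step, as a standalone sketch:

# Sketch of the field-trimming step in DynamicFieldsModelSerializer.
def trim_fields(all_fields, requested):
    if not requested:
        return dict(all_fields)
    allowed = set(requested.split(','))
    return {name: field for name, field in all_fields.items() if name in allowed}

fields = {'id': 1, 'version': 2, 'metadata': 3}
assert set(trim_fields(fields, 'id,version')) == {'id', 'version'}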
|
class DynamicFieldsModelSerializer(serializers.ModelSerializer):
'''
A ModelSerializer that takes an additional `fields` argument that
controls which fields should be displayed.
'''
def __init__(self, *args, **kwargs):
pass
| 2 | 1 | 12 | 1 | 9 | 2 | 3 | 0.6 | 1 | 2 | 0 | 20 | 1 | 0 | 1 | 1 | 17 | 1 | 10 | 6 | 8 | 6 | 10 | 6 | 8 | 3 | 1 | 2 | 3 |
145,839 |
Linaro/squad
|
Linaro_squad/squad/api/rest.py
|
squad.api.rest.DelayedReportViewSet
|
class DelayedReportViewSet(viewsets.ReadOnlyModelViewSet):
queryset = DelayedReport.objects
serializer_class = DelayedReportSerializer
filterset_fields = ('build', 'baseline', 'callback', 'callback_notified', 'email_recipient', 'status_code', 'error_message', 'created_at')
filter_fields = filterset_fields # TODO: remove when django-filters 1.x is not supported anymore
filterset_class = DelayedReportFilter
filter_class = filterset_class # TODO: remove when django-filters 1.x is not supported anymore
ordering_fields = ('id', 'created_at')
|
class DelayedReportViewSet(viewsets.ReadOnlyModelViewSet):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 | 1 | 8 | 8 | 7 | 2 | 8 | 8 | 7 | 0 | 1 | 0 | 0 |
145,840 |
Linaro/squad
|
Linaro_squad/squad/api/rest.py
|
squad.api.rest.ConfidenceSerializer
|
class ConfidenceSerializer(serializers.BaseSerializer):
def to_representation(self, confidence):
return {
"count": confidence.count,
"passes": confidence.passes,
"score": confidence.score,
}
|
class ConfidenceSerializer(serializers.BaseSerializer):
def to_representation(self, confidence):
pass
| 2 | 0 | 6 | 0 | 6 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 7 | 0 | 7 | 2 | 5 | 0 | 3 | 2 | 1 | 1 | 1 | 0 | 1 |
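Since to_representation only touches three attributes, any object exposing them can be serialized. A minimal sketch using a hypothetical stand-in for the real Confidence object:

from types import SimpleNamespace

confidence = SimpleNamespace(count=10, passes=9, score=90.0)
print(ConfidenceSerializer(confidence).data)
# {'count': 10, 'passes': 9, 'score': 90.0}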
145,841 |
Linaro/squad
|
Linaro_squad/squad/api/rest.py
|
squad.api.rest.BuildsComparisonSerializer
|
class BuildsComparisonSerializer(serializers.BaseSerializer):
def to_representation(self, comparison):
ret = {}
if comparison.regressions is not None:
ret['regressions'] = comparison.regressions_grouped_by_suite
if comparison.fixes is not None:
ret['fixes'] = comparison.fixes_grouped_by_suite
return ret
|
class BuildsComparisonSerializer(serializers.BaseSerializer):
def to_representation(self, comparison):
pass
| 2 | 0 | 7 | 0 | 7 | 0 | 3 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 8 | 0 | 8 | 3 | 6 | 0 | 8 | 3 | 6 | 3 | 1 | 1 | 3 |
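The output keys are conditional: a section appears only when the corresponding attribute is not None. A sketch with a hypothetical stand-in comparison object (attribute names mirror the serializer; the values are made up):

from types import SimpleNamespace

comparison = SimpleNamespace(
    regressions={"suite-a": ["test1"]},
    regressions_grouped_by_suite={"suite-a": ["test1"]},
    fixes=None,  # None, so no 'fixes' key appears in the result
    fixes_grouped_by_suite=None,
)
print(BuildsComparisonSerializer(comparison).data)
# {'regressions': {'suite-a': ['test1']}}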
145,842 |
Linaro/squad
|
Linaro_squad/squad/api/rest.py
|
squad.api.rest.BackendViewSet
|
class BackendViewSet(viewsets.ModelViewSet):
"""
List of CI backends used.
"""
queryset = Backend.objects.all()
serializer_class = BackendSerializer
filterset_fields = ('implementation_type', 'name', 'url')
filter_fields = filterset_fields # TODO: remove when django-filters 1.x is not supported anymore
search_fields = ('implementation_type', 'name', 'url')
ordering_fields = ('id', 'implementation_type', 'name', 'url')
|
class BackendViewSet(viewsets.ModelViewSet):
'''
List of CI backends used.
'''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0.57 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0 | 7 | 7 | 6 | 4 | 7 | 7 | 6 | 0 | 1 | 0 | 0 |
145,843 |
Linaro/squad
|
Linaro_squad/squad/api/rest.py
|
squad.api.rest.AttachmentSerializer
|
class AttachmentSerializer(serializers.ModelSerializer):
download_url = serializers.SerializerMethodField()
class Meta:
model = Attachment
fields = ('download_url', 'filename', 'mimetype', 'length')
def get_download_url(self, attachment):
request = self.context.get('request')
if request is None:
return None
base_url = rest_reverse('testrun-detail', args=[attachment.test_run.pk], request=request)
filename_url_encoded = urllib.parse.quote(attachment.filename, safe='')
return f'{base_url}attachments/?filename={filename_url_encoded}'
|
class AttachmentSerializer(serializers.ModelSerializer):
class Meta:
def get_download_url(self, attachment):
pass
| 3 | 0 | 7 | 0 | 7 | 0 | 2 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 15 | 3 | 12 | 9 | 9 | 0 | 12 | 9 | 9 | 2 | 1 | 1 | 2 |
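The download URL is the test run's detail URL plus an `attachments/` suffix, with the filename percent-encoded (safe='' also encodes slashes). A sketch of the resulting shape, with a made-up pk and filename:

import urllib.parse

base_url = "https://squad.example.com/api/testruns/42/"  # what rest_reverse would return
filename = "logs/boot log.txt"
quoted = urllib.parse.quote(filename, safe="")
print(f"{base_url}attachments/?filename={quoted}")
# https://squad.example.com/api/testruns/42/attachments/?filename=logs%2Fboot%20log.txt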
145,844 |
Linaro/squad
|
Linaro_squad/squad/core/migrations/0091_notification_delivery_remove_unique_status.py
|
squad.core.migrations.0091_notification_delivery_remove_unique_status.Migration
|
class Migration(migrations.Migration):
dependencies = [
('core', '0090_populate_test_has_known_issues'),
]
operations = [
migrations.AlterField(
model_name='notificationdelivery',
name='status',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='deliveries', to='core.ProjectStatus'),
),
]
|
class Migration(migrations.Migration):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 13 | 2 | 11 | 3 | 10 | 0 | 3 | 3 | 2 | 0 | 1 | 0 | 0 |
145,845 |
Linaro/squad
|
Linaro_squad/squad/api/rest.py
|
squad.api.rest.APIRouter
|
class APIRouter(ExtendedDefaultRouter):
APIRootView = API
|
class APIRouter(ExtendedDefaultRouter):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 1 | 0 | 0 |
145,846 |
Linaro/squad
|
Linaro_squad/squad/api/rest.py
|
squad.api.rest.API
|
class API(routers.APIRootView):
"""
Welcome to the SQUAD API. This API is self-describing, i.e. all of the
available endpoints are accessible from this browseable user interface, and
are self-describing themselves. See below for a list of them.
Notes on the API:
* All requests for lists of objects are paginated by default. Make sure you
take the `count` and `next` fields of the response into account so you can
navigate to the rest of the objects.
* Only public projects are available through the API without
authentication. Non-public projects require authentication using a valid
API token, and the corresponding user account must also have access to
the project in question.
* All URLs displayed in this API browser are clickable.
* Client interaction is enabled with <a href="/api/schema/">/api/schema</a>
URL.
* Testrun statuses are available at:
* `/testruns/<testrun_id>/status/`
"""
def get_view_name(self):
return "API"
|
class API(routers.APIRootView):
'''
Welcome to the SQUAD API. This API is self-describing, i.e. all of the
available endpoints are accessible from this browseable user interface, and
are self-describing themselves. See below for a list of them.
Notes on the API:
* All requests for lists of objects are paginated by default. Make sure you
take the `count` and `next` fields of the response into account so you can
navigate to the rest of the objects.
* Only public projects are available through the API without
authentication. Non-public projects require authentication using a valid
API token, and the corresponding user account must also have access to
the project in question.
* All URLs displayed in this API browser are clickable.
* Client interaction is enabled with <a href="/api/schema/">/api/schema</a>
URL.
* Testrun statuses are available at:
* `/testruns/<testrun_id>/status/`
'''
def get_view_name(self):
pass
| 2 | 1 | 2 | 0 | 2 | 0 | 1 | 6 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 28 | 7 | 3 | 2 | 1 | 18 | 3 | 2 | 1 | 1 | 1 | 0 | 1 |
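The docstring's pagination note translates into a simple follow-the-`next`-link loop on the client side. A hedged sketch (URL and token are placeholders):

import requests

url = "https://squad.example.com/api/projects/"
headers = {"Authorization": "Token <api-token>"}
while url:
    page = requests.get(url, headers=headers).json()
    for item in page["results"]:
        print(item["id"])
    url = page["next"]  # None on the last page, which ends the loop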
145,847 |
Linaro/squad
|
Linaro_squad/squad/api/filters.py
|
squad.api.filters.RestFrameworkFilterBackend
|
class RestFrameworkFilterBackend(backends.DjangoFilterBackend):
filterset_base = FilterSet
def to_html(self, request, queryset, view):
return super().to_html(request, queryset, view)
|
class RestFrameworkFilterBackend(backends.DjangoFilterBackend):
def to_html(self, request, queryset, view):
pass
| 2 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 1 | 0 | 1 | 1 | 0 | 1 | 1 | 5 | 1 | 4 | 3 | 2 | 0 | 4 | 3 | 2 | 1 | 1 | 0 | 1 |
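One common way such a custom backend gets activated is through DRF settings; the snippet below is illustrative only, as the repository's actual settings are not shown here:

# Hypothetical settings.py fragment; SQUAD's real configuration may differ.
REST_FRAMEWORK = {
    "DEFAULT_FILTER_BACKENDS": [
        "squad.api.filters.RestFrameworkFilterBackend",
    ],
}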