# @HEADER
# *****************************************************************************
# TriBITS: Tribal Build, Integrate, and Test System
#
# Copyright 2013-2016 NTESS and the TriBITS contributors.
# SPDX-License-Identifier: BSD-3-Clause
# *****************************************************************************
# @HEADER

try:
  # Python 2
  from urllib2 import urlopen
  from urllib2 import quote as urlquote
except ImportError:
  # Python 3
  from urllib.request import urlopen
  from urllib.parse import quote as urlquote

import os
import sys
import hashlib
import json
import datetime
import copy
import pprint
import csv

from FindGeneralScriptSupport import *
from GeneralScriptSupport import *
from Python2and3 import u, csvReaderNext

import cdash_build_testing_date as CBTD

# Accept the --date input option with values 'today', 'yesterday', or some
# 'YYYY-MM-DD' value.
#
def convertInputDateArgToYYYYMMDD(cdashProjectTestingDayStartTime, dateText,
    currentDateTimeStr=None,  # Used for unit testing only
  ):
  if dateText == "yesterday" or dateText == "today":
    if dateText == "yesterday": dayIncr = -1
    else: dayIncr = 0
    dateTime = CBTD.getRelativeCDashBuildStartTimeFromCmndLineArgs(
      currentDateTimeStr, cdashProjectTestingDayStartTime, dayIncr)
    rtnDate = CBTD.getDateOnlyFromDateTime(dateTime)
  else:
    rtnDate = validateAndConvertYYYYMMDD(dateText)
  return rtnDate
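
# Illustrative example (hypothetical values; cdashProjectTestingDayStartTime
# is an 'hh:mm' string like "04:01"):
#
#   convertInputDateArgToYYYYMMDD("04:01", "2018-10-29")
#     # Returns datetime.datetime(2018, 10, 29, 0, 0)
#
#   convertInputDateArgToYYYYMMDD("04:01", "yesterday")
#     # Returns the date-only value for the prior CDash testing day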

# Validate a date YYYY-MM-DD string and return a date object for the
# 'datetime' module.
#
def validateAndConvertYYYYMMDD(dateText):
  try:
    return datetime.datetime.strptime(dateText, '%Y-%m-%d')
  except ValueError:
    raise ValueError("Incorrect date format for '"+dateText+"', should be YYYY-MM-DD")

# Get a file name string from a general text string.
#
# This replaces non-alphanumeric chars with '_'.
#
def getFileNameStrFromText(inputStr):
  fileNameStr = ""
  for char in inputStr:
    if char.isalnum():
      fileNameStr += char
    else:
      fileNameStr += "_"
  return fileNameStr
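
# Illustrative example:
#
#   getFileNameStrFromText("build name (gcc-7.2)")
#     # Returns "build_name__gcc_7_2_"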

# Check if the key/value pairs for two dicts are the same and, if not, return
# an error message explaining how they are different.
#
# Returns tuple (hasSameKeyValuePairs, errMsg).  If
# hasSameKeyValuePairs==True, then errMsg==None.  Otherwise, if
# hasSameKeyValuePairs==False, then errMsg gives a string that explains how
# they are different.
#
# This improves on a simple check dict_1 == dict_2 in that it shows exactly
# why the dicts are different for a single key/value pair.
#
def checkDictsAreSame(dict_1, dict_1_name, dict_2, dict_2_name):
  # Assume all passing unless we find a difference
  hasSameKeyValuePairs = True
  errMsg = None
  # Start with the fast internal Python check
  if dict_1 == dict_2:
    return (True, None)
  # Check if they have the same number of keys
  if hasSameKeyValuePairs and (len(dict_1.keys()) != len(dict_2.keys())):
    hasSameKeyValuePairs = False
    errMsg = "len("+dict_1_name+".keys())="+str(len(dict_1.keys()))+\
      " != len("+dict_2_name+".keys())="+str(len(dict_2.keys()))
  # Check that they have the same key/value pairs
  if hasSameKeyValuePairs:
    for key_1 in dict_1.keys():
      if not key_1 in dict_2.keys():
        hasSameKeyValuePairs = False
        errMsg = dict_1_name+"['"+key_1+"'] does not exist in "+dict_2_name
        break
      keyVal_1 = dict_1[key_1]
      keyVal_2 = dict_2[key_1]
      if keyVal_1 != keyVal_2:
        hasSameKeyValuePairs = False
        errMsg = dict_1_name+"['"+key_1+"'] = '"+str(keyVal_1)+"' != "+\
          dict_2_name+"['"+key_1+"'] = '"+str(keyVal_2)+"'"
        break
    #end for
  #end if
  # Return the final result
  return (hasSameKeyValuePairs, errMsg)
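
# Illustrative example:
#
#   checkDictsAreSame({'a':1, 'b':2}, "d1", {'a':1, 'b':3}, "d2")
#     # Returns (False, "d1['b'] = '2' != d2['b'] = '3'")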

# Compress a long file name to avoid open() errors
#
# If the full file name must be shortened and if prefix!="", then prefix is
# added to the beginning of the shortened filename.  Also, if ext!="", then
# "."+ext is added to the end of the shortened filename.  Otherwise, if
# inputFileName is not too long, then it is returned without modification
# (i.e. 'prefix' and 'ext' are ignored).  NOTE: If 'prefix' and 'ext' are too
# long, then the returned shortened filename may also be too long.
#
# This function should return a shorter unique file name that is platform
# independent.
#
def getCompressedFileNameIfTooLong(inputFileName, prefix="", ext=""):
  maxFileNameLength = 255  # ToDo: Figure out the limit for this system?
  if len(inputFileName) > maxFileNameLength:
    hashObject = hashlib.sha1(str(inputFileName).encode('utf-8'))
    hashStr = hashObject.hexdigest()
    newFileName = prefix+hashStr
    if ext: newFileName += "." + ext
    return newFileName
  return inputFileName
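
# Illustrative example (a too-long name is replaced by the 40-char SHA-1 hex
# digest of the full name):
#
#   getCompressedFileNameIfTooLong("a"*300, prefix="cache_", ext="json")
#     # Returns "cache_<40-char-sha1-hexdigest>.json"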

# Filter an input list and return a list with elements where
# matchFunctor(inputList[i])==True.
#
def getFilteredList(inputList, matchFunctor):
  filteredList = []
  for ele in inputList:
    if matchFunctor(ele): filteredList.append(ele)
  return filteredList

# Filter an input list, returning two lists (matchList, nomatchList) where
# the first list has elements where matchFunctor(inputList[i])==True and the
# second list has elements where matchFunctor(inputList[i])==False.
#
def splitListOnMatch(inputList, matchFunctor):
  #print("\nsplitListOnMatch(): matchFunctor = "+str(matchFunctor))
  matchList = []
  nomatchList = []
  for ele in inputList:
    if matchFunctor(ele): matchList.append(ele)
    else: nomatchList.append(ele)
  return (matchList, nomatchList)
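
# Illustrative example:
#
#   splitListOnMatch([1, 2, 3, 4], lambda x: x % 2 == 0)
#     # Returns ([2, 4], [1, 3])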

# DECORATOR match functor class that negates the match of a stored functor.
#
class NotMatchFunctor(object):

  # Construct with another functor to negate
  def __init__(self, matchFunctor):
    self.__matchFunctor = matchFunctor

  # Convert to string rep for debugging/etc.
  def __str__(self):
    myStr = "NotMatchFunctor{"+str(self.__matchFunctor)+"}"
    return myStr

  # Negate the matchFunctor
  def __call__(self, item):
    return (self.__matchFunctor(item) == False)
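
# Illustrative example (negating the matcher from the splitListOnMatch()
# example above):
#
#   isEven = lambda x: x % 2 == 0
#   getFilteredList([1, 2, 3, 4], NotMatchFunctor(isEven))
#     # Returns [1, 3]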

# Apply a functor to transform every element in a list
#
# The object transformFunctor is applied as:
#
#   list_inout[i] = transformFunctor(list_inout[i])
#
# If the elements are small value-type objects, then the assignment is
# needed.  However, if the list elements are handled with reference semantics
# like a list [] or a dict {}, then really the object is being modified in
# place and the assignment is not needed, but it is cheap and harmless in
# that case.
#
# This returns the input list transformed, but the return object can be
# ignored because it modifies the input list object's elements in place.
#
def foreachTransform(list_inout, transformFunctor):
  for i in range(len(list_inout)):
    list_inout[i] = transformFunctor(list_inout[i])
  return list_inout
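
# Illustrative example (the input list is modified in place):
#
#   myList = [1, 2, 3]
#   foreachTransform(myList, lambda x: 10*x)
#     # Returns [10, 20, 30] and myList is now [10, 20, 30]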

# Remove elements from a list given a list of indexes
#
# This modifies the original list in place but also returns it.  Therefore,
# if you want to keep the original list, you should create a copy of the base
# list object before passing it in.
#
def removeElementsFromListGivenIndexes(list_inout, indexesToRemoveList_in):
  indexesToRemoveList = copy.copy(indexesToRemoveList_in)
  indexesToRemoveList.sort()
  numRemoved = 0
  for index in indexesToRemoveList:
    del list_inout[index-numRemoved]
    numRemoved += 1
  return list_inout
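
# Illustrative example (indexes refer to positions in the original list):
#
#   removeElementsFromListGivenIndexes(['a', 'b', 'c', 'd'], [2, 0])
#     # Returns ['b', 'd']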

# Class CsvFileStructure
#
class CsvFileStructure(object):

  def __init__(self, headersList, rowsList):
    self.headersList = headersList
    self.rowsList = rowsList


# Write a CsvFileStructure data to a string
#
def writeCsvFileStructureToStr(csvFileStruct):
  csvFileStr = ", ".join(csvFileStruct.headersList)+"\n"
  for rowFieldsList in csvFileStruct.rowsList:
    csvFileStr += ", ".join(rowFieldsList)+"\n"
  return csvFileStr
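
# Illustrative example:
#
#   csvStruct = CsvFileStructure(('col_0', 'col_1'), [('v00', 'v01')])
#   writeCsvFileStructureToStr(csvStruct)
#     # Returns "col_0, col_1\nv00, v01\n"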

########################################
# CDash Specific stuff
########################################


#
# Reporting policy, data, and defaults
#


# Collection of data used to create the final HTML CDash report that is
# updated and queried by various functions.
#
# NOTE: This is put into a class object so that these vars can be updated in
# place when passed to a function.
#
class CDashReportData(object):

  def __init__(self):
    # Gives the final result (assume passing by default)
    self.globalPass = True
    # This is the top of the HTML body
    self.htmlEmailBodyTop = ""
    # This is the bottom of the email body
    self.htmlEmailBodyBottom = ""
    # This var will store the list of data numbers for the summary line
    self.summaryLineDataNumbersList = []

  def reset(self):
    self.globalPass = True
    self.htmlEmailBodyTop = ""
    self.htmlEmailBodyBottom = ""
    self.summaryLineDataNumbersList = []


# Define standard CDash colors
def cdashColorPassed(): return 'green'
def cdashColorFailed(): return 'red'
def cdashColorNotRun(): return 'orange'
def cdashColorMissing(): return 'gray'
# ToDo: Make the above return different colors for a color-blind palette


def getStandardTestsetAcroList():
  return ['twoif', 'twoinr', 'twip', 'twim', 'twif', 'twinr']

# Aggregate info about a test set used for generating the summary and table
# and to determine global pass/fail.
#
# Members:
#
# * testsetAcro: e.g. 'twoif'
# * testsetDescr: e.g. "Tests without issue trackers Failed"
# * testsetTableType: Values: 'nopass', 'pass', 'missing'
# * testsetColor: e.g. 'red', 'green' (whatever is accepted by function
#   colorHtmlText())
# * existanceTriggersGlobalFail: If 'True' and any tests fall into this
#   test-set category, then it should trigger a global 'False'
#
class TestsetTypeInfo(object):

  def __init__(self, testsetAcro, testsetDescr, testsetTableType, testsetColor,
      existanceTriggersGlobalFail=True,
    ):
    self.testsetAcro = testsetAcro
    self.testsetDescr = testsetDescr
    self.testsetTableType = testsetTableType
    self.testsetColor = testsetColor
    self.existanceTriggersGlobalFail = existanceTriggersGlobalFail

# Return the TestsetTypeInfo object for the standard types of test sets that
# get their own tables.
#
# testsetAcro [in] Acronym for the standard test set (e.g. 'twoif')
#
# testsetColor [in] Gives the color to use for the summary line and the table
#   header.  If 'None' is passed in (the default), then a standard color is
#   used.  If the empty string '' is passed in, then no color will be
#   applied.
#
def getStandardTestsetTypeInfo(testsetAcro, testsetColor=None):
  if testsetAcro == "twoif":
    tsti = TestsetTypeInfo(testsetAcro, "Tests without issue trackers Failed", 'nopass',
      cdashColorFailed())
  elif testsetAcro == "twoinr":
    tsti = TestsetTypeInfo(testsetAcro, "Tests without issue trackers Not Run", 'nopass',
      cdashColorNotRun())
  elif testsetAcro == "twip":
    tsti = TestsetTypeInfo(testsetAcro, "Tests with issue trackers Passed", 'pass',
      cdashColorPassed(), existanceTriggersGlobalFail=False)
  elif testsetAcro == "twim":
    tsti = TestsetTypeInfo(testsetAcro, "Tests with issue trackers Missing", 'missing',
      cdashColorMissing(), existanceTriggersGlobalFail=False)
  elif testsetAcro == "twif":
    tsti = TestsetTypeInfo(testsetAcro, "Tests with issue trackers Failed", 'nopass',
      cdashColorFailed())
  elif testsetAcro == "twinr":
    tsti = TestsetTypeInfo(testsetAcro, "Tests with issue trackers Not Run", 'nopass',
      cdashColorNotRun())
  else:
    raise Exception("Error, testsetAcro = '"+str(testsetAcro)+"' is not supported!")
  if testsetColor != None:
    tsti.testsetColor = testsetColor
  return tsti

# Return the 'status' field from a test dict
#
# Return 'Not Run' if the 'status' field is missing.  (This happens with one
# customer's tests apparently, see SESW-383.)
#
def getTestDictStatusField(testDict):
  return testDict.get('status', 'Not Run')


# Get the test-set acronym from the fields of a test dict
#
def getTestsetAcroFromTestDict(testDict):
  issueTracker = testDict.get('issue_tracker', None)
  if isTestFailed(testDict) and issueTracker == None:
    return 'twoif'
  if isTestNotRun(testDict) and issueTracker == None:
    return 'twoinr'
  if isTestPassed(testDict) and issueTracker != None:
    return 'twip'
  if isTestMissing(testDict) and issueTracker != None:
    return 'twim'
  if isTestFailed(testDict) and issueTracker != None:
    return 'twif'
  if isTestNotRun(testDict) and issueTracker != None:
    return 'twinr'
  raise Exception(
    "Error, testDict = '"+str(testDict)+"' with fields"+\
    " status = '"+str(testDict.get('status', None))+"' and"+\
    " issue_tracker = '"+str(testDict.get('issue_tracker', None))+"'"+\
    " is not a supported test-set type!")

# Returns True if a test has 'status' 'Passed'
def isTestPassed(testDict):
  return (testDict.get('status', None) == 'Passed')

# Returns True if a test has 'status' 'Failed'
def isTestFailed(testDict):
  return (testDict.get('status', None) == 'Failed')

# Returns True if a test has 'status' 'Not Run'
def isTestNotRun(testDict):
  return (testDict.get('status', None) == 'Not Run')

# Return True if a test is missing
def isTestMissing(testDict):
  status = testDict.get('status', None)
  if status == 'Missing': return True
  if status == 'Missing / Failed': return True
  return False


# Define default test dicts sort order in tables
def getDefaultTestDictsSortKeyList(): return ['testname', 'buildName', 'site']

#
# Implementation functions
#


# Given a CDash query URL PHP page that returns JSON data, return the JSON
# data converted to a Python data-structure.
#
# The returned Python object will be a simple nested set of Python dicts and
# lists.
#
# NOTE: This function can't really be unit tested because it actually gets
# data from CDash.  Therefore, the code below is structured so that we can
# avoid calling it in any automated tests.
#
def extractCDashApiQueryData(cdashApiQueryUrl):
  if sys.version_info < (2,7,5):
    raise Exception("Error: Must be using Python 2.7.5 or newer")
  # NOTE: If we use Python 2.6.6, then the urllib2 function crashes!
  response = urlopen(cdashApiQueryUrl)
  return json.load(response)

# Read a CSV file into a list of dictionaries for each row where the rows of
# the output list are dicts with the column names as keys.
#
# For example, for the CSV file:
#
#   col_0, col_1, col_2
#   val_00, val_01, val_02
#   val_10, val_11, val_12
#
# the returned list of dicts will be:
#
#   [
#     { 'col_0':'val_00', 'col_1':'val_01', 'col_2':'val_02' },
#     { 'col_0':'val_10', 'col_1':'val_11', 'col_2':'val_12' },
#     ]
#
# This function can also allow the user to assert that the included columns
# match a set of required and optional headers.  For example, the above CSV
# file would match:
#
#   requiredColumnHeadersList = [ 'col_0', 'col_1', 'col_2' ]
#
# or:
#
#   requiredColumnHeadersList = [ 'col_0', 'col_1' ]
#   optionalColumnHeadersList = [ 'col_2', 'col_3', ]
#
# The requiredColumnHeadersList and optionalColumnHeadersList argument lists
# are optional.
#
# Also, the columns can appear in any order as long as they match all of the
# required headers and don't contain any headers not in the list of expected
# headers.
#
def readCsvFileIntoListOfDicts(csvFileName, requiredColumnHeadersList=[],
    optionalColumnHeadersList=[],
  ):
  listOfDicts = []
  with open(csvFileName, 'r') as csvFile:
    csvReader = csv.reader(csvFile)
    columnHeadersList = getColumnHeadersFromCsvFileReader(csvFileName, csvReader)
    assertExpectedColumnHeadersFromCsvFile(csvFileName, requiredColumnHeadersList,
      optionalColumnHeadersList, columnHeadersList)
    # Read the rows of the CSV file into dicts
    dataRow = 0
    for lineList in csvReader:
      if not lineList: continue  # Ignore blank line
      stripWhiltespaceFromStrList(lineList)
      assertExpectedNumColsFromCsvFile(csvFileName, dataRow, lineList,
        columnHeadersList)
      # Read the row entries into a new dict
      rowDict = {}
      for j in range(len(columnHeadersList)):
        rowDict.update( { columnHeadersList[j] : lineList[j] } )
      listOfDicts.append(rowDict)
      # Update for next row
      dataRow += 1
  # Return the constructed object
  return listOfDicts
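
# Illustrative usage (hypothetical file 'expectedBuilds.csv' with the header
# line "group, site, buildname"):
#
#   readCsvFileIntoListOfDicts('expectedBuilds.csv',
#     ['group', 'site', 'buildname'])
#     # Returns, e.g., [ {'group':'Nightly', 'site':'waterman',
#     #   'buildname':'gcc-7.2'}, ... ]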

def getColumnHeadersFromCsvFileReader(csvFileName, csvReader):
  try:
    columnHeadersList = csvReaderNext(csvReader)
    stripWhiltespaceFromStrList(columnHeadersList)
    return columnHeadersList
  except StopIteration:
    raise Exception(
      "Error, CSV file '"+csvFileName+"' is empty which is not allowed!"
      )

def assertExpectedColumnHeadersFromCsvFile(csvFileName, requiredColumnHeadersList,
    optionalColumnHeadersList, columnHeadersList,
  ):
  if not requiredColumnHeadersList and not optionalColumnHeadersList:
    return  # No expected column headers to assert against!
  requiredAndOptionalHeadersSet = set(requiredColumnHeadersList)
  requiredAndOptionalHeadersSet.update(optionalColumnHeadersList)
  columnHeadersSet = set(columnHeadersList)
  # Assert that each column header is expected
  for colHeader in columnHeadersList:
    if not colHeader in requiredAndOptionalHeadersSet:
      raise Exception(
        "Error, for CSV file '"+csvFileName+"' the"+\
        " column header '"+str(colHeader)+"' is not in the set"+\
        " of required column headers '"+str(requiredColumnHeadersList)+"'"+\
        " or optional column headers '"+str(optionalColumnHeadersList)+"'!"+\
        ""
        )
  # Assert that all of the required headers are present
  for requiredHeader in requiredColumnHeadersList:
    if not requiredHeader in columnHeadersSet:
      raise Exception(
        "Error, for CSV file '"+csvFileName+"' the"+\
        " required header '"+str(requiredHeader)+"' is missing from the"+\
        " set of included column headers '"+str(columnHeadersList)+"'!"+\
        ""
        )


def assertExpectedNumColsFromCsvFile(csvFileName, dataRow, lineList,
    columnHeadersList,
  ):
  if len(lineList) != len(columnHeadersList):
    raise Exception(
      "Error, for CSV file '"+csvFileName+"' the data row"+\
      " "+str(dataRow)+" "+str(lineList)+" has"+\
      " "+str(len(lineList))+" entries which does not match"+\
      " the number of column headers "+str(len(columnHeadersList))+"!")


def stripWhiltespaceFromStrList(strListInOut):
  for i in range(len(strListInOut)): strListInOut[i] = strListInOut[i].strip()

g_expectedBuildsCsvFileHeadersRequired = \
  ('group', 'site', 'buildname')


def getExpectedBuildsListOfDictsfromCsvFile(expectedBuildsFileName):
  return readCsvFileIntoListOfDicts(expectedBuildsFileName,
    g_expectedBuildsCsvFileHeadersRequired)


def getExpectedBuildsListOfDictsFromCsvFileArg(expectedBuildsFileArg):
  expectedBuildsLOD = []
  if expectedBuildsFileArg:
    expectedBuildsFilenameList = expectedBuildsFileArg.split(",")
    for expectedBuildsFilename in expectedBuildsFilenameList:
      expectedBuildsLOD.extend(
        getExpectedBuildsListOfDictsfromCsvFile(expectedBuildsFilename))
  return expectedBuildsLOD

# Write list of builds from a builds LOD to a CSV file structure meant to
# match the expected builds CSV file.
#
def expectedBuildsListOfDictsToCsvFileStructure(buildsLOD):
  csvFileHeadersList = copy.deepcopy(g_expectedBuildsCsvFileHeadersRequired)
  csvFileRowsList = []
  for buildDict in buildsLOD:
    csvFileRow = (
      buildDict['group'],
      buildDict['site'],
      buildDict['buildname'],
      )
    csvFileRowsList.append(csvFileRow)
  return CsvFileStructure(csvFileHeadersList, csvFileRowsList)


# Write list of builds from a builds LOD to a CSV file meant to match the
# expected builds CSV file.
#
def writeExpectedBuildsListOfDictsToCsvFile(buildsLOD, csvFileName):
  csvFileStruct = expectedBuildsListOfDictsToCsvFileStructure(buildsLOD)
  with open(csvFileName, 'w') as csvFile:
    csvFile.write(writeCsvFileStructureToStr(csvFileStruct))

g_testsWithIssueTrackersCsvFileHeadersRequired = \
  ('site', 'buildName', 'testname', 'issue_tracker_url', 'issue_tracker')


def getTestsWtihIssueTrackersListFromCsvFile(testsWithIssueTrackersFile):
  return readCsvFileIntoListOfDicts(testsWithIssueTrackersFile,
    g_testsWithIssueTrackersCsvFileHeadersRequired)


# Write list of tests from a Tests LOD to a CSV file structure meant to match
# the tests-with-issue-trackers CSV file.
#
def writeTestsListOfDictsToCsvFileStructure(testsLOD,
    issueTrackerUrl="", issueTracker="",
  ):
  csvFileHeadersList = copy.deepcopy(g_testsWithIssueTrackersCsvFileHeadersRequired)
  csvFileRowsList = []
  for testDict in testsLOD:
    csvFileRow = (
      testDict['site'],
      testDict['buildName'],
      testDict['testname'],
      issueTrackerUrl,  # issue_tracker_url
      issueTracker,     # issue_tracker
      )
    csvFileRowsList.append(csvFileRow)
  return CsvFileStructure(csvFileHeadersList, csvFileRowsList)


# Write list of tests from a Tests LOD to a CSV file meant to match the
# tests-with-issue-trackers CSV file.
#
def writeTestsListOfDictsToCsvFile(testsLOD, csvFileName):
  csvFileStruct = writeTestsListOfDictsToCsvFileStructure(testsLOD)
  with open(csvFileName, 'w') as csvFile:
    csvFile.write(writeCsvFileStructureToStr(csvFileStruct))

# Pretty print a nested Python data-structure to a file
#
# ToDo: Reimplement this to create better-looking indented output that
# involves less right-drift at the expense of more vertical space.
#
def pprintPythonDataToFile(pythonData, filePath):
  with open(filePath,'w') as fileObj:
    pp = pprint.PrettyPrinter(stream=fileObj, indent=2)
    pp.pprint(pythonData)

# Get data off CDash and cache it or read from previously cached data
#
# If useCachedCDashData==True, then the file cdashQueryDataCacheFile must
# exist and will be used to get the data instead of calling CDash.
#
# If alwaysUseCacheFileIfExists==True and the file cdashQueryDataCacheFile
# already exists, then the file cdashQueryDataCacheFile will be used to get
# the data instead of calling CDash.
#
# Otherwise, CDash will be called at cdashQueryUrl to get the data and then
# the data will be written to the file cdashQueryDataCacheFile if
# cdashQueryDataCacheFile != None.
#
# This function can be used to get data off of CDash using any page on CDash
# including cdash/api/v1/index.php, cdash/api/v1/queryTests.php, and any
# other PHP page that returns a JSON data structure (which is all of the
# cdash/api/v1/XXX.php pages).
#
def getAndCacheCDashQueryDataOrReadFromCache(
    cdashQueryUrl,
    cdashQueryDataCacheFile,  # File name
    useCachedCDashData,  # If 'True', then cdashQueryDataCacheFile must be non-null
    alwaysUseCacheFileIfExists = False,
    verbose = False,
    extractCDashApiQueryData_in=extractCDashApiQueryData,
  ):
  if (
      alwaysUseCacheFileIfExists \
      and cdashQueryDataCacheFile \
      and os.path.exists(cdashQueryDataCacheFile) \
    ):
    if verbose:
      print("  Since the file exists, using cached data from file:\n"+\
        "    "+cdashQueryDataCacheFile )
    with open(cdashQueryDataCacheFile, 'r') as cacheFile:
      cdashQueryData=eval(cacheFile.read())
  elif useCachedCDashData:
    if verbose:
      print("  Using cached data from file:\n    "+cdashQueryDataCacheFile )
    with open(cdashQueryDataCacheFile, 'r') as cacheFile:
      cdashQueryData=eval(cacheFile.read())
  else:
    if verbose:
      print("  Downloading CDash data from:\n    "+cdashQueryUrl )
    cdashQueryData = extractCDashApiQueryData_in(cdashQueryUrl)
    if cdashQueryDataCacheFile:
      if verbose:
        print("  Caching data downloaded from CDash to file:\n    "+\
          cdashQueryDataCacheFile)
      pprintPythonDataToFile(cdashQueryData, cdashQueryDataCacheFile)
  return cdashQueryData
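
# Illustrative usage (hypothetical URL and cache file name):
#
#   cdashData = getAndCacheCDashQueryDataOrReadFromCache(
#     "https://my.cdash.site/api/v1/index.php?project=MyProj&date=2018-10-29",
#     "cdash_index_cache.txt", useCachedCDashData=False,
#     alwaysUseCacheFileIfExists=True, verbose=True)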

def normalizeUrlStrings(*args):
  return [urlquote(x) for x in args]


# Construct full cdash/api/v1/index.php query URL to pull data down given the
# pieces
def getCDashIndexQueryUrl(cdashUrl, projectName, date, filterFields):
  # For legacy reasons, this function normalizes (URL-encodes) projectName
  # itself
  projectName, = normalizeUrlStrings(projectName,)
  if date: dateArg = "&date="+date
  else: dateArg = ""
  return cdashUrl+"/api/v1/index.php?project="+projectName+dateArg \
    + "&"+filterFields

# Construct full cdash/index.php browser URL given the pieces
def getCDashIndexBrowserUrl(cdashUrl, projectName, date, filterFields):
  # For legacy reasons, this function normalizes (URL-encodes) projectName
  # itself
  projectName, = normalizeUrlStrings(projectName,)
  if date: dateArg = "&date="+date
  else: dateArg = ""
  return cdashUrl+"/index.php?project="+projectName+dateArg \
    + "&"+filterFields


# Construct full cdash/api/v1/queryTests.php query URL given the pieces
def getCDashQueryTestsQueryUrl(cdashUrl, projectName, date, filterFields):
  # For legacy reasons, this function normalizes (URL-encodes) projectName
  # itself
  projectName, = normalizeUrlStrings(projectName,)
  if date: dateArg = "&date="+date
  else: dateArg = ""
  cdashTestUrl = cdashUrl+"/api/v1/queryTests.php?project="+projectName+dateArg+"&"+filterFields
  return cdashTestUrl


# Construct full cdash/queryTests.php browser URL given the pieces
def getCDashQueryTestsBrowserUrl(cdashUrl, projectName, date, filterFields):
  # For legacy reasons, this function normalizes (URL-encodes) projectName
  # itself
  projectName, = normalizeUrlStrings(projectName,)
  if date: dateArg = "&date="+date
  else: dateArg = ""
  return cdashUrl+"/queryTests.php?project="+projectName+dateArg+"&"+filterFields


# Construct full cdash/api/v1/buildSummary.php query URL given the buildId
def getCDashBuildSummaryQueryUrl(cdashUrl, buildId):
  return cdashUrl+"/api/v1/buildSummary.php?buildid="+buildId


# Construct full cdash/build browser URL given the buildId
def getCDashBuildSummaryBrowserUrl(cdashUrl, buildId):
  return cdashUrl+"/build/"+buildId

# Copy a key/value pair from one dict to another if it exists
def copyKeyDictIfExists(sourceDict_in, keyName_in, dict_inout):
  value = sourceDict_in.get(keyName_in, None)
  if value:
    dict_inout.update( { keyName_in : value } )


# Extend the set of fields for a CDash index.php build dict
#
# buildDict_in [in]: The build dict gotten from cdash/index.php.  This will
#   be modified in place.
#
# Returns the modified build dict.
#
# ToDo: Change this to get all of the fields and add the 'group' field as
# well.
#
def extendCDashIndexBuildDict(buildDict_in, groupName):
  buildDict = buildDict_in
  buildDict[u'group'] = groupName
  return buildDict

# Given the full Python JSON data-structure returned from the page
# cdash/api/v1/index.php query from extractCDashApiQueryData(), return a
# flattened-out data-structure that is easier to manipulate.
#
# This function takes in the JSON data-structure (as a nested set of Python
# dicts and lists) directly returned from a query gotten from the page
# cdash/api/v1/index.php with some filters.
#
# The input full CDash index.php JSON data-structure has the following
# structure and fields of interest:
#
#   fullCDashIndexBuildsJson =
#   {
#     'all_buildgroups': [ {'id':1,'name':"Nightly"}, ...],
#     'buildgroups': [
#       {
#         'name':"???",   # group name, e.g. Nightly
#         'builds':[
#           {
#             'site':"???",
#             'buildname':"???",
#             'update': {'errors':???, ...},
#             'configure':{'error': ???, ...},
#             'compilation':{'error': ???, ...},
#             'test': {'fail':???, 'notrun':???, 'pass':???, ...},
#             ...
#             },
#           ...
#           ]
#         },
#       ...
#       ],
#     ...
#     }
#
# This function gets the data from *all* of the collapsed builds and returns
# the flattened-out list of dicts for each build with the 'group' field added
# in as:
#
#   [
#     {
#       'group':"???",
#       'site':"???",
#       'buildname':"???",
#       'update': {'errors':???, ...},
#       'configure':{'error': ???, ...},
#       'compilation':{'error': ???, ...},
#       'test': {'fail':???, 'notrun':???, 'pass':???, ...},
#       ...
#       },
#     ...
#     ]
#
# This collects *all* of the builds from all of the build groups provided by
# that data-structure, not just the 'Nightly' build group.  Therefore, if you
# want to only consider one set of build groups, you need to add that to the
# CDash query URL (e.g. group='Nightly').
#
def flattenCDashIndexBuildsToListOfDicts(fullCDashIndexBuildsJson):
  summaryCDashIndexBuilds = []
  for buildgroup in fullCDashIndexBuildsJson["buildgroups"]:
    groupName = buildgroup["name"]
    for build in buildgroup["builds"]:
      summaryBuild = extendCDashIndexBuildDict(build, groupName)
      summaryCDashIndexBuilds.append(summaryBuild)
  return summaryCDashIndexBuilds

# Given the full JSON data-structure returned from the page
# cdash/api/v1/queryTests.php query from extractCDashApiQueryData(), return a
# flattened-out data-structure that is easier to manipulate.
#
# This function takes in the JSON data-structure (as a nested set of Python
# dicts and lists) directly returned from a query gotten from the page
# cdash/api/v1/queryTests.php with some filters.
#
# The input full CDash queryTests.php JSON data-structure has the following
# structure and fields of interest:
#
#   fullCDashQueryTestsJson =
#   {
#     'version':???,
#     'feed_enabled':???,
#     ...
#     'builds': [
#       {
#         'buildName': 'Trilinos-atdm-mutrino-intel-opt-openmp-HSW',
#         'buildSummaryLink': 'buildSummary.php?buildid=4109735',
#         'buildstarttime': '2018-10-29T05:54:03 UTC',
#         'details': 'Completed (Failed)\n',
#         'nprocs': 4,
#         'prettyProcTime': '40s 400ms',
#         'prettyTime': '10s 100ms',
#         'procTime': 40.4,
#         'site': 'mutrino',
#         'siteLink': 'viewSite.php?siteid=223',
#         'status': 'Failed',
#         'statusclass': 'error',
#         'testDetailsLink': 'testDetails.php?test=57925465&build=4109735',
#         'testname': 'Anasazi_Epetra_BKS_norestart_test_MPI_4',
#         'time': 10.1
#         },
#       ...
#       ],
#     ...
#     }
#
# This function gets the data from *all* of the tests and returns the
# flattened-out list of dicts with some additional fields for each test of
# the form:
#
#   [
#     {
#       'buildName': 'Trilinos-atdm-mutrino-intel-opt-openmp-HSW',
#       'buildSummaryLink': 'buildSummary.php?buildid=4109735',
#       'buildstarttime': '2018-10-29T05:54:03 UTC',
#       'details': 'Completed (Failed)\n',
#       'nprocs': 4,
#       'prettyProcTime': '40s 400ms',
#       'prettyTime': '10s 100ms',
#       'procTime': 40.4,
#       'site': 'mutrino',
#       'siteLink': 'viewSite.php?siteid=223',
#       'status': 'Failed',
#       'statusclass': 'error',
#       'testDetailsLink': 'testDetails.php?test=57925465&build=4109735',
#       'testname': 'Anasazi_Epetra_BKS_norestart_test_MPI_4',
#       'time': 10.1,
#       },
#     ...
#     ]
#
# NOTE: This does a shallow copy so any modifications to the returned list
# and dicts will modify the original data-structure fullCDashQueryTestsJson.
# If that is a problem, then make sure to do a deep copy before passing in
# fullCDashQueryTestsJson.
#
# This collects *all* of the tests from the 'builds' list provided by the
# CDash JSON data-structure.  Therefore, if you want to only consider one set
# of builds, you need to add that to the CDash query URL
# (e.g. buildName='<build-name>').
#
def flattenCDashQueryTestsToListOfDicts(fullCDashQueryTestsJson):
  testsListOfDicts = []
  for testDict in fullCDashQueryTestsJson['builds']:
    testsListOfDicts.append(testDict)
  return testsListOfDicts

# Create a lookup dict for a list of dicts
#
# listOfDicts [in/out]: List of dict objects that have keys that one will
#   want to use to look up the dict based on their values.  May have 100%
#   duplicate elements removed from the list.
#
# listOfKeys [in]: List of the names of keys in these dicts that are used to
#   build a search dict data-structure which is returned from this function.
#
# removeExactDuplicateElements [in]: If True, then dict elements that are
#   100% duplicates and have the exact same key/value pairs will be removed
#   from listOfDicts.  (default False)
#
# checkDictsAreSame_in [in]: Allows specialization of the check for exact
#   dict matches and reporting the differences.  The default value is the
#   function checkDictsAreSame().  Any Python object that has the __call__()
#   operator function defined that takes those same arguments and returns the
#   same outputs as the function checkDictsAreSame() can be passed in.
#
# If listOfDicts has any elements that are 100% complete duplicates with the
# same exact key/value pairs, then the later elements will be removed from
# the list.  But if just the key/value pairs listed in listOfKeys are
# duplicated but one or more of the other key/value pairs is different, then
# an exception is thrown.
#
# NOTE: This is an implementation function that is used in the class
# SearchableListOfDicts.  Please use that class instead of this raw function.
#
def createLookupDictForListOfDicts(listOfDicts, listOfKeys,
    removeExactDuplicateElements=False, checkDictsAreSame_in=checkDictsAreSame,
  ):
  # Build the lookup dict data-structure.  Also, optionally mark any 100%
  # duplicate elements if asked to remove 100% duplicate elements.
  lookupDict = {} ; idx = 0 ; numRemoved = 0 ; duplicateIndexesToRemoveFromList = []
  for dictEle in listOfDicts:
    # Create the structure of recursive dicts for the keys in order
    currentLookupDictRef = lookupDict
    lastLookupDictRef = None
    for key in listOfKeys:
      keyValue = dictEle[key]
      lastLookupDictRef = currentLookupDictRef
      nextLookupDictRef = currentLookupDictRef.setdefault(keyValue, {})
      currentLookupDictRef = nextLookupDictRef
    addEle = True
    # Check to see if this dict has already been added
    if currentLookupDictRef:
      lookedUpDict = currentLookupDictRef.get('dict', None)
      lookedUpIdx = currentLookupDictRef.get('idx', None)
      (hasSameKeyValuePairs, dictDiffErrorMsg) = checkDictsAreSame_in(
        dictEle, "listOfDicts["+str(idx)+"]",
        lookedUpDict, "listOfDicts["+str(lookedUpIdx)+"]" )
      if hasSameKeyValuePairs and removeExactDuplicateElements:
        # This is a 100% duplicate element to one previously added.
        # Therefore, mark this duplicate element to be removed.
        duplicateIndexesToRemoveFromList.append(idx)
        addEle = False
      else:
        raiseDuplicateDictEleException(idx, dictEle, listOfKeys, lookedUpIdx,
          lookedUpDict, dictDiffErrorMsg)
    # Need to go back and reset the dict on the last dict in the
    # data-structure so that modifications to the dicts that are looked up
    # will modify the original list.
    if addEle:
      currentLookupDictRef.update({'dict':dictEle, 'idx':idx-numRemoved})
    else:
      numRemoved += 1
    idx += 1
  # Remove 100% duplicate elements marked above
  removeElementsFromListGivenIndexes(listOfDicts, duplicateIndexesToRemoveFromList)
  return lookupDict
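
# Illustrative example of the lookup data-structure this builds:
#
#   listOfDicts = [ {'site':'s1', 'buildname':'b1', 'data':1} ]
#   createLookupDictForListOfDicts(listOfDicts, ['site', 'buildname'])
#     # Returns { 's1': { 'b1': {
#     #   'dict': {'site':'s1', 'buildname':'b1', 'data':1}, 'idx': 0 } } }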

def raiseDuplicateDictEleException(idx, dictEle, listOfKeys,
    lookedUpIdx, lookedUpDict, dictDiffErrorMsg,
  ):
  raise Exception(
    "Error, The element\n\n"+\
    "    listOfDicts["+str(idx)+"] =\n\n"+\
    "      "+sorted_dict_str(dictEle)+"\n\n"+\
    "  has duplicate values for the list of keys\n\n"+\
    "    "+str(listOfKeys)+"\n\n"+\
    "  with the element already added\n\n"+\
    "    listOfDicts["+str(lookedUpIdx)+"] =\n\n"+\