@@ -516,15 +516,39 @@ def benchSuiteName(self, bmSuiteArgs):
     def subgroup(self):
         return SUBGROUP_GRAAL_PYTHON
 
+    def with_branch_and_commit_dict(self, d):
+        """
+        We run our benchmarks from the graalpython directories, but with
+        other suites as primary suites in the CI, so we may need to update
+        the branch and commit info.
+        """
+        if mx.primary_suite().dir != os.getcwd():
+            if any(os.path.isdir(entry) and entry.startswith("mx.graalpython") for entry in os.listdir()):
+                vc = SUITE.vc
+                if vc is None:
+                    return d
+                branch = vc.active_branch(SUITE.dir, abortOnError=False) or "<unknown>"
+                info = vc.parent_info(SUITE.dir)
+                url = vc.default_pull(SUITE.dir, abortOnError=False) or "unknown"
+                d.update({
+                    "branch": branch,
+                    "commit.rev": vc.parent(SUITE.dir),
+                    "commit.repo-url": url,
+                    "commit.author": info["author"],
+                    "commit.author-ts": info["author-ts"],
+                    "commit.committer": info["committer"],
+                    "commit.committer-ts": info["committer-ts"],
+                })
+        return d
+
     def rules(self, output, benchmarks, bm_suite_args):
         bench_name = self.get_bench_name(benchmarks)
         arg = self.get_arg(bench_name)
-
         return [
             # warmup curves
             StdOutRule(
                 r"^### iteration=(?P<iteration>[0-9]+), name=(?P<benchmark>[a-zA-Z0-9._\-]+), duration=(?P<time>[0-9]+(\.[0-9]+)?$)",  # pylint: disable=line-too-long
-                {
+                self.with_branch_and_commit_dict({
                     "benchmark": '{}.{}'.format(self._name, bench_name),
                     "metric.name": "warmup",
                     "metric.iteration": ("<iteration>", int),
@@ -534,12 +558,12 @@ def rules(self, output, benchmarks, bm_suite_args):
534558 "metric.score-function" : "id" ,
535559 "metric.better" : "lower" ,
536560 "config.run-flags" : "" .join (arg ),
537- }
561+ })
538562 ),
539563 # secondary metric(s)
540564 StdOutRule (
541565 r"### WARMUP detected at iteration: (?P<endOfWarmup>[0-9]+$)" ,
542- {
566+ self . with_branch_and_commit_dict ( {
543567 "benchmark" : '{}.{}' .format (self ._name , bench_name ),
544568 "metric.name" : "end-of-warmup" ,
545569 "metric.iteration" : 0 ,
@@ -549,13 +573,13 @@ def rules(self, output, benchmarks, bm_suite_args):
549573 "metric.score-function" : "id" ,
550574 "metric.better" : "lower" ,
551575 "config.run-flags" : "" .join (arg ),
552- }
576+ })
553577 ),
554578
555579 # no warmups
556580 StdOutRule (
557581 r"^@@@ name=(?P<benchmark>[a-zA-Z0-9._\-]+), duration=(?P<time>[0-9]+(\.[0-9]+)?$)" , # pylint: disable=line-too-long
558- {
582+ self . with_branch_and_commit_dict ( {
559583 "benchmark" : '{}.{}' .format (self ._name , bench_name ),
560584 "metric.name" : "time" ,
561585 "metric.iteration" : 0 ,
@@ -565,7 +589,7 @@ def rules(self, output, benchmarks, bm_suite_args):
565589 "metric.score-function" : "id" ,
566590 "metric.better" : "lower" ,
567591 "config.run-flags" : "" .join (arg ),
568- }
592+ })
569593 ),
570594 ]
571595
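The rules above are driven by named capture groups: the warmup rule, for example, maps the <iteration> and <time> captures into the datapoint via placeholders like ("<iteration>", int). A self-contained check of that regex against a made-up output line:

import re

# The warmup-curve regex from the rule above, split across two
# string literals purely for readability.
WARMUP_LINE = re.compile(
    r"^### iteration=(?P<iteration>[0-9]+), name=(?P<benchmark>[a-zA-Z0-9._\-]+), "
    r"duration=(?P<time>[0-9]+(\.[0-9]+)?$)"
)

sample = "### iteration=7, name=meso.nbody3, duration=123.45"  # made-up line
m = WARMUP_LINE.match(sample)
assert m is not None
print(int(m.group("iteration")), m.group("benchmark"), float(m.group("time")))
# -> 7 meso.nbody3 123.45
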
@@ -731,12 +755,11 @@ class PythonVmWarmupBenchmarkSuite(PythonBenchmarkSuite):
     def rules(self, output, benchmarks, bm_suite_args):
         bench_name = self.get_bench_name(benchmarks)
         arg = self.get_arg(bench_name)
-
         return [
             # startup (time from start of VM to end of first iteration)
             StdOutRule(
                 r"### STARTUP +at iteration: (?P<iteration>[0-9]+), +duration: (?P<time>[0-9]+(\.[0-9]+)?$)",
-                {
+                self.with_branch_and_commit_dict({
                     "benchmark": '{}.{}'.format(self._name, bench_name),
                     "metric.name": "startup",
                     "metric.iteration": ("<iteration>", int),
@@ -746,12 +769,12 @@ def rules(self, output, benchmarks, bm_suite_args):
746769 "metric.score-function" : "id" ,
747770 "metric.better" : "lower" ,
748771 "config.run-flags" : "" .join (arg ),
749- }
772+ })
750773 ),
751774
752775 StdOutRule (
753776 r"### EARLY WARMUP +at iteration: (?P<iteration>[0-9]+), +duration: (?P<time>[0-9]+(\.[0-9]+)?$)" ,
754- {
777+ self . with_branch_and_commit_dict ( {
755778 "benchmark" : '{}.{}' .format (self ._name , bench_name ),
756779 "metric.name" : "early-warmup" ,
757780 "metric.iteration" : ("<iteration>" , int ),
@@ -761,12 +784,12 @@ def rules(self, output, benchmarks, bm_suite_args):
761784 "metric.score-function" : "id" ,
762785 "metric.better" : "lower" ,
763786 "config.run-flags" : "" .join (arg ),
764- }
787+ })
765788 ),
766789
767790 StdOutRule (
768791 r"### LATE WARMUP +at iteration: (?P<iteration>[0-9]+), +duration: (?P<time>[0-9]+(\.[0-9]+)?$)" ,
769- {
792+ self . with_branch_and_commit_dict ( {
770793 "benchmark" : '{}.{}' .format (self ._name , bench_name ),
771794 "metric.name" : "late-warmup" ,
772795 "metric.iteration" : ("<iteration>" , int ),
@@ -776,7 +799,7 @@ def rules(self, output, benchmarks, bm_suite_args):
776799 "metric.score-function" : "id" ,
777800 "metric.better" : "lower" ,
778801 "config.run-flags" : "" .join (arg ),
779- }
802+ })
780803 ),
781804 ]
782805
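One design note: the wrapping is eager. with_branch_and_commit_dict runs once per dict while rules() builds the list, not once per matched output line, so the version-control lookups happen only a handful of times per benchmark invocation. A small sketch of that evaluation order, with a hypothetical stub standing in for the real helper:

# stamp() is a hypothetical stand-in for with_branch_and_commit_dict.
def stamp(d):
    d.update({"branch": "master", "commit.rev": "0123abcd"})  # made-up values
    return d

def rules():
    # Dicts are stamped here, at list-construction time, mirroring how
    # self.with_branch_and_commit_dict(...) wraps each rule dict above.
    return [stamp({"metric.name": n}) for n in ("startup", "early-warmup", "late-warmup")]

for r in rules():
    print(r["metric.name"], r["branch"], r["commit.rev"])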