Skip to content

Conversation

@enourbakhsh
Copy link
Contributor

@enourbakhsh enourbakhsh commented May 8, 2025

Checklist

  • ran Jenkins
  • added a release note for user-visible changes to doc/changes

@codecov
Copy link

codecov bot commented May 8, 2025

❌ 2 Tests Failed:

Tests completed: 46 — Failed: 2, Passed: 44, Skipped: 0
View the 2 failed tests (ordered by shortest run time):
tests/test_cliCmdReport.py::ReportTest::test_aggregate_reports
Stack Traces | 1.89s run time
self = <test_cliCmdReport.ReportTest testMethod=test_aggregate_reports>

    def test_aggregate_reports(self):
        """Test `pipetask aggregate-reports` command. We make one
        `SimpleQgraph` and then fake a copy in a couple of different ways,
        making sure we can aggregate the similar graphs.
        """
        metadata = {"output_run": "run1"}
        butler, qgraph1 = makeSimpleQGraph(
            run="run",
            root=self.root,
            metadata=metadata,
        )
    
        # Check that we can get the proper run collection from the qgraph
        self.assertEqual(check_output_run(qgraph1, "run"), [])
    
        # Save the graph
        graph_uri_1 = os.path.join(self.root, "graph1.qgraph")
        qgraph1.saveUri(graph_uri_1)
    
        file1 = os.path.join(self.root, "report_test_1.json")
        file2 = os.path.join(self.root, "report_test_2.json")
        aggregate_file = os.path.join(self.root, "aggregate_report.json")
    
        report1 = self.runner.invoke(
            pipetask_cli,
            [
                "report",
                self.root,
                graph_uri_1,
                "--no-logs",
                "--full-output-filename",
                file1,
                "--force-v2",
            ],
            input="no",
        )
    
>       self.assertEqual(report1.exit_code, 0, clickResultMsg(report1))
E       AssertionError: 1 != 0 : 
E       output: Error:   File ".../cli/cmd/commands.py", line 437, in report
E           script.report_v2(
E         File ".../cli/script/report.py", line 255, in report_v2
E           print_summary(
E         File ".../cli/script/report.py", line 374, in print_summary
E           exception_diagnostics_table = summary.pprint(
E                                         ^^^^^^^^^^^^^^^
E       Summary.pprint() got an unexpected keyword argument 'show_exception_diagnostics'
E       
E       exception: 1
E       traceback:   File ".../test/lib/python3.12.../site-packages/click/testing.py", line 504, in invoke
E           return_value = cli.main(args=args or (), prog_name=prog_name, **extra)
E                          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
E         File ".../test/lib/python3.12.../site-packages/click/core.py", line 1401, in main
E           sys.exit(e.exit_code)

tests/test_cliCmdReport.py:301: AssertionError
tests/test_cliCmdReport.py::ReportTest::test_report
Stack Traces | 3.48s run time
self = <test_cliCmdReport.ReportTest testMethod=test_report>

    def test_report(self):
        """Test for making a report on the produced, missing and expected
        datasets in a quantum graph.
        """
        metadata = {"output_run": "run"}
        butler, qgraph = makeSimpleQGraph(
            run="run",
            root=self.root,
            metadata=metadata,
        )
        # Check that we can get the proper run collection from the qgraph
        self.assertEqual(check_output_run(qgraph, "run"), [])
    
        graph_uri = os.path.join(self.root, "graph.qgraph")
        qgraph.saveUri(graph_uri)
    
        test_filename = os.path.join(self.root, "report_test.yaml")
    
        result = self.runner.invoke(
            pipetask_cli,
            ["report", self.root, graph_uri, "--full-output-filename", test_filename, "--no-logs"],
            input="no",
        )
    
        # Check that we can read from the command line
        self.assertEqual(result.exit_code, 0, clickResultMsg(result))
    
        # Check that we can open and read the file produced by make_reports
        with open(test_filename) as f:
            report_output_dict = yaml.load(f, Loader=SafeLoader)
    
        self.assertIsNotNone(report_output_dict["task0"])
        self.assertIsNotNone(report_output_dict["task0"]["failed_quanta"])
        self.assertIsInstance(report_output_dict["task0"]["n_expected"], int)
    
        result_hr = self.runner.invoke(
            pipetask_cli,
            ["report", self.root, graph_uri, "--no-logs"],
            input="no",
        )
    
        # Check that we can read from the command line
        self.assertEqual(result_hr.exit_code, 0, clickResultMsg(result_hr))
    
        # Check that we get string output
        self.assertIsInstance(result_hr.stdout, str)
    
        # Check that task0 and the failed quanta for task0 exist in the string
        self.assertIn("task0", result_hr.stdout)
        self.assertIn("Failed", result_hr.stdout)
        self.assertIn("Expected", result_hr.stdout)
        self.assertIn("Succeeded", result_hr.stdout)
    
        # Check brief option for pipetask report
        result_brief = self.runner.invoke(
            pipetask_cli,
            ["report", self.root, graph_uri, "--no-logs", "--brief"],
            input="no",
        )
        self.assertIsInstance(result_brief.stdout, str)
    
        # Check that task0 and the failed quanta for task0 exist in the string
        self.assertIn("task0", result_brief.stdout)
        self.assertIn("Failed", result_brief.stdout)
        self.assertIn("Expected", result_brief.stdout)
        self.assertIn("Succeeded", result_brief.stdout)
    
        # Test cli for the QPG
        result_v2_terminal_out = self.runner.invoke(
            pipetask_cli,
            ["report", self.root, graph_uri, "--no-logs", "--force-v2"],
            input="no",
        )
    
        # Check that we can read from the command line
>       self.assertEqual(result_v2_terminal_out.exit_code, 0, clickResultMsg(result_v2_terminal_out))
E       AssertionError: 1 != 0 : 
E       output: Error:   File ".../cli/cmd/commands.py", line 437, in report
E           script.report_v2(
E         File ".../cli/script/report.py", line 255, in report_v2
E           print_summary(
E         File ".../cli/script/report.py", line 374, in print_summary
E           exception_diagnostics_table = summary.pprint(
E                                         ^^^^^^^^^^^^^^^
E       Summary.pprint() got an unexpected keyword argument 'show_exception_diagnostics'
E       
E       exception: 1
E       traceback:   File ".../test/lib/python3.12.../site-packages/click/testing.py", line 504, in invoke
E           return_value = cli.main(args=args or (), prog_name=prog_name, **extra)
E                          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
E         File ".../test/lib/python3.12.../site-packages/click/core.py", line 1401, in main
E           sys.exit(e.exit_code)

tests/test_cliCmdReport.py:155: AssertionError

To view more test analytics, go to the Test Analytics Dashboard
📋 Got 3 mins? Take this short survey to help us improve Test Analytics.

@enourbakhsh enourbakhsh force-pushed the tickets/DM-50550 branch 17 times, most recently from fcd4926 to 25d25f4 Compare May 11, 2025 21:58
Copy link
Contributor

@cmsaunders cmsaunders left a comment

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This looks fine, except for some small points I have commented on. I can't tell if the mypy changes are needed or not, since it's still not passing the checks.



@click.command(cls=PipetaskCLI, context_settings=dict(help_option_names=["-h", "--help"]))
@click.command(cls=PipetaskCLI, context_settings=dict(help_option_names=["-h", "--help"])) # type: ignore
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Do you know why you had to add this? I see it's part of the commit "Ignore mypy abstract class error", but mypy still doesn't seem to like it.

interactive (searchable and sortable) using Astropy's JSViewer writer.
This is a troubleshooting-oriented report of the exceptions combined
with the exposure dimension records.
show_exception_diagnostics : `bool`
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

"`bool`, optional"

and for searching and counting specific kinds or instances of failures.
This option will also print a "brief" (counts-only) summary to stdout.
logs : `bool`
exception_diagnostics_filename : `str`
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Shouldn't this be an optional argument?

if ext in write_formats:
return ext

return default
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

A WARNING would be helpful here so the user knows the default will be used.

full_output_filename : `str` or `None`
Name of the JSON file in which to store summary information, if
passed.
exception_diagnostics_filename : `str` or `None`
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Add ", optional" here and in the following docstrings, like you did above.

@timj
Copy link
Member

timj commented May 27, 2025

To get everything passing you will need to modify requirements.txt to make it point at the pipe_base ticket branch.

@timj timj force-pushed the tickets/DM-50550 branch from dd1d648 to 85232cc Compare October 29, 2025 20:40
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment

Labels

None yet

Projects

None yet

Development

Successfully merging this pull request may close these issues.

4 participants