diff --git a/.github/workflows/run-benchmark.yml b/.github/workflows/run-benchmark.yml index 04ea390..8769d88 100644 --- a/.github/workflows/run-benchmark.yml +++ b/.github/workflows/run-benchmark.yml @@ -44,36 +44,43 @@ jobs: cd $GITHUB_WORKSPACE/benchmarks/linear-elastic-plate-with-hole/ python generate_config.py + - name: generate-snakemake-fenics-workflow + shell: bash -l {0} + run: | + cd $GITHUB_WORKSPACE/benchmarks/common/ + python simulation_tool_snakefile_generator.py --benchmark_name linear-elastic-plate-with-hole --tool fenics --simulation_script run_fenics_simulation.py --environment_file environment_simulation.yml + - name: run_linear-elastic-plate-with-hole-benchmarks_snakemake shell: bash -l {0} run: | cd $GITHUB_WORKSPACE/benchmarks/linear-elastic-plate-with-hole/ - snakemake --use-conda --force --cores 'all' - snakemake --use-conda --force --cores all \ + snakemake --use-conda --force --cores 'all' --config tool="fenics" + snakemake --use-conda --force --cores all --config tool="fenics" \ --reporter metadata4ing \ --report-metadata4ing-paramscript ../common/parameter_extractor.py \ - --report-metadata4ing-filename snakemake_provenance - unzip snakemake_provenance -d snakemake_provenance - - - name: run_linear-elastic-plate-with-hole-benchmarks_nextflow - shell: bash -l {0} - run: | - cd $GITHUB_WORKSPACE/benchmarks/linear-elastic-plate-with-hole/ - nextflow run main.nf -params-file workflow_config.json -c ../common/nextflow.config -plugins nf-prov@1.4.0 + --report-metadata4ing-filename snakemake_provenance_fenics + unzip snakemake_provenance_fenics -d snakemake_provenance_fenics - - name: Archive Linear Elastic plate with a hole benchmark data for snakemake + snakemake --use-conda --force --cores 'all' --config tool="kratos" + snakemake --use-conda --force --cores all --config tool="kratos" \ + --reporter metadata4ing \ + --report-metadata4ing-paramscript ../common/parameter_extractor.py \ + --report-metadata4ing-filename 
snakemake_provenance_kratos + unzip snakemake_provenance_kratos -d snakemake_provenance_kratos + + - name: Archive Linear Elastic plate with a hole benchmark data for snakemake fenics uses: actions/upload-artifact@v4 with: - name: snakemake_results_linear-elastic-plate-with-hole + name: snakemake_fenics_result_linear-elastic-plate-with-hole path: | - benchmarks/linear-elastic-plate-with-hole/snakemake_provenance/ + benchmarks/linear-elastic-plate-with-hole/snakemake_provenance_fenics/ - - name: Archive Linear Elastic plate with a hole benchmark data for nextflow + - name: Archive Linear Elastic plate with a hole benchmark data for snakemake kratos uses: actions/upload-artifact@v4 with: - name: nextflow_results_linear-elastic-plate-with-hole + name: snakemake_kratos_result_linear-elastic-plate-with-hole path: | - benchmarks/linear-elastic-plate-with-hole/nextflow_results/ + benchmarks/linear-elastic-plate-with-hole/snakemake_provenance_kratos/ process-artifacts: runs-on: ubuntu-latest @@ -82,11 +89,17 @@ jobs: - name: Checkout repo content uses: actions/checkout@v2 - - name: Download artifact + - name: Download snakemake fenics artifact + uses: actions/download-artifact@v4 + with: + name: snakemake_fenics_result_linear-elastic-plate-with-hole + path: ./snakemake_fenics_provenance + + - name: Download snakemake kratos artifact uses: actions/download-artifact@v4 with: - name: snakemake_results_linear-elastic-plate-with-hole - path: ./snakemake_provenance + name: snakemake_kratos_result_linear-elastic-plate-with-hole + path: ./snakemake_kratos_provenance - name: Setup Mambaforge with postprocessing env uses: conda-incubator/setup-miniconda@v3 @@ -99,7 +112,7 @@ jobs: - name: Run plotting script shell: bash -l {0} run: | - python benchmarks/linear-elastic-plate-with-hole/plot_metrics.py ./snakemake_provenance + python benchmarks/linear-elastic-plate-with-hole/plot_metrics.py ./snakemake_fenics_provenance ./snakemake_kratos_provenance - name: Upload PDF plot as artifact 
uses: actions/upload-artifact@v4 diff --git a/.gitignore b/.gitignore deleted file mode 100644 index f684468..0000000 --- a/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -.snakemake -site \ No newline at end of file diff --git a/benchmarks/common/nextflow.config b/benchmarks/common/nextflow.config deleted file mode 100644 index 36401ae..0000000 --- a/benchmarks/common/nextflow.config +++ /dev/null @@ -1,27 +0,0 @@ -conda { - enabled = true -} - -params.result_dir = "nextflow_results/${params.benchmark}" - -prov { - formats { - dag { - file = "${params.result_dir}/nf_prov_dag.html" - overwrite = true - } - legacy { - file = "${params.result_dir}/nf_prov_legacy.json" - overwrite = true - } - wrroc { - agent { - name = 'Firstname Lastname' - orcid = 'https://orcid.org/0000-0000-0000-0000' - } - file = "${params.result_dir}/ro-crate-metadata.json" - license = 'https://spdx.org/licenses/MIT' - overwrite = true - } - } -} diff --git a/benchmarks/common/simulation_tool_snakefile_generator.py b/benchmarks/common/simulation_tool_snakefile_generator.py new file mode 100755 index 0000000..a252b52 --- /dev/null +++ b/benchmarks/common/simulation_tool_snakefile_generator.py @@ -0,0 +1,104 @@ +#!/usr/bin/env python3 +""" +Script to generate a Snakefile for simulation tools in the benchmark workflow. + +This script creates a standardized Snakefile that follows the expected output format +for integration with the main benchmark workflow. +""" + +import argparse +import os +from pathlib import Path + + +def generate_snakefile(benchmark_name: str, tool_name: str, environment_file: str, simulation_script: str, output_path: str = None): + """ + Generate a Snakefile for a simulation tool. 
+ + Args: + tool_name: Name of the simulation tool (e.g., 'fenics', 'kratos') + environment_file: Path to the conda environment YAML file (relative to tool directory) + simulation_script: Path to the simulation Python script (relative to tool directory) + output_path: Optional path where to save the Snakefile. If None, saves to {tool_name}/Snakefile + """ + + # Load template from external file + template_path = Path(__file__).parent / "snakefile_template.txt" + with open(template_path, 'r') as f: + snakefile_template = f.read() + + # Replace placeholders with actual values + snakefile_content = snakefile_template.replace("{TOOL_NAME}", tool_name) \ + .replace("{SIMULATION_SCRIPT}", simulation_script) \ + .replace("{ENVIRONMENT_FILE}", environment_file) + + # Determine output path + if output_path is None: + output_path = f"../{benchmark_name}/{tool_name}/Snakefile" + + # Write the Snakefile + with open(output_path, 'w') as f: + f.write(snakefile_content) + + print(f"Snakefile generated successfully: {output_path}") + +def main(): + parser = argparse.ArgumentParser( + description="Generate a simulation-tool-specific Snakefile for running the simulation.", + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog= + """ + !IMPORTANT!:The scripts to be stored inside the tool's sub-directory in the benchmark folder. 
E.g., benchmarks/linear-elastic-plate-with-hole/tool_name/ + + Execution example for fenics: + python simulation_tool_snakefile_generator.py --benchmark_name linear-elastic-plate-with-hole --tool fenics --environment_file environment_simulation.yml --simulation_script run_fenics_simulation.py + + The simulation script must accept these command-line arguments: + --input_parameter_file: JSON file with simulation parameters + --input_mesh_file: Input mesh file (.msh format) + --output_solution_file_zip: Output ZIP file containing solution visualization files (VTK) + --output_metrics_file: Output JSON file with computed metrics + + """ + ) + parser.add_argument( + '--benchmark_name', + type=str, + required=True, + help='Name of the benchmark (same as the benchmark directory name)' + ) + + parser.add_argument( + '--tool', + type=str, + required=True, + help='Name of the simulation tool (e.g., fenics, kratos, abaqus)' + ) + + parser.add_argument( + '--environment_file', + type=str, + required=True, + help='Conda environment YAML file name' + ) + + parser.add_argument( + '--simulation_script', + type=str, + required=True, + help='Simulation script name' + ) + + args = parser.parse_args() + + generate_snakefile( + benchmark_name=args.benchmark_name, + tool_name=args.tool, + environment_file=args.environment_file, + simulation_script=args.simulation_script + ) + +if __name__ == "__main__": + main() + + diff --git a/benchmarks/linear-elastic-plate-with-hole/fenics/Snakefile b/benchmarks/common/snakefile_template.txt similarity index 58% rename from benchmarks/linear-elastic-plate-with-hole/fenics/Snakefile rename to benchmarks/common/snakefile_template.txt index d82cd75..a3a5170 100644 --- a/benchmarks/linear-elastic-plate-with-hole/fenics/Snakefile +++ b/benchmarks/common/snakefile_template.txt @@ -1,23 +1,23 @@ import json import os -tool = "fenics" +tool = "{TOOL_NAME}" result_dir = "snakemake_results/" + config["benchmark"] configuration_to_parameter_file = config["configuration_to_parameter_file"] configurations = config["configurations"] -rule 
run_fenics_simulation: +rule run_{TOOL_NAME}_simulation: input: - script = "{tool}/run_fenics_simulation.py", + script = f"{tool}/{SIMULATION_SCRIPT}", parameters = lambda wildcards: configuration_to_parameter_file[wildcards.configuration], - mesh = f"{result_dir}/mesh/mesh_{{configuration}}.msh", + mesh = f"{result_dir}/{tool}/mesh/mesh_{{configuration}}.msh", output: - zip = f"{result_dir}/{{tool}}/solution_field_data_{{configuration}}.zip", - metrics = f"{result_dir}/{{tool}}/solution_metrics_{{configuration}}.json", + zip = f"{result_dir}/{tool}/solution_field_data_{{configuration}}.zip", + metrics = f"{result_dir}/{tool}/solution_metrics_{{configuration}}.json", conda: - "environment_simulation.yml", + "{ENVIRONMENT_FILE}", shell: """ python3 {input.script} --input_parameter_file {input.parameters} --input_mesh_file {input.mesh} --output_solution_file_zip {output.zip} --output_metrics_file {output.metrics} - """ \ No newline at end of file + """ diff --git a/benchmarks/linear-elastic-plate-with-hole/README.md b/benchmarks/linear-elastic-plate-with-hole/README.md deleted file mode 100644 index 7c3f3ab..0000000 --- a/benchmarks/linear-elastic-plate-with-hole/README.md +++ /dev/null @@ -1,59 +0,0 @@ -# Infinite plate with hole benchmark in FEniCSx - -## Problem Definition - -See the [documentation](../../docs/benchmarks/linear%20elasticity/index.md) for a detailed problem definition and mathematical formulation. - -## Running the Benchmark - -1. **Generate Configuration Files** - - Before running the benchmark, generate the configuration files using: - ```bash - python generate_config.py - ``` - This script creates a workflow_config.json that defines what configurations are computed (in the starndard case one for each parameter_file_*.json) and what tools are used. - -2. **Run the Benchmark** - - The benchmark is managed via Snakemake. 
You can run all tools or specify a subset (e.g., only fenics) by editing the generated config or passing parameters via the command line: - ```bash - snakemake --use-conda --cores all - ``` - To run only a specific tool or specific configuration (e.g. for testing), update the config file or use: - ```bash - snakemake --use-conda --cores all --config tools=fenics - ``` - -3. **Collect Provenance** - - After running, provenance data is collected automatically and stored in .snakemake. If you want to use the reporter plugin ([metadata4ing](https://github.com/izus-fokus/snakemake-report-plugin-metadata4ing)) to generate an ROCrate, call snakemake again (make sure the plugin is added to the environment): - ```bash - snakemake --use-conda --cores all --reporter metadata4ing - ``` - Output and provenance files are stored in the `snakemake_results/` directory and as zipped archives. - -## Hierarchical Structure of Snakefiles - -The workflow is organized hierarchically: -- The main `Snakefile` orchestrates the benchmark and includes sub-workflows for each tool. -- Each tool (e.g., fenics) has its own rules and output structure. - -### Inputs - -Each tool's rule must accept: -- A parameter/configuration file (e.g., `parameters_*.json`) specifying geometry, material properties, boundary conditions, and solver settings. -- A mesh file (*.msh from gmsh) generated with the rule generate_mesh. - -### Outputs - -Each tool's rule must produce: -- **Solution field results**: This zip-file should include all the data used to plot the output like strains, stresses or displacements of the solution field. -- **Metrics file**: JSON-File summarizing key metrics (e.g., max Mises stress at Gauss points or maximum mises stress obtained when projecting to the nodes). -- All output files should be placed in the designated results directory (e.g., `snakemake_results/{benchmark}/{tool}/solution_field_data_{configuration}.zip`). 
`snakemake_results/` is generated from the level where snakemake is executed, but this directory is then zipped. - -To add another simulation tool: - - Create a new subdirectory for the tool. - - Create a new Snakefile with at least one rule that produces the outputs (metrics and solution fields) - - Ensure the rule accepts the standardized parameter file and mesh/input files. - - Update the main `Snakefile` to include the new tool's rules. \ No newline at end of file diff --git a/benchmarks/linear-elastic-plate-with-hole/Snakefile b/benchmarks/linear-elastic-plate-with-hole/Snakefile index fbae7d3..5a7b8c7 100644 --- a/benchmarks/linear-elastic-plate-with-hole/Snakefile +++ b/benchmarks/linear-elastic-plate-with-hole/Snakefile @@ -4,14 +4,14 @@ configfile: "workflow_config.json" result_dir = "snakemake_results/" + config["benchmark"] configuration_to_parameter_file = config["configuration_to_parameter_file"] configurations = config["configurations"] -tools = config["tools"] +tool = config["tool"] benchmark = config["benchmark"] benchmark_uri = config["benchmark_uri"] rule all: input: - expand(f"{result_dir}/{{tool}}/summary.json", tool=tools), + f"{result_dir}/{tool}/summary.json" rule create_mesh: input: @@ -21,7 +21,7 @@ rule create_mesh: # otherwise, you could just write configuration_to_parameter_file(configuration) parameters = lambda wildcards: configuration_to_parameter_file[wildcards.configuration], output: - mesh = f"{result_dir}/mesh/mesh_{{configuration}}.msh", + mesh = f"{result_dir}/{tool}/mesh/mesh_{{configuration}}.msh", conda: "environment_mesh.yml" shell: """ @@ -29,13 +29,11 @@ rule create_mesh: """ # Include tool-specific rules -# The should have at least the mesh file and the parameters as input +# They should have at least the mesh file and the parameters as input # and output for each configuration a # solution_metrics_{configuration}.json and -# and solution_field_data_{configuration}.zip whee all the visualization files are stored -# (e.g. 
vtk) -for tool in tools: - include: f"{tool}/Snakefile" +# and solution_field_data_{configuration}.zip where all the visualization files are stored (e.g. vtk) +include: f"{tool}/Snakefile" rule summary: @@ -44,7 +42,7 @@ rule summary: # (snakemake_results/linear-elastic-plate-with-hole/fenics/summary.json) script = "../common/summarize_results.py", parameters = expand("{param}", param=[configuration_to_parameter_file[c] for c in configurations]), - mesh = expand(f"{result_dir}/mesh/mesh_{{configuration}}.msh", configuration=configurations), + mesh = expand(f"{result_dir}/{tool}/mesh/mesh_{{configuration}}.msh", configuration=configurations), metrics = lambda wildcards: expand( f"{result_dir}/{{tool}}/solution_metrics_{{configuration}}.json", tool=[wildcards.tool], configuration=configurations @@ -70,12 +68,6 @@ rule summary: """ """ -Steps to add a new simulation tool to the workflow: - -1. Write the tool-specific workflow, scripts, environment file and store them in the benchmarks/linear-elastic-plate-with-hole/tool_name/. -2. Add the tool name to "tools" workflow_config.json (generated here using generate_config.py) - ------------------------------------------------------------------------------------------------------------------------- "rule all" defines the final target of the workflow. Knowing the final target, the snakemake determines the dependency chain automatically. 
diff --git a/benchmarks/linear-elastic-plate-with-hole/fenics/environment_simulation.yml b/benchmarks/linear-elastic-plate-with-hole/fenics/environment_simulation.yml index 2ce19d9..477dd37 100644 --- a/benchmarks/linear-elastic-plate-with-hole/fenics/environment_simulation.yml +++ b/benchmarks/linear-elastic-plate-with-hole/fenics/environment_simulation.yml @@ -10,6 +10,7 @@ dependencies: - python=3.12 - fenics-dolfinx=0.9.* - libadios2=2.10.1 + - mpich - petsc4py - pint - python-gmsh diff --git a/benchmarks/linear-elastic-plate-with-hole/fenics/fenics.nf b/benchmarks/linear-elastic-plate-with-hole/fenics/fenics.nf deleted file mode 100644 index a655fe8..0000000 --- a/benchmarks/linear-elastic-plate-with-hole/fenics/fenics.nf +++ /dev/null @@ -1,35 +0,0 @@ -params.tool = "fenics" - -process run_simulation { - publishDir "${params.result_dir}/${params.tool}/" - conda './fenics/environment_simulation.yml' - - input: - path python_script - tuple val(configuration), path(parameter_file), path(mesh_file) - - - output: - tuple val(configuration), path("solution_field_data_${configuration}.zip"), path("solution_metrics_${configuration}.json") - - script: - """ - python3 $python_script --input_parameter_file $parameter_file --input_mesh_file $mesh_file --output_solution_file_zip "solution_field_data_${configuration}.zip" --output_metrics_file "solution_metrics_${configuration}.json" - """ -} - -workflow fenics_workflow { - - take: - mesh_data // tuple(configuration, parameters, mesh) - result_dir - - main: - params.result_dir = result_dir - run_sim_script = Channel.value(file('fenics/run_fenics_simulation.py')) - output_process_run_simulation = run_simulation( run_sim_script, mesh_data ) - - emit: - output_process_run_simulation - -} \ No newline at end of file diff --git a/benchmarks/linear-elastic-plate-with-hole/generate_config.py b/benchmarks/linear-elastic-plate-with-hole/generate_config.py index d937455..41989d9 100644 --- 
a/benchmarks/linear-elastic-plate-with-hole/generate_config.py +++ b/benchmarks/linear-elastic-plate-with-hole/generate_config.py @@ -39,7 +39,6 @@ def get_configuration(file): # Reverse mapping for easy lookup by configuration name configuration_to_parameter_file = {v: str(k) for k, v in configurations.items()} -tools = ["fenics", "kratos"] benchmark = "linear-elastic-plate-with-hole" benchmark_uri = "https://portal.mardi4nfdi.de/wiki/Model:6775296" @@ -47,7 +46,6 @@ def get_configuration(file): workflow_config = { "configuration_to_parameter_file": configuration_to_parameter_file, "configurations": list(configurations.values()), - "tools": tools, "benchmark": benchmark, "benchmark_uri": benchmark_uri } diff --git a/benchmarks/linear-elastic-plate-with-hole/kratos/Snakefile b/benchmarks/linear-elastic-plate-with-hole/kratos/Snakefile index 38054c6..db90f62 100644 --- a/benchmarks/linear-elastic-plate-with-hole/kratos/Snakefile +++ b/benchmarks/linear-elastic-plate-with-hole/kratos/Snakefile @@ -12,7 +12,7 @@ kratos_material_template = f"{tool}/StructuralMaterials_template.json" rule mesh_to_mdpa: input: parameters = lambda wildcards: configuration_to_parameter_file[wildcards.configuration], - mesh = f"{result_dir}/mesh/mesh_{{configuration}}.msh", + mesh = f"{result_dir}/{tool}/mesh/mesh_{{configuration}}.msh", script = f"{tool}/msh_to_mdpa.py", output: mdpa = f"{result_dir}/{tool}/mesh_{{configuration}}.mdpa", diff --git a/benchmarks/linear-elastic-plate-with-hole/kratos/kratos.nf b/benchmarks/linear-elastic-plate-with-hole/kratos/kratos.nf deleted file mode 100644 index c5d8b91..0000000 --- a/benchmarks/linear-elastic-plate-with-hole/kratos/kratos.nf +++ /dev/null @@ -1,123 +0,0 @@ -params.tool = "kratos" - -process mesh_to_mdpa { - publishDir "${params.result_dir}/${params.tool}/" - conda './kratos/environment_simulation.yml' - - input: - path python_script - tuple val(configuration), path(parameter_file), path(mesh_file) - - output: - tuple val(configuration), 
path("mesh_${configuration}.mdpa") - - script: - """ - python3 ${python_script} \ - --input_parameter_file ${parameter_file} \ - --input_mesh_file ${mesh_file} \ - --output_mdpa_file mesh_${configuration}.mdpa - """ -} - -process create_kratos_input_and_run_simulation { - - // The process combines the creation of the Kratos input file (json) and the execution of the simulation. Initially, these were two separate processes. - // The combination was necessary because the create_kratos_input.py specifies the location of the mesh file and the output location of the simulation results in - // the json file. In the case of Nextflow, these locations are related to the process's sub-directory (inside the work directory). Executing the simulation as a - // separate process results in a failure to find the mesh file and write the output files unless the paths (in the json file) are explicitly provided as an input - // to the process. - // This is not an issue in the case of Snakemake, as the working directory doesn't automatically change between different rules. 
- - publishDir "${params.result_dir}/${params.tool}/" - conda './kratos/environment_simulation.yml' - - input: - path script_create_kratos_input - path script_run_kratos - tuple val(configuration), path(parameters), path(mdpa) - path kratos_input_template - path kratos_material_template - - output: - tuple val(configuration), path("ProjectParameters_${configuration}.json"), path("MaterialParameters_${configuration}.json"), path("${configuration}/Structure_0_1.vtk") - - script: - """ - python3 ${script_create_kratos_input} \ - --input_parameter_file ${parameters} \ - --input_mdpa_file ${mdpa} \ - --input_kratos_input_template ${kratos_input_template} \ - --input_material_template ${kratos_material_template} \ - --output_kratos_inputfile ProjectParameters_${configuration}.json \ - --output_kratos_materialfile MaterialParameters_${configuration}.json - - python3 ${script_run_kratos} \ - --input_parameter_file ${parameters} \ - --input_kratos_inputfile "ProjectParameters_${configuration}.json" \ - --input_kratos_materialfile "MaterialParameters_${configuration}.json" - """ -} - -process postprocess_kratos_results { - publishDir "${params.result_dir}/${params.tool}/" - conda './kratos/environment_simulation.yml' - - input: - path python_script - tuple val(configuration), path(parameter_file), path(result_vtk) - - output: - tuple val(configuration), path("solution_field_data_${configuration}.zip"), path("solution_metrics_${configuration}.json") - - script: - """ - python3 ${python_script} \ - --input_parameter_file ${parameter_file} \ - --input_result_vtk ${result_vtk} \ - --output_solution_file_zip solution_field_data_${configuration}.zip \ - --output_metrics_file solution_metrics_${configuration}.json - """ -} - -workflow kratos_workflow { - take: - mesh_data // tuple(configuration, parameters, mesh) //change the name - result_dir - - main: - params.result_dir = result_dir - - // Define script paths - msh_to_mdpa_script = Channel.value(file('kratos/msh_to_mdpa.py')) - 
create_input_script = Channel.value(file('kratos/create_kratos_input.py')) - run_sim_script = Channel.value(file('kratos/run_kratos_simulation.py')) - postprocess_script = Channel.value(file('kratos/postprocess_results.py')) - - // Template files - kratos_input_template = Channel.value(file('kratos/input_template.json')) - kratos_material_template = Channel.value(file('kratos/StructuralMaterials_template.json')) - - // Process pipeline - output_process_mesh_to_mdpa = mesh_to_mdpa(msh_to_mdpa_script, mesh_data) - - input_process_create_kratos_input = mesh_data.join(output_process_mesh_to_mdpa).map { tuple(it[0], it[1], it[3]) } - - //input_process_create_kratos_input.view() - output_create_kratos_input_and_run_simulation = create_kratos_input_and_run_simulation( - create_input_script, - run_sim_script, - input_process_create_kratos_input, - kratos_input_template, - kratos_material_template - ) - - input_process_postprocess_kratos_results = mesh_data.join(output_create_kratos_input_and_run_simulation).map { tuple(it[0], it[1], it[5]) } - - - output_process_postprocess_kratos_results = postprocess_kratos_results(postprocess_script,input_process_postprocess_kratos_results) - - emit: - output_process_postprocess_kratos_results -} - diff --git a/benchmarks/linear-elastic-plate-with-hole/main.nf b/benchmarks/linear-elastic-plate-with-hole/main.nf deleted file mode 100644 index e6bf069..0000000 --- a/benchmarks/linear-elastic-plate-with-hole/main.nf +++ /dev/null @@ -1,173 +0,0 @@ - -include { fenics_workflow } from './fenics/fenics.nf' -include { kratos_workflow } from './kratos/kratos.nf' - -process create_mesh { - //publishDir "$result_dir/mesh/" - publishDir "${params.result_dir}/mesh/" - conda 'environment_mesh.yml' - - input: - path python_script - val configuration - path parameter_file - - output: - // val(configuration) works as matching key with the input channel in the workflow - tuple val(configuration), path("mesh_${configuration}.msh") - - script: - """ - 
python3 $python_script --input_parameter_file $parameter_file --output_mesh_file "mesh_${configuration}.msh" - """ -} - -process summary{ - publishDir "${params.result_dir}/${tool}/" - conda 'environment_postprocessing.yml' - - input: - path python_script - val configuration - val parameter_file - val mesh_file - val solution_metrics - val solution_field_data - val benchmark - val benchmark_uri - val tool - - output: - path("summary.json") - - script: - """ - python3 $python_script \ - --input_configuration ${configuration.join(' ')} \ - --input_parameter_file ${parameter_file.join(' ')} \ - --input_mesh_file ${mesh_file.join(' ')} \ - --input_solution_metrics ${solution_metrics.join(' ')} \ - --input_solution_field_data ${solution_field_data.join(' ')} \ - --input_benchmark ${benchmark} \ - --input_benchmark_uri ${benchmark_uri} \ - --output_summary_json "summary.json" - - """ -} - - -def prepare_inputs_for_process_summary(input_process_run_simulation, output_process_run_simulation) { - - // Input: channels of the input and the output of the simulation process - // Output: a tuple of channels to be used as input for the summary process - // Purpose: To prepare inputs for the summary process (invoked once per simulation tool) from the output of the simulation process (depending on the number of configurations, invoked multiple times per simulation tool). - - // Firstly, the join operation is performed between the input and output channels of the simulation process based on a matching key (configuration). - - // Secondly, the individual components (configuration, parameter_file, mesh_file, solution_field_data, solution_metrics) are extracted from the joined tuples and collected into separate lists. - // The collect() method outputs a channel with a single entry as the summary process runs once per simulation tool. This single entry is a list of all the configurations or parameter files or mesh files etc. 
- - def matched_channels = input_process_run_simulation.join(output_process_run_simulation) - - def branched_channels = matched_channels.multiMap{ v, w, x, y, z -> - configuration : v - parameter_file : w - mesh : x - solution_field : y - metrics : z } - - return [ - branched_channels.configuration.collect(), - branched_channels.parameter_file.collect(), - branched_channels.mesh.collect(), - branched_channels.solution_field.collect(), - branched_channels.metrics.collect() - ] -} - -workflow { - main: - - def parameter_files_path = [] - params.configurations.each { elem -> - parameter_files_path.add(file(params.configuration_to_parameter_file[elem])) - } - - def ch_parameter_files = Channel.fromList(parameter_files_path) - def ch_configurations = Channel.fromList(params.configurations) - def ch_mesh_python_script = Channel.value(file('create_mesh.py')) - - //Creating Mesh - - output_process_create_mesh = create_mesh(ch_mesh_python_script, ch_configurations, ch_parameter_files) - - input_process_run_simulation = ch_configurations.merge(ch_parameter_files).join(output_process_create_mesh) - - //Running Simulation - - ch_tools = Channel.fromList(params.tools) - - input_process_run_simulation_with_tool = ch_tools.combine(input_process_run_simulation) - input_fenics_workflow = input_process_run_simulation_with_tool.filter{ it[0] == 'fenics' }.map{_w,x,y,z -> tuple(x,y,z)} - input_kratos_workflow = input_process_run_simulation_with_tool.filter{ it[0] == 'kratos' }.map{_w,x,y,z -> tuple(x,y,z)} - - - fenics_workflow(input_fenics_workflow, params.result_dir) - output_fenics_workflow = fenics_workflow.out - def (fenics_configurations,\ - fenics_parameter_files,\ - fenics_meshes,\ - fenics_solution_fields,\ - fenics_summary_metrics) = prepare_inputs_for_process_summary(input_fenics_workflow, output_fenics_workflow) - - - kratos_workflow(input_kratos_workflow, params.result_dir) - output_kratos_workflow = kratos_workflow.out - def (kratos_configurations, \ - 
kratos_parameter_files, \ - kratos_meshes, \ - kratos_solution_fields, \ - kratos_summary_metrics) = prepare_inputs_for_process_summary(input_kratos_workflow, output_kratos_workflow) - - - // channels are concatenated in the same order as they are passed to the .concat. The order should be consistent with the order of tools in ch_tools. - input_summary_configuration = fenics_configurations.concat(kratos_configurations) - input_summary_parameter_file = fenics_parameter_files.concat(kratos_parameter_files) - input_summary_mesh = fenics_meshes.concat(kratos_meshes) - input_summary_solution_field = fenics_solution_fields.concat(kratos_solution_fields) - input_summary_metrics = fenics_summary_metrics.concat(kratos_summary_metrics) - - //Summarizing results - def ch_benchmark = Channel.value(params.benchmark) - def ch_benchmark_uri = Channel.value(params.benchmark_uri) - def ch_summarize_python_script = Channel.value(file('../common/summarize_results.py')) - summary(ch_summarize_python_script, \ - input_summary_configuration, \ - input_summary_parameter_file, \ - input_summary_mesh, \ - input_summary_metrics, \ - input_summary_solution_field, \ - ch_benchmark, \ - ch_benchmark_uri, \ - ch_tools) - -} -/* -Steps to add a new simulation tool to the workflow: - -1. Write the tool-specific workflow, scripts, environment file and store them in the benchmarks/linear-elastic-plate-with-hole/tool_name/. -2. Add the tool name to "tools" workflow_config.json (generated here using generate_config.py) -3. Include the tool-specific workflow script at the top of this file. -4. Create an input channel for the new tool (e.g. see the definition of input_fenics_workflow) -5. Invoke the new tool-specific workflow (similar to fenics_workflow) & using its output, prepare inputs for the summary process. -6. Concatenate the prepared inputs to form the final input channels for the summary process. 
- ---------------------------------------------------------------------------------------------------------------------------------- - -Remark: Care should be taken to track the entries in the I/O channels, as the process output for a given configuration -may not arrive in the same order as the inputs were sent. When reusing channel entries after process execution, outputs should -be matched with their corresponding inputs using a common key. - -Information on channel operations: https://www.nextflow.io/docs/latest/reference/operator.html -Information on channels: https://training.nextflow.io/2.2/basic_training/channels/ -*/ \ No newline at end of file diff --git a/benchmarks/linear-elastic-plate-with-hole/plot_metrics.py b/benchmarks/linear-elastic-plate-with-hole/plot_metrics.py index 8089eb1..9a2ef34 100644 --- a/benchmarks/linear-elastic-plate-with-hole/plot_metrics.py +++ b/benchmarks/linear-elastic-plate-with-hole/plot_metrics.py @@ -3,34 +3,33 @@ from rdflib import Graph import matplotlib.pyplot as plt from collections import defaultdict -from generate_config import workflow_config -def load_graphs(base_dir): +def load_graphs(base_dirs): """ Walk through the base_dir and load all JSON-LD files into rdflib Graphs. 
""" graph_list = [] - for root, _, files in os.walk(base_dir): - for file in files: - if file.endswith(".jsonld"): - file_path = os.path.join(root, file) - try: - g = Graph() - g.parse(file_path, format='json-ld') - graph_list.append(g) - print(f"✅ Parsed: {file_path}") - except Exception as e: - print(f"❌ Failed to parse {file_path}: {e}") + for base_dir in base_dirs: + for root, _, files in os.walk(base_dir): + for file in files: + if file.endswith(".jsonld"): + file_path = os.path.join(root, file) + try: + g = Graph() + g.parse(file_path, format='json-ld') + graph_list.append(g) + print(f"✅ Parsed: {file_path}") + except Exception as e: + print(f"❌ Failed to parse {file_path}: {e}") print(f"\nTotal graphs loaded: {len(graph_list)}") return graph_list -def query_and_build_table(graph_list): +def query_and_build_table(graph_list, tools): """ Run SPARQL query on graphs and build a table. Returns headers and table_data. """ - tools = workflow_config["tools"] filter_conditions = " || ".join( f'CONTAINS(LCASE(?tool_name), "{tool.lower()}")' for tool in tools ) @@ -145,9 +144,9 @@ def plot_element_size_vs_stress(headers, table_data, output_file="element_size_v if __name__ == "__main__": parser = argparse.ArgumentParser(description="Process JSON-LD artifacts and display simulation results.") - parser.add_argument("artifact_folder", type=str, help="Path to the folder containing unzipped artifacts") + parser.add_argument("artifact_folders", type=str, nargs='+', help="Path(s) to the folder(s) containing unzipped artifacts") args = parser.parse_args() - - graphs = load_graphs(args.artifact_folder) - headers, table_data = query_and_build_table(graphs) + tools = ["fenics", "kratos"] # Specify the simulation tools used for filtering knowledge graphs + graphs = load_graphs(args.artifact_folders) + headers, table_data = query_and_build_table(graphs, tools) plot_element_size_vs_stress(headers, table_data, output_file="element_size_vs_stress.pdf") \ No newline at end of file diff 
--git a/environment_benchmarks.yml b/environment_benchmarks.yml index 2ee70d3..fd8fc1e 100644 --- a/environment_benchmarks.yml +++ b/environment_benchmarks.yml @@ -4,7 +4,6 @@ channels: - bioconda dependencies: - snakemake - - nextflow - pyvista - meshio - conda-lock