{
"bapcomposite1": {
"process_id": "bap_composite",
"arguments": {
"geometry": {
"type": "FeatureCollection",
"features": [
{
"id": "0",
"type": "Feature",
"properties": {},
"geometry": {
"type": "Polygon",
"coordinates": [
[
[
4.567161970000427,
50.943775881070565
],
[
4.567161970000427,
50.92105115770309
],
[
4.607805261247165,
50.92105115770309
],
[
4.607805261247165,
50.943775881070565
],
[
4.567161970000427,
50.943775881070565
]
]
]
}
}
]
},
"temporal_extent": [
"2022-05-01",
"2022-05-31"
]
},
"namespace": "https://raw.githubusercontent.com/ESA-APEx/apex_algorithms/refs/heads/main/algorithm_catalog/vito/bap_composite/openeo_udp/bap_composite.json",
"result": true
}
}
scenario = BenchmarkScenario(id='bap_composite', description='bap composite example', backend='openeofed.dataspace.copernicus.eu'...ner/work/apex_algorithms/apex_algorithms/algorithm_catalog/vito/bap_composite/benchmark_scenarios/bap_composite.json'))
connection_factory = <function connection_factory.<locals>.get_connection at 0x7f751bd02480>
tmp_path = PosixPath('/home/runner/work/apex_algorithms/apex_algorithms/qa/benchmarks/tmp_path_root/test_run_benchmark_bap_composi0')
track_metric = <function track_metric.<locals>.track at 0x7f751bd025c0>
track_phase = <apex_algorithm_qa_tools.pytest.pytest_track_metrics._PhaseTracker object at 0x7f751bd09a90>
upload_assets_on_fail = <apex_algorithm_qa_tools.pytest.pytest_upload_assets.upload_assets_on_fail.<locals>._Collector object at 0x7f7561ad4f80>
request = <FixtureRequest for <Function test_run_benchmark[bap_composite]>>
@pytest.mark.parametrize(
"scenario",
[
# Use scenario id as parameterization id to give nicer test names.
pytest.param(uc, id=uc.id)
for uc in get_benchmark_scenarios()
],
)
def test_run_benchmark(
scenario: BenchmarkScenario,
connection_factory,
tmp_path: Path,
track_metric,
track_phase,
upload_assets_on_fail,
request,
):
track_metric("scenario_id", scenario.id)
with track_phase(phase="connect"):
# Check if a backend override has been provided via cli options.
override_backend = request.config.getoption("--override-backend")
backend_filter = request.config.getoption("--backend-filter")
if backend_filter and not re.match(backend_filter, scenario.backend):
# TODO apply filter during scenario retrieval, but seems to be hard to retrieve cli param
pytest.skip(
f"skipping scenario {scenario.id} because backend {scenario.backend} does not match filter {backend_filter!r}"
)
backend = scenario.backend
if override_backend:
_log.info(f"Overriding backend URL with {override_backend!r}")
backend = override_backend
connection: openeo.Connection = connection_factory(url=backend)
report_path = None
if request.config.getoption("--upload-benchmark-report"):
report_path = tmp_path / "benchmark_report.json"
report_path.write_text(json.dumps({
"scenario_id": scenario.id,
"scenario_description": scenario.description,
"scenario_backend": scenario.backend,
"scenario_source": str(scenario.source) if scenario.source else None,
"reference_data": scenario.reference_data,
"reference_options": scenario.reference_options,
}, indent=2))
upload_assets_on_fail(report_path)
def _on_phase_exception(phase: str, exc: Exception):
if report_path is not None:
report = json.loads(report_path.read_text())
report["test_failed"] = True
report["test_failed_phase"] = phase
report["test_error_message"] = str(exc)
report_path.write_text(json.dumps(report, indent=2))
cwd_report_dir = Path("benchmark_reports")
cwd_report_dir.mkdir(exist_ok=True)
(cwd_report_dir / f"{scenario.id}_benchmark_report.json").write_text(
json.dumps(report, indent=2)
)
report_url = upload_assets_on_fail.get_url(report_path)
if report_url:
exc.add_note(f"Benchmark report: {report_url}")
track_phase.on_exception = _on_phase_exception
with track_phase(phase="create-job"):
# TODO #14 scenario option to use synchronous instead of batch job mode?
job = connection.create_job(
process_graph=scenario.process_graph,
title=f"APEx benchmark {scenario.id}",
additional=scenario.job_options,
)
track_metric("job_id", job.job_id)
if report_path is not None:
report = json.loads(report_path.read_text())
report["job_id"] = job.job_id
report_path.write_text(json.dumps(report, indent=2))
with track_phase(phase="run-job"):
# TODO: monitor timing and progress
# TODO: separate "job started" and run phases?
max_minutes = request.config.getoption("--maximum-job-time-in-minutes")
if max_minutes:
def _timeout_handler(signum, frame):
raise TimeoutError(
f"Batch job {job.job_id} exceeded maximum allowed time of {max_minutes} minutes"
)
old_handler = signal.signal(signal.SIGALRM, _timeout_handler)
signal.alarm(max_minutes * 60)
try:
job.start_and_wait()
finally:
if max_minutes:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_handler)
with track_phase(phase="collect-metadata"):
collect_metrics_from_job_metadata(job, track_metric=track_metric)
results = job.get_results()
collect_metrics_from_results_metadata(results, track_metric=track_metric)
with track_phase(phase="download-actual"):
# Download actual results
actual_dir = tmp_path / "actual"
paths = results.download_files(target=actual_dir, include_stac_metadata=True)
# Upload assets on failure
upload_assets_on_fail(*paths)
# Pre-compute S3 URLs for actual files (used in error messages and benchmark reports)
actual_s3_urls = {
str(p.relative_to(actual_dir)): upload_assets_on_fail.get_url(p)
for p in sorted(actual_dir.rglob("*")) if p.is_file()
}
actual_s3_urls = {k: v for k, v in actual_s3_urls.items() if v is not None}
with track_phase(phase="download-reference"):
reference_dir = download_reference_data(
scenario=scenario, reference_dir=tmp_path / "reference"
)
if report_path is not None:
report = json.loads(report_path.read_text())
report["actual_files"] = {
str(p.relative_to(actual_dir)): f"{p.stat().st_size / 1024:.1f} kb"
for p in sorted(actual_dir.rglob("*")) if p.is_file()
}
ref_files = {}
for p in sorted(reference_dir.rglob("*")):
if not p.is_file():
continue
rel = p.relative_to(reference_dir)
size_str = f"{p.stat().st_size / 1024:.1f} kb"
actual_counterpart = actual_dir / rel
if not actual_counterpart.exists():
size_str += " (missing in actual)"
elif actual_counterpart.stat().st_size != p.stat().st_size:
size_str += f" (actual: {actual_counterpart.stat().st_size / 1024:.1f} kb)"
ref_files[str(rel)] = size_str
report["reference_files"] = ref_files
if actual_s3_urls:
report["actual_data"] = actual_s3_urls
report_path.write_text(json.dumps(report, indent=2))
# Also write to CWD so the report is accessible on Jenkins workspace
cwd_report_dir = Path("benchmark_reports")
cwd_report_dir.mkdir(exist_ok=True)
(cwd_report_dir / f"{scenario.id}_benchmark_report.json").write_text(
json.dumps(report, indent=2)
)
with track_phase(
phase="compare", describe_exception=analyse_results_comparison_exception
):
# Compare actual results with reference data
try:
assert_job_results_allclose(
actual=actual_dir,
expected=reference_dir,
tmp_path=tmp_path,
rtol=scenario.reference_options.get("rtol", 1e-3),
atol=scenario.reference_options.get("atol", 1),
pixel_tolerance=scenario.reference_options.get("pixel_tolerance", 1),
)
except AssertionError as e:
msg = str(e)
if scenario.reference_data:
msg += "\n\nReference data URLs:"
for name, url in scenario.reference_data.items():
msg += f"\n {name}: {url}"
if actual_s3_urls:
msg += "\n\nActual data S3 URLs (uploaded on failure):"
for name, url in actual_s3_urls.items():
msg += f"\n {name}: {url}"
> raise AssertionError(msg) from None
E AssertionError: Issues for metadata file 'job-results.json':
E Differing 'derived_from' links (7 common, 0 only in actual, 6 only in expected):
E only in actual: set()
E only in expected: {'S2A_MSIL2A_20220517T103631_N0510_R008_T31UES_20240619T001524', 'S2A_MSIL2A_20220527T103631_N0510_R008_T31UES_20241128T035639', 'S2A_MSIL2A_20220507T103631_N0510_R008_T31UES_20240708T205659', 'S2B_MSIL2A_20220502T103619_N0510_R008_T31UES_20240609T014539', 'S2B_MSIL2A_20220522T103629_N0510_R008_T31UES_20240613T093452', 'S2B_MSIL2A_20220515T104619_N0510_R051_T31UES_20240610T215024'}.
E
E Reference data URLs:
E job-results.json: https://s3.waw3-1.cloudferro.com/apex-benchmarks/gh-18735405985!tests_test_benchmarks.py__test_run_benchmark_bap_composite_!actual/job-results.json
E openEO_2022-05-01Z.tif: https://s3.waw3-1.cloudferro.com/apex-benchmarks/gh-18735405985!tests_test_benchmarks.py__test_run_benchmark_bap_composite_!actual/openEO_2022-05-01Z.tif
E
E Actual data S3 URLs (uploaded on failure):
E job-results.json: https://s3.waw3-1.cloudferro.com/apex-benchmarks/gh-24459422967!tests_test_benchmarks.py__test_run_benchmark_bap_composite_!actual/job-results.json
E openEO_2022-05-01Z.tif: https://s3.waw3-1.cloudferro.com/apex-benchmarks/gh-24459422967!tests_test_benchmarks.py__test_run_benchmark_bap_composite_!actual/openEO_2022-05-01Z.tif
tests/test_benchmarks.py:201: AssertionError
----------------------------- Captured stdout call -----------------------------
0:00:00 Job 'cdse-j-2604151412264bc0ad3b766b5906a5d6': send 'start'
0:00:19 Job 'cdse-j-2604151412264bc0ad3b766b5906a5d6': created (progress 0%)
0:00:25 Job 'cdse-j-2604151412264bc0ad3b766b5906a5d6': created (progress 0%)
0:00:32 Job 'cdse-j-2604151412264bc0ad3b766b5906a5d6': created (progress 0%)
0:00:40 Job 'cdse-j-2604151412264bc0ad3b766b5906a5d6': created (progress 0%)
0:00:50 Job 'cdse-j-2604151412264bc0ad3b766b5906a5d6': created (progress 0%)
0:01:03 Job 'cdse-j-2604151412264bc0ad3b766b5906a5d6': running (progress N/A)
0:01:20 Job 'cdse-j-2604151412264bc0ad3b766b5906a5d6': running (progress N/A)
0:01:39 Job 'cdse-j-2604151412264bc0ad3b766b5906a5d6': running (progress N/A)
0:02:04 Job 'cdse-j-2604151412264bc0ad3b766b5906a5d6': running (progress N/A)
0:02:36 Job 'cdse-j-2604151412264bc0ad3b766b5906a5d6': running (progress N/A)
0:03:14 Job 'cdse-j-2604151412264bc0ad3b766b5906a5d6': running (progress N/A)
0:04:02 Job 'cdse-j-2604151412264bc0ad3b766b5906a5d6': finished (progress 100%)
------------------------------ Captured log call -------------------------------
INFO conftest:conftest.py:145 Connecting to 'openeofed.dataspace.copernicus.eu'
INFO openeo.config:config.py:193 Loaded openEO client config from sources: []
INFO conftest:conftest.py:158 Checking for auth_env_var='OPENEO_AUTH_CLIENT_CREDENTIALS_CDSEFED' to drive auth against url='openeofed.dataspace.copernicus.eu'.
INFO conftest:conftest.py:162 Extracted provider_id='CDSE' client_id='openeo-apex-benchmarks-service-account' from auth_env_var='OPENEO_AUTH_CLIENT_CREDENTIALS_CDSEFED'
INFO openeo.rest.connection:connection.py:302 Found OIDC providers: ['CDSE']
INFO openeo.rest.auth.oidc:oidc.py:410 Doing 'client_credentials' token request 'https://identity.dataspace.copernicus.eu/auth/realms/CDSE/protocol/openid-connect/token' with post data fields ['grant_type', 'client_id', 'client_secret', 'scope'] (client_id 'openeo-apex-benchmarks-service-account')
INFO openeo.rest.connection:connection.py:401 Obtained tokens: ['token_type', 'access_token', 'expires_in', 'id_token', 'scope']
INFO openeo.rest.job:job.py:436 Downloading Job result asset 'openEO_2022-05-01Z.tif' from https://s3.waw3-1.openeo.v1.dataspace.copernicus.eu/openeo-data-prod-waw4-1/batch_jobs/j-2604151412264bc0ad3b766b5906a5d6/openEO_2022-05-01Z.tif?X-Proxy-Head-As-Get=true&X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=f260da2e6e68448d92b2c0741bdf8b5a%2F20260415%2Fwaw4-1%2Fs3%2Faws4_request&X-Amz-Date=20260415T141633Z&X-Amz-Expires=86400&X-Amz-SignedHeaders=host&X-Amz-Security-Token=eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJyb2xlX2FybiI6ImFybjpvcGVuZW93czppYW06Ojpyb2xlL29wZW5lby1kYXRhLXByb2Qtd2F3NC0xLXdvcmtzcGFjZSIsImluaXRpYWxfaXNzdWVyIjoib3BlbmVvLnByb2Qud2F3My0xLm9wZW5lby1pbnQudjEuZGF0YXNwYWNlLmNvcGVybmljdXMuZXUiLCJodHRwczovL2F3cy5hbWF6b24uY29tL3RhZ3MiOnsicHJpbmNpcGFsX3RhZ3MiOnsiam9iX2lkIjpbImotMjYwNDE1MTQxMjI2NGJjMGFkM2I3NjZiNTkwNmE1ZDYiXSwidXNlcl9pZCI6WyI2YTc3ZmNkMS05YzA4LTQ2ZTktYjg3NS01NGZiOTk5YWIyMDAiXX0sInRyYW5zaXRpdmVfdGFnX2tleXMiOlsidXNlcl9pZCIsImpvYl9pZCJdfSwiaXNzIjoic3RzLndhdzMtMS5vcGVuZW8udjEuZGF0YXNwYWNlLmNvcGVybmljdXMuZXUiLCJzdWIiOiJvcGVuZW8tZHJpdmVyIiwiZXhwIjoxNzc2MzA1NzkzLCJuYmYiOjE3NzYyNjI1OTMsImlhdCI6MTc3NjI2MjU5MywianRpIjoiNzU1MzExMWItNTUyOC00MDE0LTk1MDQtMTk5Mzc0NmIxOTZlIiwiYWNjZXNzX2tleV9pZCI6ImYyNjBkYTJlNmU2ODQ0OGQ5MmIyYzA3NDFiZGY4YjVhIn0.VN1YnjVV59DEzaXL8xMJawwsLPuUe576jwlOa6aG1_M3pJGpEryw-lcY4FDH4vESGa49_tUwwrs8pacZ_MT-U4tdaFuCRvrODB8faQLtkNOjm7H-dqw4Xbl0g_wqHdRWTrviwBJD-OpSxulXqO75fJ01a8TIm3Gc4W88miu0ifT4s6LLEk5zPzNkuVcGgKB51qjU1NxQlRKTtsIPR6UA3XVjzt4KkpKl4dvt6kU91mQIxx6MAsFNIEWN-Xu5WxZ_4Ek2BB3Mh12-pp99x8hQBFPFzoBbjKQUdIw6Zq8eE6u3HurptyG7r5i26m8I5l9I1gFXUWLc2Or_oRL-FuZm8w&X-Amz-Signature=2d8d3ae00b29bbe574b582072566485fd1293414a798ba9292ba000efac44cdd to /home/runner/work/apex_algorithms/apex_algorithms/qa/benchmarks/tmp_path_root/test_run_benchmark_bap_composi0/actual/openEO_2022-05-01Z.tif
INFO apex_algorithm_qa_tools.scenarios:util.py:345 Downloading reference data for scenario.id='bap_composite' to reference_dir=PosixPath('/home/runner/work/apex_algorithms/apex_algorithms/qa/benchmarks/tmp_path_root/test_run_benchmark_bap_composi0/reference'): start 2026-04-15 14:16:37.234957
INFO apex_algorithm_qa_tools.scenarios:util.py:345 Downloading source='https://s3.waw3-1.cloudferro.com/apex-benchmarks/gh-18735405985!tests_test_benchmarks.py__test_run_benchmark_bap_composite_!actual/job-results.json' to path=PosixPath('/home/runner/work/apex_algorithms/apex_algorithms/qa/benchmarks/tmp_path_root/test_run_benchmark_bap_composi0/reference/job-results.json'): start 2026-04-15 14:16:37.235276
INFO apex_algorithm_qa_tools.scenarios:util.py:351 Downloading source='https://s3.waw3-1.cloudferro.com/apex-benchmarks/gh-18735405985!tests_test_benchmarks.py__test_run_benchmark_bap_composite_!actual/job-results.json' to path=PosixPath('/home/runner/work/apex_algorithms/apex_algorithms/qa/benchmarks/tmp_path_root/test_run_benchmark_bap_composi0/reference/job-results.json'): end 2026-04-15 14:16:38.007006, elapsed 0:00:00.771730
INFO apex_algorithm_qa_tools.scenarios:util.py:345 Downloading source='https://s3.waw3-1.cloudferro.com/apex-benchmarks/gh-18735405985!tests_test_benchmarks.py__test_run_benchmark_bap_composite_!actual/openEO_2022-05-01Z.tif' to path=PosixPath('/home/runner/work/apex_algorithms/apex_algorithms/qa/benchmarks/tmp_path_root/test_run_benchmark_bap_composi0/reference/openEO_2022-05-01Z.tif'): start 2026-04-15 14:16:38.007363
INFO apex_algorithm_qa_tools.scenarios:util.py:351 Downloading source='https://s3.waw3-1.cloudferro.com/apex-benchmarks/gh-18735405985!tests_test_benchmarks.py__test_run_benchmark_bap_composite_!actual/openEO_2022-05-01Z.tif' to path=PosixPath('/home/runner/work/apex_algorithms/apex_algorithms/qa/benchmarks/tmp_path_root/test_run_benchmark_bap_composi0/reference/openEO_2022-05-01Z.tif'): end 2026-04-15 14:16:39.421429, elapsed 0:00:01.414066
INFO apex_algorithm_qa_tools.scenarios:util.py:351 Downloading reference data for scenario.id='bap_composite' to reference_dir=PosixPath('/home/runner/work/apex_algorithms/apex_algorithms/qa/benchmarks/tmp_path_root/test_run_benchmark_bap_composi0/reference'): end 2026-04-15 14:16:39.421681, elapsed 0:00:02.186724
INFO openeo.testing.results:results.py:423 Comparing job results: PosixPath('/home/runner/work/apex_algorithms/apex_algorithms/qa/benchmarks/tmp_path_root/test_run_benchmark_bap_composi0/actual') vs PosixPath('/home/runner/work/apex_algorithms/apex_algorithms/qa/benchmarks/tmp_path_root/test_run_benchmark_bap_composi0/reference')
Benchmark scenario ID: bap_composite
Benchmark scenario definition: https://github.com/ESA-APEx/apex_algorithms/blob/b70b92e02a12426bca4678170017234d40170416/algorithm_catalog/vito/bap_composite/benchmark_scenarios/bap_composite.json
openEO backend: openeofed.dataspace.copernicus.eu
GitHub Actions workflow run: https://github.com/ESA-APEx/apex_algorithms/actions/runs/24459422967
Workflow artifacts: https://github.com/ESA-APEx/apex_algorithms/actions/runs/24459422967#artifacts
Test start: 2026-04-15 14:12:21.941958+00:00
Test duration: 0:04:17.575937
Test outcome: ❌ failed
Last successful test phase: download-reference
Failure in test phase: compare:derived_from-change
Contact Information
Process Graph
{ "bapcomposite1": { "process_id": "bap_composite", "arguments": { "geometry": { "type": "FeatureCollection", "features": [ { "id": "0", "type": "Feature", "properties": {}, "geometry": { "type": "Polygon", "coordinates": [ [ [ 4.567161970000427, 50.943775881070565 ], [ 4.567161970000427, 50.92105115770309 ], [ 4.607805261247165, 50.92105115770309 ], [ 4.607805261247165, 50.943775881070565 ], [ 4.567161970000427, 50.943775881070565 ] ] ] } } ] }, "temporal_extent": [ "2022-05-01", "2022-05-31" ] }, "namespace": "https://raw.githubusercontent.com/ESA-APEx/apex_algorithms/refs/heads/main/algorithm_catalog/vito/bap_composite/openeo_udp/bap_composite.json", "result": true } }
Error Logs