| 1 | //! End-to-end test harness for ARMFORTAS. |
| 2 | //! |
| 3 | //! Discovers Fortran source files in `test_programs/`, compiles each with |
| 4 | //! `armfortas`, runs the binary, and checks stdout against `! CHECK:` |
| 5 | //! annotations. |
| 6 | //! |
| 7 | //! Each `! CHECK:` line specifies a substring that must appear in the output, |
| 8 | //! in order. Whitespace is trimmed for comparison. |
| 9 | //! |
| 10 | //! ## XFAIL annotations |
| 11 | //! |
| 12 | //! A program may carry one or more `! XFAIL: <reason>` lines anywhere in |
| 13 | //! the source. An XFAIL'd program is *expected* to fail at compile or |
| 14 | //! runtime (or to mismatch its CHECKs) — it's tracking a known bug. The |
| 15 | //! harness reports: |
| 16 | //! |
| 17 | //! * `XFAIL` — the program failed as expected. Counted as a pass. |
| 18 | //! * `XPASS` — the program unexpectedly succeeded. Counted as a |
| 19 | //! failure so we get loud notification that the bug is now fixed |
| 20 | //! and the XFAIL annotation should be removed. |
| 21 | //! |
| 22 | //! XFAIL'd programs are how we capture audit findings as living |
| 23 | //! regression tests *before* the underlying bug is fixed. Each finding |
| 24 | //! gets a program in `test_programs/` whose annotation references the |
| 25 | //! audit ID (`! XFAIL: audit BLOCKING-1 (implied-do negative step)`), |
| 26 | //! so a future audit can grep `test_programs/` for the finding ID and |
| 27 | //! immediately see whether the bug class is covered. |
| 28 | //! |
| 29 | //! ## ERROR_EXPECTED annotations |
| 30 | //! |
| 31 | //! A program may also carry an `! ERROR_EXPECTED: <substring>` line. |
| 32 | //! That asserts the program **must fail to compile**, and the |
| 33 | //! compiler's stderr **must contain** the given substring. This is |
| 34 | //! how we test "should be a diagnostic" cases — Fortran constraint |
| 35 | //! violations that the compiler is required to reject. The semantics: |
| 36 | //! |
| 37 | //! * If `ERROR_EXPECTED` is present, CHECK lines are ignored. |
| 38 | //! * If compilation succeeds, the test fails. |
| 39 | //! * If compilation fails but stderr doesn't contain the substring, |
| 40 | //! the test fails. |
| 41 | //! * If compilation fails with the expected substring, the test |
| 42 | //! passes. |
| 43 | //! |
| 44 | //! `ERROR_EXPECTED` composes with `XFAIL`: if a program is annotated |
| 45 | //! with both, an XFAIL fires when the expected error is **not** |
| 46 | //! produced (the bug is "we don't yet diagnose this"), and an XPASS |
| 47 | //! fires once we start diagnosing it correctly (so the XFAIL |
| 48 | //! annotation can come off and the program becomes a regular |
| 49 | //! "diagnostic regression" test). |
| 50 | //! |
| 51 | //! `! ERROR_SPAN: <line>:<col>` composes with `ERROR_EXPECTED` and |
| 52 | //! asserts that the diagnostic points at the expected source |
| 53 | //! location. The span check is substring-based against the emitted |
| 54 | //! diagnostic location text, so the compiler can print either |
| 55 | //! `path:line:col:` or `line:col:` and still satisfy the contract. |
| 56 | //! |
| 57 | //! ## IR_CHECK / IR_NOT annotations |
| 58 | //! |
| 59 | //! For tests that need to assert on the *shape* of the lowered IR |
| 60 | //! (not just the runtime answer), two extra annotations exist: |
| 61 | //! |
| 62 | //! * `! IR_CHECK: <substring>` — the substring must appear in the |
| 63 | //! compiler's `--emit-ir` output. Multiple IR_CHECKs must appear |
| 64 | //! in the order they're declared. |
| 65 | //! * `! IR_NOT: <substring>` — the substring must NOT appear in the |
| 66 | //! `--emit-ir` output. Used for negative-shape assertions like |
| 67 | //! "this PARAMETER local must not have a `store` instruction" |
| 68 | //! or "this expression must not lower to a `global_addr`". |
| 69 | //! |
| 70 | //! IR shape is only stable at -O0 (the optimization passes erase |
| 71 | //! dead code, fold constants, hoist loads, etc.), so IR_CHECK / |
| 72 | //! IR_NOT only fire at the -O0 test level. The runtime CHECKs |
| 73 | //! continue to run at every opt level as before. |
| 74 | //! |
| 75 | //! Audit5 MIN-2: this exists because audit4 captured the |
| 76 | //! parameter-inlining and module-allocatable bugs as runtime tests |
| 77 | //! only. A future regression that broke the IR shape but happened |
| 78 | //! to land on the right runtime answer would slip through. |
| 79 | //! |
| 80 | //! ## STDERR_CHECK / EXIT_CODE annotations |
| 81 | //! |
| 82 | //! Runtime tests can also assert on stderr and process exit status: |
| 83 | //! |
| 84 | //! * `! STDERR_CHECK: <substring>` — ordered substring checks |
| 85 | //! against the program's stderr stream. |
| 86 | //! * `! EXIT_CODE: <int>` — exact process exit code. Without this |
| 87 | //! annotation, the harness preserves the old rule that runtime |
| 88 | //! tests must exit successfully. |
| 89 | //! |
| 90 | //! This makes runtime tests expressive enough for paths like |
| 91 | //! `ERROR STOP`, warning-like stderr output, and future |
| 92 | //! side-effect-heavy programs without forcing them through |
| 93 | //! `ERROR_EXPECTED`, which is compile-failure-only. |
| 94 | //! |
| 95 | //! ## ASM_CHECK / ASM_NOT annotations |
| 96 | //! |
| 97 | //! Runtime tests can also pin emitted assembly shape: |
| 98 | //! |
| 99 | //! * `! ASM_CHECK: <substring>` — the substring must appear in |
| 100 | //! the compiler's `-S` output. Multiple checks must appear in |
| 101 | //! the order they are declared. |
| 102 | //! * `! ASM_NOT: <substring>` — the substring must NOT appear in |
| 103 | //! the emitted assembly text. |
| 104 | //! |
| 105 | //! Unlike IR checks, assembly shape can legitimately vary by opt |
| 106 | //! level, so ASM checks fire at every optimization level. Tests |
| 107 | //! should use stable substrings that are intentionally expected |
| 108 | //! across the requested matrix. |
| 109 | //! |
| 110 | //! ## FILE_CHECK / FILE_NOT / FILE_EXISTS / FILE_MISSING / FILE_LINE_COUNT / FILE_RERUN_MODE / FILE_SET_EXACT annotations |
| 111 | //! |
| 112 | //! Runtime tests can assert on files created inside their per-test |
| 113 | //! sandbox: |
| 114 | //! |
| 115 | //! * `! FILE_CHECK: <relative-path> => <substring>` — the file must |
| 116 | //! exist after execution, and the substring must appear in its |
| 117 | //! contents. Multiple checks for the same file must appear in the |
| 118 | //! order declared. |
| 119 | //! * `! FILE_NOT: <relative-path> => <substring>` — the file must |
| 120 | //! exist, and the substring must not appear in its contents. |
| 121 | //! * `! FILE_EXISTS: <relative-path>` — the file must exist after the |
| 122 | //! run, regardless of content. |
| 123 | //! * `! FILE_MISSING: <relative-path>` — the file must not exist after |
| 124 | //! the run. |
| 125 | //! * `! FILE_LINE_COUNT: <relative-path> => <int>` — the file must |
| 126 | //! exist and contain exactly that many text lines. |
| 127 | //! * `! FILE_RERUN_MODE: <relative-path> => stable|append` — when the |
| 128 | //! program is executed twice in the same sandbox, the named file must |
| 129 | //! either be byte-identical after both runs (`stable`) or grow by |
| 130 | //! strict append (`append`). |
| 131 | //! * `! FILE_SET_EXACT: <relative-path>[,<relative-path>...]` — the |
| 132 | //! final sandbox file set must match exactly, with no extra side |
| 133 | //! effects beyond the listed relative paths. |
| 134 | //! |
| 135 | //! Paths are sandbox-relative on purpose. The harness runs each binary |
| 136 | //! in a private temp directory, so file assertions pin side effects |
| 137 | //! without letting tests stomp on one another. |
| 138 | //! |
| 139 | //! ## REPRO_CHECK annotations |
| 140 | //! |
| 141 | //! Tests can also opt into explicit reproducibility checks: |
| 142 | //! |
| 143 | //! * `! REPRO_CHECK: asm` — compile twice with `-S` and require |
| 144 | //! identical assembly bytes. |
| 145 | //! * `! REPRO_CHECK: obj` — compile twice with `-c` and require |
| 146 | //! identical object bytes. |
| 147 | //! * `! REPRO_CHECK: run` — execute twice in fresh sandboxes and |
| 148 | //! require identical exit/stdout/stderr plus identical written files. |
| 149 | //! * `! REPRO_CHECK: run_same_sandbox` — execute twice in the same |
| 150 | //! sandbox and require the second run to leave the same final |
| 151 | //! exit/stdout/stderr and file snapshot as the first. |
| 152 | //! |
| 153 | //! These are test-local determinism assertions, lighter-weight than the |
| 154 | //! dedicated global determinism tests at the bottom of this file. |
| 155 | //! |
| 156 | //! ## OPT_EQ annotations |
| 157 | //! |
| 158 | //! Cross-opt invariants can be asserted explicitly: |
| 159 | //! |
| 160 | //! * `! OPT_EQ: O0,O1,O2 => stdout|stderr|exit` |
| 161 | //! * `! OPT_EQ: O0,O1 => asm` |
| 162 | //! |
| 163 | //! The first listed opt level is the baseline. When that level runs, the |
| 164 | //! harness compiles the same source at the other listed opt levels and |
| 165 | //! compares the requested surfaces. This lets a test say "these runtime |
| 166 | //! surfaces must agree across optimization" without also pinning every |
| 167 | //! assembly detail at every level. |
| 168 | //! |
| 169 | //! ## PHASE_TRIANGULATE annotations |
| 170 | //! |
| 171 | //! Phase triangulation lets a runtime test say that additional compiler |
| 172 | //! surfaces must also materialize successfully at the same optimization level: |
| 173 | //! |
| 174 | //! * `! PHASE_TRIANGULATE: ir|asm|obj` |
| 175 | //! * `! PHASE_TRIANGULATE: ir|asm|obj|clean` |
| 176 | //! * `! PHASE_TRIANGULATE: ir|asm|obj|repro` |
| 177 | //! |
| 178 | //! The linked runtime path is the anchor. If the program runs correctly but |
| 179 | //! one of the requested extra surfaces fails to compile or produces an empty |
| 180 | //! artifact, the test still fails. This is how the harness starts relating |
| 181 | //! linked execution to `--emit-ir`, `-S`, and `-c` instead of treating them as |
| 182 | //! isolated one-off checks. |
| 183 | //! |
| 184 | //! `clean` strengthens that oracle further: the compile-only phases must leave |
| 185 | //! only their explicit output artifact in a private phase sandbox. That lets a |
| 186 | //! runtime-side-effecting program assert that `--emit-ir`, `-S`, and `-c` do |
| 187 | //! not accidentally create the files that only linked execution should create. |
| 188 | //! |
| 189 | //! `repro` strengthens it in a different direction: each requested compile-only |
| 190 | //! phase must produce byte-identical output across two independent compilations. |
| 191 | //! This keeps pipeline oracles from checking only "exists" when what we really |
| 192 | //! need is "exists, stays clean, and stays deterministic". |
| 193 | |
| 194 | use std::collections::BTreeMap; |
| 195 | use std::fs; |
| 196 | use std::path::{Path, PathBuf}; |
| 197 | use std::process::Command; |
| 198 | use std::sync::atomic::{AtomicUsize, Ordering}; |
| 199 | |
// Process-wide monotonic counter; combined with the PID in
// `unique_temp_path` so concurrently-running tests never collide on
// temp file names.
static NEXT_TEMP_ID: AtomicUsize = AtomicUsize::new(0);
| 201 | |
/// A single expected check.
///
/// One ordered substring assertion parsed from an annotation line
/// (e.g. `! CHECK:` or `! STDERR_CHECK:`).
struct Check {
    /// 1-based line number of the annotation in the test source.
    line_num: usize,
    /// Substring that must appear (in declared order) in the stream.
    pattern: String,
}
| 207 | |
/// A single file-content assertion against the per-test sandbox.
struct FileCheck {
    /// 1-based line number of the annotation in the test source.
    line_num: usize,
    /// Sandbox-relative path of the file to inspect.
    rel_path: String,
    /// Substring searched for in the file contents.
    pattern: String,
    /// `false` for FILE_CHECK (must contain), `true` for FILE_NOT
    /// (must not contain).
    negative: bool,
}
| 215 | |
/// A FILE_EXISTS / FILE_MISSING assertion: the named sandbox file
/// must (or must not) exist after the program runs.
struct FilePresenceCheck {
    /// 1-based line number of the annotation in the test source.
    line_num: usize,
    /// Sandbox-relative path of the file.
    rel_path: String,
    /// `true` for FILE_EXISTS, `false` for FILE_MISSING.
    should_exist: bool,
}
| 221 | |
/// A FILE_LINE_COUNT assertion: the named sandbox file must exist
/// and contain exactly `expected_lines` text lines.
struct FileLineCountCheck {
    /// 1-based line number of the annotation in the test source.
    line_num: usize,
    /// Sandbox-relative path of the file.
    rel_path: String,
    /// Exact number of text lines the file must contain.
    expected_lines: usize,
}
| 227 | |
/// How a file is allowed to change when the program is executed twice
/// in the same sandbox (`! FILE_RERUN_MODE:`).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum FileRerunMode {
    /// The file must be byte-identical after both runs.
    Stable,
    /// The second run may only strictly append to the first run's bytes.
    Append,
}
| 233 | |
/// A parsed `! FILE_RERUN_MODE:` annotation binding a sandbox file to
/// its allowed rerun behavior.
struct FileRerunModeCheck {
    /// 1-based line number of the annotation in the test source.
    line_num: usize,
    /// Sandbox-relative path of the file.
    rel_path: String,
    /// How the file may change between the two runs.
    mode: FileRerunMode,
}
| 239 | |
/// A `! FILE_SET_EXACT:` assertion: the sandbox's final file set must
/// match these relative paths exactly — no extra side-effect files.
struct FileSetExactCheck {
    /// 1-based line number of the annotation in the test source.
    line_num: usize,
    /// Sorted, deduplicated sandbox-relative paths that must be the
    /// only files present after the run.
    rel_paths: Vec<String>,
}
| 244 | |
/// Which reproducibility check a `! REPRO_CHECK:` annotation requests.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum ReproStage {
    /// Compile twice with `-S`; assembly bytes must be identical.
    Asm,
    /// Compile twice with `-c`; object bytes must be identical.
    Obj,
    /// Execute twice in fresh sandboxes; outputs and files must match.
    Run,
    /// Execute twice in the same sandbox; the second run must leave the
    /// same final snapshot as the first.
    RunSameSandbox,
}
| 252 | |
/// A surface that an `! OPT_EQ:` rule requires to agree across
/// optimization levels.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum OptEqComponent {
    /// Program stdout must be identical.
    Stdout,
    /// Program stderr must be identical.
    Stderr,
    /// Process exit status must be identical.
    Exit,
    /// `-S` assembly output must be identical.
    Asm,
}
| 260 | |
/// A parsed `! OPT_EQ:` rule: the listed components must agree across
/// the listed optimization levels (the first level is the baseline).
#[derive(Debug, Clone, PartialEq, Eq)]
struct OptEqRule {
    /// 1-based line number of the annotation in the test source.
    line_num: usize,
    /// Normalized opt flags (e.g. "-O0"); the first is the baseline.
    opt_flags: Vec<String>,
    /// Surfaces that must be identical across those levels.
    components: Vec<OptEqComponent>,
}
| 267 | |
/// A compiler surface (or modifier) requested by `! PHASE_TRIANGULATE:`.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum PhaseSurface {
    /// `--emit-ir` must succeed and produce a non-empty artifact.
    Ir,
    /// `-S` must succeed and produce a non-empty artifact.
    Asm,
    /// `-c` must succeed and produce a non-empty artifact.
    Obj,
    /// Modifier: compile-only phases must leave only their explicit
    /// output artifact in the phase sandbox.
    Clean,
    /// Modifier: each compile-only phase must be byte-identical across
    /// two independent compilations.
    Repro,
}
| 276 | |
/// A parsed `! PHASE_TRIANGULATE:` annotation: extra compiler surfaces
/// that must also materialize at the same optimization level as the
/// anchoring runtime test.
#[derive(Debug, Clone, PartialEq, Eq)]
struct PhaseTriangulation {
    /// 1-based line number of the annotation in the test source.
    line_num: usize,
    /// Requested surfaces/modifiers, in declaration order.
    surfaces: Vec<PhaseSurface>,
}
| 282 | |
/// The observable outcome of one program execution: exit status, both
/// output streams, and the bytes of every file left in the sandbox.
/// Compared wholesale for REPRO_CHECK / rerun assertions (derives
/// `PartialEq` for exactly that purpose).
#[derive(Debug, Clone, PartialEq, Eq)]
struct RunSnapshot {
    exit_code: i32,
    stdout: String,
    stderr: String,
    /// Sandbox-relative path -> file bytes. BTreeMap gives a stable,
    /// deterministic iteration/comparison order.
    files: BTreeMap<String, Vec<u8>>,
}
| 290 | |
| 291 | // ---- Multi-file test support ---- |
| 292 | |
/// A named source segment extracted from a multi-file test program.
struct MultifileSegment {
    /// The declared filename (e.g. "mymod.f90").
    name: String,
    /// The Fortran source for this segment.
    source: String,
}

/// Split a source file on `!--- file: <name>` markers.
/// Returns `None` if no markers are present (single-file test).
fn split_multifile_segments(source: &str) -> Option<Vec<MultifileSegment>> {
    let mut segments: Vec<MultifileSegment> = Vec::new();
    // The segment currently being accumulated: (declared name, lines).
    let mut open: Option<(String, Vec<&str>)> = None;

    let close = |segments: &mut Vec<MultifileSegment>, name: String, body: Vec<&str>| {
        segments.push(MultifileSegment {
            name,
            source: body.join("\n") + "\n",
        });
    };

    for line in source.lines() {
        match line.trim().strip_prefix("!--- file:") {
            Some(declared) => {
                // A new marker closes the segment in progress.
                if let Some((name, body)) = open.take() {
                    close(&mut segments, name, body);
                }
                open = Some((declared.trim().to_string(), Vec::new()));
            }
            None => {
                // Lines before the first marker are annotation-only
                // preamble (CHECK, MULTIFILE_LINK, etc.) and belong to
                // no segment.
                if let Some((_, body)) = open.as_mut() {
                    body.push(line);
                }
            }
        }
    }

    // Close the trailing segment, if any.
    if let Some((name, body)) = open {
        close(&mut segments, name, body);
    }

    (!segments.is_empty()).then_some(segments)
}
| 341 | |
/// Extract `! MULTIFILE_LINK: a.f90 b.f90 ...` — the link order.
/// If absent, segments are linked in declaration order.
fn extract_multifile_link(source: &str) -> Option<Vec<String>> {
    source
        .lines()
        .filter_map(|line| line.trim().strip_prefix("! MULTIFILE_LINK:"))
        .map(|rest| {
            rest.split_whitespace()
                .map(str::to_string)
                .collect::<Vec<String>>()
        })
        // An annotation with no names is ignored; keep scanning.
        .find(|names| !names.is_empty())
}
| 356 | |
/// Directories that may contain build artifacts, best first: the
/// profile directory the current executable lives under (if any),
/// then the conventional workspace-relative locations.
fn candidate_target_dirs() -> Vec<PathBuf> {
    let mut dirs: Vec<PathBuf> = Vec::new();

    if let Ok(exe) = std::env::current_exe() {
        let profile_dir = exe.ancestors().find(|ancestor| {
            matches!(
                ancestor.file_name().and_then(|n| n.to_str()),
                Some("debug") | Some("release")
            )
        });
        if let Some(dir) = profile_dir {
            dirs.push(dir.to_path_buf());
        }
    }

    // Relative fallbacks, deduplicated against anything found above.
    for fallback in ["target/release", "target/debug"] {
        let candidate = PathBuf::from(fallback);
        if !dirs.contains(&candidate) {
            dirs.push(candidate);
        }
    }
    dirs
}
| 378 | |
| 379 | /// Find the static runtime library. |
| 380 | fn find_runtime() -> PathBuf { |
| 381 | for dir in candidate_target_dirs() { |
| 382 | let p = dir.join("libarmfortas_rt.a"); |
| 383 | if p.exists() { |
| 384 | return p; |
| 385 | } |
| 386 | } |
| 387 | panic!("libarmfortas_rt.a not found — run `cargo build` first"); |
| 388 | } |
| 389 | |
/// Get the macOS SDK path for linking.
///
/// Shells out to `xcrun`; panics if it cannot be launched. The
/// trailing newline on its stdout is trimmed off.
fn sdk_path() -> String {
    let output = Command::new("xcrun")
        .arg("--sdk")
        .arg("macosx")
        .arg("--show-sdk-path")
        .output()
        .expect("xcrun failed");
    String::from_utf8(output.stdout).unwrap().trim().to_string()
}
| 398 | |
/// Compile a single .f90 to .o with -c.
///
/// Runs `compiler` from `search_dir` (so relative includes resolve
/// there) and also passes `-I<search_dir>` for module lookup.
/// Returns the compiler's stderr as the error string on failure.
fn compile_to_object(
    compiler: &Path,
    source: &Path,
    output: &Path,
    opt_flag: &str,
    search_dir: &Path,
) -> Result<(), String> {
    let include_flag = format!("-I{}", search_dir.display());
    let argv = [
        source.to_str().unwrap(),
        "-c",
        opt_flag,
        "-o",
        output.to_str().unwrap(),
        include_flag.as_str(),
    ];
    let run = Command::new(compiler)
        .current_dir(search_dir)
        .args(argv)
        .output()
        .map_err(|e| format!("cannot launch compiler for {}: {}", source.display(), e))?;

    if run.status.success() {
        Ok(())
    } else {
        Err(format!(
            "compile {} failed:\n{}",
            source.display(),
            String::from_utf8_lossy(&run.stderr),
        ))
    }
}
| 428 | |
| 429 | /// Link object files with the runtime into a binary. |
| 430 | fn link_objects(objects: &[PathBuf], output: &Path) -> Result<(), String> { |
| 431 | let runtime = find_runtime(); |
| 432 | let sdk = sdk_path(); |
| 433 | let mut args: Vec<String> = vec!["-o".into(), output.to_str().unwrap().into()]; |
| 434 | for o in objects { |
| 435 | args.push(o.to_str().unwrap().into()); |
| 436 | } |
| 437 | args.push(runtime.to_str().unwrap().into()); |
| 438 | args.extend([ |
| 439 | "-lSystem".into(), |
| 440 | "-syslibroot".into(), |
| 441 | sdk, |
| 442 | "-arch".into(), |
| 443 | "arm64".into(), |
| 444 | ]); |
| 445 | let result = Command::new("ld") |
| 446 | .args(&args) |
| 447 | .output() |
| 448 | .map_err(|e| format!("cannot launch linker: {}", e))?; |
| 449 | if !result.status.success() { |
| 450 | return Err(format!( |
| 451 | "link failed:\n{}", |
| 452 | String::from_utf8_lossy(&result.stderr), |
| 453 | )); |
| 454 | } |
| 455 | Ok(()) |
| 456 | } |
| 457 | |
| 458 | fn unique_temp_path(prefix: &str, stem: &str, tag: &str, ext: &str) -> PathBuf { |
| 459 | let id = NEXT_TEMP_ID.fetch_add(1, Ordering::Relaxed); |
| 460 | std::env::temp_dir().join(format!( |
| 461 | "afs_{}_{}_{}_{}_{}{}", |
| 462 | prefix, |
| 463 | std::process::id(), |
| 464 | id, |
| 465 | stem, |
| 466 | tag.trim_start_matches('-'), |
| 467 | ext |
| 468 | )) |
| 469 | } |
| 470 | |
| 471 | /// Extract ordered substring checks from a Fortran source file. |
| 472 | fn extract_prefixed_checks(source: &str, prefix: &str) -> Vec<Check> { |
| 473 | source |
| 474 | .lines() |
| 475 | .enumerate() |
| 476 | .filter_map(|(i, line)| { |
| 477 | let trimmed = line.trim(); |
| 478 | if let Some(rest) = trimmed.strip_prefix(prefix) { |
| 479 | Some(Check { |
| 480 | line_num: i + 1, |
| 481 | pattern: rest.trim().to_string(), |
| 482 | }) |
| 483 | } else { |
| 484 | None |
| 485 | } |
| 486 | }) |
| 487 | .collect() |
| 488 | } |
| 489 | |
/// Extract `! CHECK:` patterns from a Fortran source file.
///
/// These are the ordered stdout substring assertions described in the
/// module docs.
fn extract_checks(source: &str) -> Vec<Check> {
    extract_prefixed_checks(source, "! CHECK:")
}
| 494 | |
/// Extract `! STDERR_CHECK:` patterns from a Fortran source file.
///
/// These are the ordered stderr substring assertions described in the
/// module docs.
fn extract_stderr_checks(source: &str) -> Vec<Check> {
    extract_prefixed_checks(source, "! STDERR_CHECK:")
}
| 499 | |
/// Extract `! XFAIL:` reason text. Returns the first reason found, or
/// `None` if the program has no XFAIL annotation. Multiple XFAIL lines
/// are allowed (only the first is reported); a typical pattern is one
/// audit ID per line for findings of the same class.
fn extract_xfail(source: &str) -> Option<String> {
    source
        .lines()
        .find_map(|line| line.trim().strip_prefix("! XFAIL:"))
        .map(|reason| reason.trim().to_string())
}
| 513 | |
/// Extract `! ERROR_EXPECTED:` substring text. Returns the expected
/// stderr substring if any. Programs with this annotation are
/// asserted to fail compilation.
fn extract_error_expected(source: &str) -> Option<String> {
    source
        .lines()
        .find_map(|line| line.trim().strip_prefix("! ERROR_EXPECTED:"))
        .map(|substr| substr.trim().to_string())
}
| 526 | |
/// A parsed `! ERROR_SPAN:` expectation: the diagnostic must point at
/// source position `line`:`col`.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct ExpectedSpan {
    /// 1-based line number of the annotation itself in the test file.
    line_num: usize,
    /// Expected diagnostic line.
    line: usize,
    /// Expected diagnostic column.
    col: usize,
}

/// Extract `! ERROR_SPAN:` and parse it as an exact line:col pair.
fn extract_error_span(source: &str, filename: &str) -> Result<Option<ExpectedSpan>, String> {
    let annotations: Vec<(usize, &str)> = source
        .lines()
        .enumerate()
        .filter_map(|(i, line)| {
            line.trim()
                .strip_prefix("! ERROR_SPAN:")
                .map(|rest| (i + 1, rest.trim()))
        })
        .collect();

    let (line_num, raw) = match annotations.first() {
        None => return Ok(None),
        Some(&first) => first,
    };
    if let Some(&(extra_line, _)) = annotations.get(1) {
        return Err(format!(
            "{}:{}: multiple ERROR_SPAN annotations are not allowed (another at line {})",
            filename, line_num, extra_line
        ));
    }

    let (line_text, col_text) = raw.split_once(':').ok_or_else(|| {
        format!(
            "{}:{}: ERROR_SPAN must be written as <line>:<col>, got '{}'",
            filename, line_num, raw
        )
    })?;

    let line = line_text.parse::<usize>().map_err(|_| {
        format!(
            "{}:{}: ERROR_SPAN line must be a decimal integer, got '{}'",
            filename, line_num, line_text
        )
    })?;
    let col = col_text.parse::<usize>().map_err(|_| {
        format!(
            "{}:{}: ERROR_SPAN column must be a decimal integer, got '{}'",
            filename, line_num, col_text
        )
    })?;

    Ok(Some(ExpectedSpan { line_num, line, col }))
}
| 580 | |
/// Extract `! EXIT_CODE:` and parse it as an exact expected exit
/// status. Multiple annotations are rejected as a test setup error
/// so the expected runtime contract stays unambiguous.
fn extract_exit_code(source: &str, filename: &str) -> Result<Option<i32>, String> {
    let mut found: Option<(usize, &str)> = None;

    for (i, line) in source.lines().enumerate() {
        let Some(rest) = line.trim().strip_prefix("! EXIT_CODE:") else {
            continue;
        };
        match found {
            // The error cites the first annotation's position and the
            // duplicate's line, matching the ERROR_SPAN convention.
            Some((first_line, _)) => {
                return Err(format!(
                    "{}:{}: multiple EXIT_CODE annotations are not allowed (another at line {})",
                    filename,
                    first_line,
                    i + 1
                ));
            }
            None => found = Some((i + 1, rest.trim())),
        }
    }

    let Some((line_num, raw)) = found else {
        return Ok(None);
    };
    raw.parse::<i32>().map(Some).map_err(|_| {
        format!(
            "{}:{}: EXIT_CODE must be a decimal integer, got '{}'",
            filename, line_num, raw
        )
    })
}
| 610 | |
| 611 | fn extract_file_checks( |
| 612 | source: &str, |
| 613 | filename: &str, |
| 614 | pos_prefix: &str, |
| 615 | neg_prefix: &str, |
| 616 | ) -> Result<Vec<FileCheck>, String> { |
| 617 | let mut checks = Vec::new(); |
| 618 | for (i, line) in source.lines().enumerate() { |
| 619 | let trimmed = line.trim(); |
| 620 | let (rest, negative) = if let Some(rest) = trimmed.strip_prefix(pos_prefix) { |
| 621 | (rest.trim(), false) |
| 622 | } else if let Some(rest) = trimmed.strip_prefix(neg_prefix) { |
| 623 | (rest.trim(), true) |
| 624 | } else { |
| 625 | continue; |
| 626 | }; |
| 627 | |
| 628 | let Some((raw_path, raw_pattern)) = rest.split_once("=>") else { |
| 629 | return Err(format!( |
| 630 | "{}:{}: {} must be written as <relative-path> => <substring>", |
| 631 | filename, |
| 632 | i + 1, |
| 633 | if negative { "FILE_NOT" } else { "FILE_CHECK" } |
| 634 | )); |
| 635 | }; |
| 636 | |
| 637 | let rel_path = raw_path.trim(); |
| 638 | if rel_path.is_empty() { |
| 639 | return Err(format!( |
| 640 | "{}:{}: FILE_CHECK/FILE_NOT path cannot be empty", |
| 641 | filename, |
| 642 | i + 1 |
| 643 | )); |
| 644 | } |
| 645 | if Path::new(rel_path).is_absolute() { |
| 646 | return Err(format!( |
| 647 | "{}:{}: FILE_CHECK/FILE_NOT path must be relative, got '{}'", |
| 648 | filename, |
| 649 | i + 1, |
| 650 | rel_path |
| 651 | )); |
| 652 | } |
| 653 | |
| 654 | checks.push(FileCheck { |
| 655 | line_num: i + 1, |
| 656 | rel_path: rel_path.to_string(), |
| 657 | pattern: raw_pattern.trim().to_string(), |
| 658 | negative, |
| 659 | }); |
| 660 | } |
| 661 | Ok(checks) |
| 662 | } |
| 663 | |
| 664 | fn extract_file_presence_checks( |
| 665 | source: &str, |
| 666 | filename: &str, |
| 667 | ) -> Result<Vec<FilePresenceCheck>, String> { |
| 668 | let mut checks = Vec::new(); |
| 669 | for (i, line) in source.lines().enumerate() { |
| 670 | let trimmed = line.trim(); |
| 671 | let (rest, should_exist, directive_name) = |
| 672 | if let Some(rest) = trimmed.strip_prefix("! FILE_EXISTS:") { |
| 673 | (rest.trim(), true, "FILE_EXISTS") |
| 674 | } else if let Some(rest) = trimmed.strip_prefix("! FILE_MISSING:") { |
| 675 | (rest.trim(), false, "FILE_MISSING") |
| 676 | } else { |
| 677 | continue; |
| 678 | }; |
| 679 | |
| 680 | if rest.is_empty() { |
| 681 | return Err(format!( |
| 682 | "{}:{}: {} path cannot be empty", |
| 683 | filename, |
| 684 | i + 1, |
| 685 | directive_name |
| 686 | )); |
| 687 | } |
| 688 | if Path::new(rest).is_absolute() { |
| 689 | return Err(format!( |
| 690 | "{}:{}: {} path must be relative, got '{}'", |
| 691 | filename, |
| 692 | i + 1, |
| 693 | directive_name, |
| 694 | rest |
| 695 | )); |
| 696 | } |
| 697 | |
| 698 | checks.push(FilePresenceCheck { |
| 699 | line_num: i + 1, |
| 700 | rel_path: rest.to_string(), |
| 701 | should_exist, |
| 702 | }); |
| 703 | } |
| 704 | Ok(checks) |
| 705 | } |
| 706 | |
| 707 | fn extract_file_line_count_checks( |
| 708 | source: &str, |
| 709 | filename: &str, |
| 710 | ) -> Result<Vec<FileLineCountCheck>, String> { |
| 711 | let mut checks = Vec::new(); |
| 712 | for (i, line) in source.lines().enumerate() { |
| 713 | let trimmed = line.trim(); |
| 714 | let Some(rest) = trimmed.strip_prefix("! FILE_LINE_COUNT:") else { |
| 715 | continue; |
| 716 | }; |
| 717 | let Some((raw_path, raw_count)) = rest.trim().split_once("=>") else { |
| 718 | return Err(format!( |
| 719 | "{}:{}: FILE_LINE_COUNT must be written as <relative-path> => <int>", |
| 720 | filename, |
| 721 | i + 1 |
| 722 | )); |
| 723 | }; |
| 724 | |
| 725 | let rel_path = raw_path.trim(); |
| 726 | if rel_path.is_empty() { |
| 727 | return Err(format!( |
| 728 | "{}:{}: FILE_LINE_COUNT path cannot be empty", |
| 729 | filename, |
| 730 | i + 1 |
| 731 | )); |
| 732 | } |
| 733 | if Path::new(rel_path).is_absolute() { |
| 734 | return Err(format!( |
| 735 | "{}:{}: FILE_LINE_COUNT path must be relative, got '{}'", |
| 736 | filename, |
| 737 | i + 1, |
| 738 | rel_path |
| 739 | )); |
| 740 | } |
| 741 | |
| 742 | let expected_lines = raw_count.trim().parse::<usize>().map_err(|_| { |
| 743 | format!( |
| 744 | "{}:{}: FILE_LINE_COUNT line count must be a non-negative integer, got '{}'", |
| 745 | filename, |
| 746 | i + 1, |
| 747 | raw_count.trim() |
| 748 | ) |
| 749 | })?; |
| 750 | |
| 751 | checks.push(FileLineCountCheck { |
| 752 | line_num: i + 1, |
| 753 | rel_path: rel_path.to_string(), |
| 754 | expected_lines, |
| 755 | }); |
| 756 | } |
| 757 | Ok(checks) |
| 758 | } |
| 759 | |
| 760 | fn extract_file_rerun_mode_checks( |
| 761 | source: &str, |
| 762 | filename: &str, |
| 763 | ) -> Result<Vec<FileRerunModeCheck>, String> { |
| 764 | let mut checks = Vec::new(); |
| 765 | for (i, line) in source.lines().enumerate() { |
| 766 | let trimmed = line.trim(); |
| 767 | let Some(rest) = trimmed.strip_prefix("! FILE_RERUN_MODE:") else { |
| 768 | continue; |
| 769 | }; |
| 770 | let Some((raw_path, raw_mode)) = rest.trim().split_once("=>") else { |
| 771 | return Err(format!( |
| 772 | "{}:{}: FILE_RERUN_MODE must be written as <relative-path> => stable|append", |
| 773 | filename, |
| 774 | i + 1 |
| 775 | )); |
| 776 | }; |
| 777 | |
| 778 | let rel_path = raw_path.trim(); |
| 779 | if rel_path.is_empty() { |
| 780 | return Err(format!( |
| 781 | "{}:{}: FILE_RERUN_MODE path cannot be empty", |
| 782 | filename, |
| 783 | i + 1 |
| 784 | )); |
| 785 | } |
| 786 | if Path::new(rel_path).is_absolute() { |
| 787 | return Err(format!( |
| 788 | "{}:{}: FILE_RERUN_MODE path must be relative, got '{}'", |
| 789 | filename, |
| 790 | i + 1, |
| 791 | rel_path |
| 792 | )); |
| 793 | } |
| 794 | |
| 795 | let mode = match raw_mode.trim() { |
| 796 | "stable" => FileRerunMode::Stable, |
| 797 | "append" => FileRerunMode::Append, |
| 798 | other => { |
| 799 | return Err(format!( |
| 800 | "{}:{}: FILE_RERUN_MODE must be stable or append; got '{}'", |
| 801 | filename, |
| 802 | i + 1, |
| 803 | other |
| 804 | )); |
| 805 | } |
| 806 | }; |
| 807 | |
| 808 | checks.push(FileRerunModeCheck { |
| 809 | line_num: i + 1, |
| 810 | rel_path: rel_path.to_string(), |
| 811 | mode, |
| 812 | }); |
| 813 | } |
| 814 | Ok(checks) |
| 815 | } |
| 816 | |
| 817 | fn extract_file_set_exact( |
| 818 | source: &str, |
| 819 | filename: &str, |
| 820 | ) -> Result<Option<FileSetExactCheck>, String> { |
| 821 | let mut found: Option<FileSetExactCheck> = None; |
| 822 | for (i, line) in source.lines().enumerate() { |
| 823 | let line_num = i + 1; |
| 824 | let trimmed = line.trim(); |
| 825 | let Some(rest) = trimmed.strip_prefix("! FILE_SET_EXACT:") else { |
| 826 | continue; |
| 827 | }; |
| 828 | if let Some(existing) = &found { |
| 829 | return Err(format!( |
| 830 | "{}:{}: multiple FILE_SET_EXACT annotations are not allowed (another at line {})", |
| 831 | filename, line_num, existing.line_num |
| 832 | )); |
| 833 | } |
| 834 | |
| 835 | let mut rel_paths = Vec::new(); |
| 836 | for token in rest.trim().split(',') { |
| 837 | let rel_path = token.trim(); |
| 838 | if rel_path.is_empty() { |
| 839 | return Err(format!( |
| 840 | "{}:{}: FILE_SET_EXACT paths cannot be empty", |
| 841 | filename, line_num |
| 842 | )); |
| 843 | } |
| 844 | if Path::new(rel_path).is_absolute() { |
| 845 | return Err(format!( |
| 846 | "{}:{}: FILE_SET_EXACT paths must be relative, got '{}'", |
| 847 | filename, line_num, rel_path |
| 848 | )); |
| 849 | } |
| 850 | if !rel_paths |
| 851 | .iter() |
| 852 | .any(|existing: &String| existing == rel_path) |
| 853 | { |
| 854 | rel_paths.push(rel_path.to_string()); |
| 855 | } |
| 856 | } |
| 857 | if rel_paths.is_empty() { |
| 858 | return Err(format!( |
| 859 | "{}:{}: FILE_SET_EXACT needs at least one relative path", |
| 860 | filename, line_num |
| 861 | )); |
| 862 | } |
| 863 | rel_paths.sort(); |
| 864 | |
| 865 | found = Some(FileSetExactCheck { |
| 866 | line_num, |
| 867 | rel_paths, |
| 868 | }); |
| 869 | } |
| 870 | Ok(found) |
| 871 | } |
| 872 | |
| 873 | fn extract_repro_checks(source: &str, filename: &str) -> Result<Vec<ReproStage>, String> { |
| 874 | let mut stages = Vec::new(); |
| 875 | for (i, line) in source.lines().enumerate() { |
| 876 | let trimmed = line.trim(); |
| 877 | let Some(rest) = trimmed.strip_prefix("! REPRO_CHECK:") else { |
| 878 | continue; |
| 879 | }; |
| 880 | let stage = match rest.trim() { |
| 881 | "asm" => ReproStage::Asm, |
| 882 | "obj" => ReproStage::Obj, |
| 883 | "run" => ReproStage::Run, |
| 884 | "run_same_sandbox" => ReproStage::RunSameSandbox, |
| 885 | other => { |
| 886 | return Err(format!( |
| 887 | "{}:{}: REPRO_CHECK must be one of asm, obj, run, run_same_sandbox; got '{}'", |
| 888 | filename, |
| 889 | i + 1, |
| 890 | other |
| 891 | )); |
| 892 | } |
| 893 | }; |
| 894 | if !stages.contains(&stage) { |
| 895 | stages.push(stage); |
| 896 | } |
| 897 | } |
| 898 | Ok(stages) |
| 899 | } |
| 900 | |
/// Normalize one OPT_EQ opt-level token to its canonical `-O…` flag.
///
/// The token may be written with or without leading dashes; anything
/// other than the six supported levels is rejected with a diagnostic
/// pointing at the annotation's source line.
fn parse_supported_opt(token: &str, filename: &str, line_num: usize) -> Result<String, String> {
    const SUPPORTED: [&str; 6] = ["O0", "O1", "O2", "O3", "Os", "Ofast"];
    let level = token.trim().trim_start_matches('-');
    if SUPPORTED.contains(&level) {
        Ok(format!("-{}", level))
    } else {
        Err(format!(
            "{}:{}: OPT_EQ only supports O0, O1, O2, O3, Os, or Ofast; got '{}'",
            filename, line_num, level
        ))
    }
}
| 911 | |
/// Parse every `! OPT_EQ: <opts> => <components>` annotation.
///
/// `<opts>` is a comma-separated list of at least two opt levels — the
/// first is the baseline the others are compared against. `<components>`
/// is a `|`-separated list of comparison surfaces (stdout, stderr, exit,
/// asm). One rule is returned per annotation, in source order.
fn extract_opt_eq_rules(source: &str, filename: &str) -> Result<Vec<OptEqRule>, String> {
    let mut rules = Vec::new();
    for (i, line) in source.lines().enumerate() {
        let line_num = i + 1;
        let trimmed = line.trim();
        let Some(rest) = trimmed.strip_prefix("! OPT_EQ:") else {
            continue;
        };
        // The annotation splits into the opt-level list and the
        // component list around the `=>` separator.
        let Some((opts_text, components_text)) = rest.trim().split_once("=>") else {
            return Err(format!(
                "{}:{}: OPT_EQ must be written as <opts> => <components>",
                filename, line_num
            ));
        };

        // Every token is validated and normalized to a `-O…` flag;
        // the first bad token aborts parsing with its diagnostic.
        let opt_flags = opts_text
            .split(',')
            .map(|token| parse_supported_opt(token, filename, line_num))
            .collect::<Result<Vec<_>, _>>()?;
        if opt_flags.len() < 2 {
            return Err(format!(
                "{}:{}: OPT_EQ needs at least two opt levels to compare",
                filename, line_num
            ));
        }

        let mut components = Vec::new();
        for token in components_text.split('|') {
            let component = match token.trim() {
                "stdout" => OptEqComponent::Stdout,
                "stderr" => OptEqComponent::Stderr,
                "exit" => OptEqComponent::Exit,
                "asm" => OptEqComponent::Asm,
                other => {
                    return Err(format!(
                        "{}:{}: OPT_EQ components must be stdout, stderr, exit, or asm; got '{}'",
                        filename, line_num, other
                    ))
                }
            };
            components.push(component);
        }
        // Defensive: an empty component text produces one empty token and
        // errors above, but keep this guard for safety.
        if components.is_empty() {
            return Err(format!(
                "{}:{}: OPT_EQ needs at least one comparison component",
                filename, line_num
            ));
        }

        rules.push(OptEqRule {
            line_num,
            opt_flags,
            components,
        });
    }
    Ok(rules)
}
| 969 | |
/// Parse the optional `! PHASE_TRIANGULATE: <surfaces>` annotation.
///
/// Surfaces are `|`-separated. `ir`, `asm`, and `obj` name compile
/// phases; `clean` and `repro` are policy modifiers, so an annotation
/// consisting only of those is rejected. At most one annotation is
/// allowed per program; duplicates of a surface are collapsed.
fn extract_phase_triangulation(
    source: &str,
    filename: &str,
) -> Result<Option<PhaseTriangulation>, String> {
    let mut found: Option<PhaseTriangulation> = None;
    for (i, line) in source.lines().enumerate() {
        let line_num = i + 1;
        let trimmed = line.trim();
        let Some(rest) = trimmed.strip_prefix("! PHASE_TRIANGULATE:") else {
            continue;
        };
        if let Some(existing) = &found {
            return Err(format!(
                "{}:{}: multiple PHASE_TRIANGULATE annotations are not allowed (another at line {})",
                filename, line_num, existing.line_num
            ));
        }

        let mut surfaces = Vec::new();
        for token in rest.trim().split('|') {
            let surface = match token.trim() {
                "ir" => PhaseSurface::Ir,
                "asm" => PhaseSurface::Asm,
                "obj" => PhaseSurface::Obj,
                "clean" => PhaseSurface::Clean,
                "repro" => PhaseSurface::Repro,
                other => {
                    return Err(format!(
                        "{}:{}: PHASE_TRIANGULATE surfaces must be ir, asm, obj, clean, or repro; got '{}'",
                        filename, line_num, other
                    ))
                }
            };
            // Duplicate surfaces are collapsed silently.
            if !surfaces.contains(&surface) {
                surfaces.push(surface);
            }
        }
        if surfaces.is_empty() {
            return Err(format!(
                "{}:{}: PHASE_TRIANGULATE needs at least one surface",
                filename, line_num
            ));
        }
        // `clean`/`repro` only modify how phase artifacts are checked;
        // without at least one real phase there is nothing to check.
        if surfaces
            .iter()
            .all(|surface| matches!(surface, PhaseSurface::Clean | PhaseSurface::Repro))
        {
            return Err(format!(
                "{}:{}: PHASE_TRIANGULATE policy-only annotations need at least one of ir, asm, or obj",
                filename, line_num
            ));
        }
        found = Some(PhaseTriangulation { line_num, surfaces });
    }
    Ok(found)
}
| 1026 | |
| 1027 | fn diagnostic_contains_span(stderr: &str, expected: ExpectedSpan) -> bool { |
| 1028 | let needle = format!("{}:{}:", expected.line, expected.col); |
| 1029 | stderr.contains(&needle) |
| 1030 | } |
| 1031 | |
/// A single text-shape assertion. Positive checks must appear in
/// order; negative checks must not appear at all. Source line
/// numbers are kept so failure messages can point at the right
/// annotation.
struct ShapeCheck {
    // 1-based line of the annotation in the test program source.
    line_num: usize,
    // Literal substring to search for in the dump.
    pattern: String,
    // True for `*_NOT:` annotations: the substring must be absent.
    negative: bool,
}
| 1041 | |
| 1042 | /// Extract positive and negative shape assertions from a source. |
| 1043 | fn extract_shape_checks(source: &str, pos_prefix: &str, neg_prefix: &str) -> Vec<ShapeCheck> { |
| 1044 | source |
| 1045 | .lines() |
| 1046 | .enumerate() |
| 1047 | .filter_map(|(i, line)| { |
| 1048 | let trimmed = line.trim(); |
| 1049 | if let Some(rest) = trimmed.strip_prefix(pos_prefix) { |
| 1050 | Some(ShapeCheck { |
| 1051 | line_num: i + 1, |
| 1052 | pattern: rest.trim().to_string(), |
| 1053 | negative: false, |
| 1054 | }) |
| 1055 | } else if let Some(rest) = trimmed.strip_prefix(neg_prefix) { |
| 1056 | Some(ShapeCheck { |
| 1057 | line_num: i + 1, |
| 1058 | pattern: rest.trim().to_string(), |
| 1059 | negative: true, |
| 1060 | }) |
| 1061 | } else { |
| 1062 | None |
| 1063 | } |
| 1064 | }) |
| 1065 | .collect() |
| 1066 | } |
| 1067 | |
/// Extract `! IR_CHECK:` and `! IR_NOT:` annotations from a source.
/// These assert on the `--emit-ir` text dump: IR_CHECK substrings must
/// appear in order, IR_NOT substrings must be absent.
fn extract_ir_checks(source: &str) -> Vec<ShapeCheck> {
    extract_shape_checks(source, "! IR_CHECK:", "! IR_NOT:")
}
| 1072 | |
/// Extract `! ASM_CHECK:` and `! ASM_NOT:` annotations from a source.
/// These assert on the `-S` assembly dump: ASM_CHECK substrings must
/// appear in order, ASM_NOT substrings must be absent.
fn extract_asm_checks(source: &str) -> Vec<ShapeCheck> {
    extract_shape_checks(source, "! ASM_CHECK:", "! ASM_NOT:")
}
| 1077 | |
| 1078 | /// Apply IR shape assertions against an --emit-ir text dump. |
| 1079 | /// Positive assertions match in declared order (intervening lines |
| 1080 | /// are allowed). Negative assertions match against the entire |
| 1081 | /// text — if the substring appears anywhere, the test fails. |
| 1082 | fn match_shape_checks( |
| 1083 | checks: &[ShapeCheck], |
| 1084 | text: &str, |
| 1085 | filename: &str, |
| 1086 | directive_name: &str, |
| 1087 | full_label: &str, |
| 1088 | ) -> Result<(), String> { |
| 1089 | let mut search_offset = 0; |
| 1090 | for check in checks { |
| 1091 | if check.negative { |
| 1092 | if text.contains(&check.pattern) { |
| 1093 | return Err(format!( |
| 1094 | "{}:{}: {} failed: substring '{}' appears in {}\n\ |
| 1095 | Full {}:\n{}", |
| 1096 | filename, |
| 1097 | check.line_num, |
| 1098 | directive_name, |
| 1099 | check.pattern, |
| 1100 | full_label, |
| 1101 | full_label, |
| 1102 | text, |
| 1103 | )); |
| 1104 | } |
| 1105 | } else { |
| 1106 | // Positive: search forward from the previous match |
| 1107 | // position so multiple checks enforce ordering. |
| 1108 | if let Some(rel) = text[search_offset..].find(&check.pattern) { |
| 1109 | search_offset += rel + check.pattern.len(); |
| 1110 | } else { |
| 1111 | return Err(format!( |
| 1112 | "{}:{}: {} failed: substring '{}' not found from offset {}\n\ |
| 1113 | Full {}:\n{}", |
| 1114 | filename, |
| 1115 | check.line_num, |
| 1116 | directive_name, |
| 1117 | check.pattern, |
| 1118 | search_offset, |
| 1119 | full_label, |
| 1120 | text, |
| 1121 | )); |
| 1122 | } |
| 1123 | } |
| 1124 | } |
| 1125 | Ok(()) |
| 1126 | } |
| 1127 | |
/// Apply IR shape assertions against an --emit-ir text dump.
/// Thin wrapper over `match_shape_checks` that labels failures with
/// the IR directive names.
fn match_ir_checks(checks: &[ShapeCheck], ir: &str, filename: &str) -> Result<(), String> {
    match_shape_checks(checks, ir, filename, "IR_CHECK/IR_NOT", "IR")
}
| 1132 | |
/// Apply assembly shape assertions against a -S text dump.
/// Thin wrapper over `match_shape_checks` that labels failures with
/// the assembly directive names.
fn match_asm_checks(checks: &[ShapeCheck], asm: &str, filename: &str) -> Result<(), String> {
    match_shape_checks(checks, asm, filename, "ASM_CHECK/ASM_NOT", "assembly")
}
| 1137 | |
| 1138 | fn match_file_checks( |
| 1139 | checks: &[FileCheck], |
| 1140 | files: &BTreeMap<String, Vec<u8>>, |
| 1141 | filename: &str, |
| 1142 | ) -> Result<(), String> { |
| 1143 | let mut search_offsets: BTreeMap<&str, usize> = BTreeMap::new(); |
| 1144 | |
| 1145 | for check in checks { |
| 1146 | let Some(bytes) = files.get(&check.rel_path) else { |
| 1147 | return Err(format!( |
| 1148 | "{}:{}: FILE_CHECK/FILE_NOT expected sandbox file '{}' to exist", |
| 1149 | filename, check.line_num, check.rel_path |
| 1150 | )); |
| 1151 | }; |
| 1152 | let text = String::from_utf8_lossy(bytes); |
| 1153 | if check.negative { |
| 1154 | if text.contains(&check.pattern) { |
| 1155 | return Err(format!( |
| 1156 | "{}:{}: FILE_CHECK/FILE_NOT failed: substring '{}' appears in sandbox file '{}'\n\ |
| 1157 | Full file contents:\n{}", |
| 1158 | filename, check.line_num, check.pattern, check.rel_path, text |
| 1159 | )); |
| 1160 | } |
| 1161 | } else { |
| 1162 | let search_offset = search_offsets.entry(&check.rel_path).or_insert(0); |
| 1163 | if let Some(rel) = text[*search_offset..].find(&check.pattern) { |
| 1164 | *search_offset += rel + check.pattern.len(); |
| 1165 | } else { |
| 1166 | return Err(format!( |
| 1167 | "{}:{}: FILE_CHECK/FILE_NOT failed: substring '{}' not found in sandbox file '{}' from offset {}\n\ |
| 1168 | Full file contents:\n{}", |
| 1169 | filename, |
| 1170 | check.line_num, |
| 1171 | check.pattern, |
| 1172 | check.rel_path, |
| 1173 | *search_offset, |
| 1174 | text |
| 1175 | )); |
| 1176 | } |
| 1177 | } |
| 1178 | } |
| 1179 | |
| 1180 | Ok(()) |
| 1181 | } |
| 1182 | |
/// Recursively read every regular file under `dir` into `out`, keyed by
/// its `/`-separated path relative to `root` (backslashes normalized so
/// keys are stable across platforms).
fn collect_sandbox_files(
    root: &Path,
    dir: &Path,
    out: &mut BTreeMap<String, Vec<u8>>,
) -> std::io::Result<()> {
    for entry in fs::read_dir(dir)? {
        let entry = entry?;
        let path = entry.path();
        if entry.file_type()?.is_dir() {
            collect_sandbox_files(root, &path, out)?;
            continue;
        }
        // The walk started at `root`, so stripping it cannot fail.
        let rel = path.strip_prefix(root).unwrap();
        let key = rel.to_string_lossy().replace('\\', "/");
        out.insert(key, fs::read(&path)?);
    }
    Ok(())
}
| 1200 | |
| 1201 | fn match_file_presence_checks( |
| 1202 | checks: &[FilePresenceCheck], |
| 1203 | files: &BTreeMap<String, Vec<u8>>, |
| 1204 | filename: &str, |
| 1205 | ) -> Result<(), String> { |
| 1206 | for check in checks { |
| 1207 | let exists = files.contains_key(&check.rel_path); |
| 1208 | if check.should_exist && !exists { |
| 1209 | return Err(format!( |
| 1210 | "{}:{}: FILE_EXISTS failed: sandbox file '{}' was not created", |
| 1211 | filename, check.line_num, check.rel_path |
| 1212 | )); |
| 1213 | } |
| 1214 | if !check.should_exist && exists { |
| 1215 | return Err(format!( |
| 1216 | "{}:{}: FILE_MISSING failed: sandbox file '{}' was created", |
| 1217 | filename, check.line_num, check.rel_path |
| 1218 | )); |
| 1219 | } |
| 1220 | } |
| 1221 | Ok(()) |
| 1222 | } |
| 1223 | |
| 1224 | fn match_file_line_count_checks( |
| 1225 | checks: &[FileLineCountCheck], |
| 1226 | files: &BTreeMap<String, Vec<u8>>, |
| 1227 | filename: &str, |
| 1228 | ) -> Result<(), String> { |
| 1229 | for check in checks { |
| 1230 | let Some(bytes) = files.get(&check.rel_path) else { |
| 1231 | return Err(format!( |
| 1232 | "{}:{}: FILE_LINE_COUNT expected sandbox file '{}' to exist", |
| 1233 | filename, check.line_num, check.rel_path |
| 1234 | )); |
| 1235 | }; |
| 1236 | let text = String::from_utf8_lossy(bytes); |
| 1237 | let actual_lines = text.lines().count(); |
| 1238 | if actual_lines != check.expected_lines { |
| 1239 | return Err(format!( |
| 1240 | "{}:{}: FILE_LINE_COUNT failed for '{}': expected {} lines, got {}\n\ |
| 1241 | Full file contents:\n{}", |
| 1242 | filename, check.line_num, check.rel_path, check.expected_lines, actual_lines, text |
| 1243 | )); |
| 1244 | } |
| 1245 | } |
| 1246 | Ok(()) |
| 1247 | } |
| 1248 | |
/// Check FILE_RERUN_MODE contracts between the snapshots of two
/// consecutive runs of the same program.
///
/// `stable` requires byte-identical contents across the rerun;
/// `append` requires the second run's contents to strictly extend the
/// first run's (longer, with the first-run bytes as a prefix).
fn match_file_rerun_mode_checks(
    checks: &[FileRerunModeCheck],
    first: &RunSnapshot,
    second: &RunSnapshot,
    filename: &str,
) -> Result<(), String> {
    for check in checks {
        // The file must exist after both runs before any mode applies.
        let Some(first_bytes) = first.files.get(&check.rel_path) else {
            return Err(format!(
                "{}:{}: FILE_RERUN_MODE expected sandbox file '{}' after first run",
                filename, check.line_num, check.rel_path
            ));
        };
        let Some(second_bytes) = second.files.get(&check.rel_path) else {
            return Err(format!(
                "{}:{}: FILE_RERUN_MODE expected sandbox file '{}' after second run",
                filename, check.line_num, check.rel_path
            ));
        };

        match check.mode {
            FileRerunMode::Stable if first_bytes != second_bytes => {
                return Err(format!(
                    "{}:{}: FILE_RERUN_MODE(stable) failed for '{}': file contents changed across rerun",
                    filename, check.line_num, check.rel_path
                ));
            }
            FileRerunMode::Append => {
                // Strict append: unchanged contents is also a failure.
                if second_bytes.len() <= first_bytes.len() || !second_bytes.starts_with(first_bytes)
                {
                    return Err(format!(
                        "{}:{}: FILE_RERUN_MODE(append) failed for '{}': second run did not strictly append to first-run contents",
                        filename, check.line_num, check.rel_path
                    ));
                }
            }
            // Stable with unchanged contents: the guarded arm above did
            // not fire, so nothing to do.
            FileRerunMode::Stable => {}
        }
    }
    Ok(())
}
| 1290 | |
| 1291 | fn match_file_set_exact( |
| 1292 | check: &FileSetExactCheck, |
| 1293 | files: &BTreeMap<String, Vec<u8>>, |
| 1294 | filename: &str, |
| 1295 | ) -> Result<(), String> { |
| 1296 | let actual = files.keys().cloned().collect::<Vec<_>>(); |
| 1297 | if actual != check.rel_paths { |
| 1298 | return Err(format!( |
| 1299 | "{}:{}: FILE_SET_EXACT failed: expected {:?}, got {:?}", |
| 1300 | filename, check.line_num, check.rel_paths, actual |
| 1301 | )); |
| 1302 | } |
| 1303 | Ok(()) |
| 1304 | } |
| 1305 | |
| 1306 | fn collect_declared_runtime_paths( |
| 1307 | file_checks: &[FileCheck], |
| 1308 | file_presence_checks: &[FilePresenceCheck], |
| 1309 | file_line_count_checks: &[FileLineCountCheck], |
| 1310 | file_rerun_mode_checks: &[FileRerunModeCheck], |
| 1311 | file_set_exact: Option<&FileSetExactCheck>, |
| 1312 | ) -> BTreeMap<String, String> { |
| 1313 | let mut paths = BTreeMap::new(); |
| 1314 | for check in file_checks { |
| 1315 | paths.entry(check.rel_path.clone()).or_insert_with(|| { |
| 1316 | if check.negative { |
| 1317 | "FILE_NOT".to_string() |
| 1318 | } else { |
| 1319 | "FILE_CHECK".to_string() |
| 1320 | } |
| 1321 | }); |
| 1322 | } |
| 1323 | for check in file_presence_checks { |
| 1324 | paths.entry(check.rel_path.clone()).or_insert_with(|| { |
| 1325 | if check.should_exist { |
| 1326 | "FILE_EXISTS".to_string() |
| 1327 | } else { |
| 1328 | "FILE_MISSING".to_string() |
| 1329 | } |
| 1330 | }); |
| 1331 | } |
| 1332 | for check in file_line_count_checks { |
| 1333 | paths |
| 1334 | .entry(check.rel_path.clone()) |
| 1335 | .or_insert_with(|| "FILE_LINE_COUNT".to_string()); |
| 1336 | } |
| 1337 | for check in file_rerun_mode_checks { |
| 1338 | paths |
| 1339 | .entry(check.rel_path.clone()) |
| 1340 | .or_insert_with(|| "FILE_RERUN_MODE".to_string()); |
| 1341 | } |
| 1342 | if let Some(check) = file_set_exact { |
| 1343 | for rel_path in &check.rel_paths { |
| 1344 | paths |
| 1345 | .entry(rel_path.clone()) |
| 1346 | .or_insert_with(|| "FILE_SET_EXACT".to_string()); |
| 1347 | } |
| 1348 | } |
| 1349 | paths |
| 1350 | } |
| 1351 | |
| 1352 | fn snapshot_sandbox_files( |
| 1353 | sandbox: &Path, |
| 1354 | filename: &str, |
| 1355 | ) -> Result<BTreeMap<String, Vec<u8>>, String> { |
| 1356 | let mut files = BTreeMap::new(); |
| 1357 | collect_sandbox_files(sandbox, sandbox, &mut files).map_err(|e| { |
| 1358 | format!( |
| 1359 | "{}: cannot snapshot sandbox {}: {}", |
| 1360 | filename, |
| 1361 | sandbox.display(), |
| 1362 | e |
| 1363 | ) |
| 1364 | })?; |
| 1365 | Ok(files) |
| 1366 | } |
| 1367 | |
/// Everything captured from one single-phase compile (`--emit-ir`,
/// `-S`, or `-c`): the full sandbox contents plus the primary output.
#[derive(Debug)]
struct PhaseArtifact {
    // Snapshot of every file left in the phase sandbox after compiling.
    sandbox_files: BTreeMap<String, Vec<u8>>,
    // Sandbox-relative path of the requested output artifact.
    output_rel_path: String,
    // Raw bytes of the output artifact.
    output_bytes: Vec<u8>,
}
| 1374 | |
/// Run a compiled test binary with `sandbox` as its working directory
/// and capture everything the harness later asserts on: exit code,
/// decoded stdout/stderr, and a snapshot of all files the run left in
/// the sandbox.
fn run_binary_in_sandbox(
    binary: &Path,
    sandbox: &Path,
    filename: &str,
) -> Result<RunSnapshot, String> {
    let run = Command::new(binary)
        .current_dir(sandbox)
        .output()
        .map_err(|e| format!("{}: cannot run binary: {}", filename, e))?;
    Ok(RunSnapshot {
        // A process terminated by a signal has no exit code; report -1.
        exit_code: run.status.code().unwrap_or(-1),
        stdout: String::from_utf8_lossy(&run.stdout).into_owned(),
        stderr: String::from_utf8_lossy(&run.stderr).into_owned(),
        files: snapshot_sandbox_files(sandbox, filename)?,
    })
}
| 1391 | |
| 1392 | fn compile_phase_artifact( |
| 1393 | compiler: &Path, |
| 1394 | source: &Path, |
| 1395 | opt_flag: &str, |
| 1396 | phase: PhaseSurface, |
| 1397 | filename: &str, |
| 1398 | ) -> Result<PhaseArtifact, String> { |
| 1399 | let compiler_path = fs::canonicalize(compiler) |
| 1400 | .map_err(|e| format!("{}: cannot canonicalize compiler path: {}", filename, e))?; |
| 1401 | let source_path = fs::canonicalize(source) |
| 1402 | .map_err(|e| format!("{}: cannot canonicalize source path: {}", filename, e))?; |
| 1403 | let stem = source.file_stem().unwrap().to_str().unwrap(); |
| 1404 | let level = opt_flag.trim_start_matches('-'); |
| 1405 | let phase_name = match phase { |
| 1406 | PhaseSurface::Ir => "ir", |
| 1407 | PhaseSurface::Asm => "asm", |
| 1408 | PhaseSurface::Obj => "obj", |
| 1409 | PhaseSurface::Clean => unreachable!("clean is a triangulation policy, not an artifact"), |
| 1410 | PhaseSurface::Repro => unreachable!("repro is a triangulation policy, not an artifact"), |
| 1411 | }; |
| 1412 | let phase_sandbox = unique_temp_path( |
| 1413 | "phase_sandbox", |
| 1414 | stem, |
| 1415 | &format!("{}_{}", level, phase_name), |
| 1416 | "", |
| 1417 | ); |
| 1418 | fs::create_dir_all(&phase_sandbox).map_err(|e| { |
| 1419 | format!( |
| 1420 | "{}: cannot create phase sandbox dir {}: {}", |
| 1421 | filename, |
| 1422 | phase_sandbox.display(), |
| 1423 | e |
| 1424 | ) |
| 1425 | })?; |
| 1426 | let (output_name, extra_args): (&str, &[&str]) = match phase { |
| 1427 | PhaseSurface::Ir => ("phase-output.ir", &["--emit-ir"]), |
| 1428 | PhaseSurface::Asm => ("phase-output.s", &["-S"]), |
| 1429 | PhaseSurface::Obj => ("phase-output.o", &["-c"]), |
| 1430 | PhaseSurface::Clean => unreachable!(), |
| 1431 | PhaseSurface::Repro => unreachable!(), |
| 1432 | }; |
| 1433 | let output_path = phase_sandbox.join(output_name); |
| 1434 | |
| 1435 | let compile = Command::new(&compiler_path) |
| 1436 | .current_dir(&phase_sandbox) |
| 1437 | .args([source_path.to_str().unwrap(), opt_flag]) |
| 1438 | .args(extra_args) |
| 1439 | .args(["-o", output_path.to_str().unwrap()]) |
| 1440 | .output() |
| 1441 | .map_err(|e| format!("{}: cannot run {} compile: {}", filename, phase_name, e))?; |
| 1442 | if !compile.status.success() { |
| 1443 | let stderr = String::from_utf8_lossy(&compile.stderr); |
| 1444 | let _ = fs::remove_dir_all(&phase_sandbox); |
| 1445 | return Err(format!( |
| 1446 | "{} [{}]: {} compilation failed:\n{}", |
| 1447 | filename, opt_flag, phase_name, stderr |
| 1448 | )); |
| 1449 | } |
| 1450 | |
| 1451 | let output_bytes = fs::read(&output_path).map_err(|e| { |
| 1452 | format!( |
| 1453 | "{}: cannot read {} output {}: {}", |
| 1454 | filename, |
| 1455 | phase_name, |
| 1456 | output_path.display(), |
| 1457 | e |
| 1458 | ) |
| 1459 | })?; |
| 1460 | let sandbox_files = snapshot_sandbox_files(&phase_sandbox, filename)?; |
| 1461 | let artifact = PhaseArtifact { |
| 1462 | sandbox_files, |
| 1463 | output_rel_path: output_name.to_string(), |
| 1464 | output_bytes, |
| 1465 | }; |
| 1466 | let _ = fs::remove_dir_all(&phase_sandbox); |
| 1467 | Ok(artifact) |
| 1468 | } |
| 1469 | |
| 1470 | /// Match checks against actual output lines. Checks must appear in order |
| 1471 | /// but not necessarily consecutively — intervening output lines are allowed. |
| 1472 | fn match_checks( |
| 1473 | checks: &[Check], |
| 1474 | output: &str, |
| 1475 | filename: &str, |
| 1476 | directive_name: &str, |
| 1477 | ) -> Result<(), String> { |
| 1478 | let output_lines: Vec<&str> = output.lines().collect(); |
| 1479 | let mut output_idx = 0; |
| 1480 | |
| 1481 | for check in checks { |
| 1482 | let mut found = false; |
| 1483 | while output_idx < output_lines.len() { |
| 1484 | if output_lines[output_idx].trim().contains(&check.pattern) { |
| 1485 | found = true; |
| 1486 | output_idx += 1; |
| 1487 | break; |
| 1488 | } |
| 1489 | output_idx += 1; |
| 1490 | } |
| 1491 | if !found { |
| 1492 | return Err(format!( |
| 1493 | "{}:{}: {} failed: expected '{}' not found in remaining output\n\ |
| 1494 | Full output:\n{}", |
| 1495 | filename, check.line_num, directive_name, check.pattern, output |
| 1496 | )); |
| 1497 | } |
| 1498 | } |
| 1499 | |
| 1500 | Ok(()) |
| 1501 | } |
| 1502 | |
/// Find the armfortas binary.
///
/// Probes each candidate target directory and returns the first hit,
/// canonicalized so later working-directory changes cannot break the
/// path. Panics when no binary exists — the harness cannot do anything
/// useful without the compiler.
fn find_compiler() -> PathBuf {
    for dir in candidate_target_dirs() {
        let p = dir.join("armfortas");
        if p.exists() {
            return fs::canonicalize(&p).unwrap_or_else(|e| {
                panic!("cannot canonicalize compiler path {}: {}", p.display(), e)
            });
        }
    }
    panic!("cannot find armfortas binary — run `cargo build` first");
}
| 1515 | |
/// Find the test_programs directory, trying both candidate locations
/// relative to the current working directory. Panics when neither
/// exists.
fn find_test_programs() -> PathBuf {
    ["test_programs", "../test_programs"]
        .into_iter()
        .map(PathBuf::from)
        .find(|p| p.is_dir())
        .unwrap_or_else(|| panic!("cannot find test_programs/ directory"))
}
| 1527 | |
| 1528 | fn is_test_program_source(path: &Path) -> bool { |
| 1529 | matches!( |
| 1530 | path.extension() |
| 1531 | .and_then(|ext| ext.to_str()) |
| 1532 | .map(|ext| ext.to_ascii_lowercase()) |
| 1533 | .as_deref(), |
| 1534 | Some("f90" | "f95" | "f03" | "f08" | "f18" | "f23" | "f" | "for" | "ftn" | "fpp") |
| 1535 | ) |
| 1536 | } |
| 1537 | |
| 1538 | fn compile_stage_bytes( |
| 1539 | compiler: &Path, |
| 1540 | source: &Path, |
| 1541 | opt_flag: &str, |
| 1542 | stage: ReproStage, |
| 1543 | ) -> Result<Vec<u8>, String> { |
| 1544 | let source_path = fs::canonicalize(source).map_err(|e| { |
| 1545 | format!( |
| 1546 | "{}: cannot canonicalize source path: {}", |
| 1547 | source.display(), |
| 1548 | e |
| 1549 | ) |
| 1550 | })?; |
| 1551 | let stem = source.file_stem().unwrap().to_str().unwrap(); |
| 1552 | let level = opt_flag.trim_start_matches('-'); |
| 1553 | let (kind, ext, extra_args): (&str, &str, &[&str]) = match stage { |
| 1554 | ReproStage::Asm => ("asm", ".s", &["-S"]), |
| 1555 | ReproStage::Obj => ("obj", ".o", &["-c"]), |
| 1556 | ReproStage::Run => unreachable!("run reproducibility uses runtime snapshots"), |
| 1557 | ReproStage::RunSameSandbox => { |
| 1558 | unreachable!("same-sandbox reproducibility uses runtime snapshots") |
| 1559 | } |
| 1560 | }; |
| 1561 | let out = unique_temp_path(kind, stem, level, ext); |
| 1562 | let compile_sandbox = |
| 1563 | unique_temp_path("compile_sandbox", stem, &format!("{}_{}", level, kind), ""); |
| 1564 | fs::create_dir_all(&compile_sandbox).map_err(|e| { |
| 1565 | format!( |
| 1566 | "{}: cannot create {} compile sandbox {}: {}", |
| 1567 | source.display(), |
| 1568 | kind, |
| 1569 | compile_sandbox.display(), |
| 1570 | e |
| 1571 | ) |
| 1572 | })?; |
| 1573 | let compile = Command::new(compiler) |
| 1574 | .current_dir(&compile_sandbox) |
| 1575 | .args([source_path.to_str().unwrap(), opt_flag]) |
| 1576 | .args(extra_args) |
| 1577 | .args(["-o", out.to_str().unwrap()]) |
| 1578 | .output() |
| 1579 | .map_err(|e| { |
| 1580 | format!( |
| 1581 | "{}: cannot run compiler for {} repro: {}", |
| 1582 | source.display(), |
| 1583 | kind, |
| 1584 | e |
| 1585 | ) |
| 1586 | })?; |
| 1587 | if !compile.status.success() { |
| 1588 | let stderr = String::from_utf8_lossy(&compile.stderr); |
| 1589 | let _ = fs::remove_file(&out); |
| 1590 | let _ = fs::remove_dir_all(&compile_sandbox); |
| 1591 | return Err(format!( |
| 1592 | "{} [{}]: {} reproducibility compile failed:\n{}", |
| 1593 | source.file_name().unwrap().to_string_lossy(), |
| 1594 | opt_flag, |
| 1595 | kind, |
| 1596 | stderr |
| 1597 | )); |
| 1598 | } |
| 1599 | let bytes = fs::read(&out).map_err(|e| { |
| 1600 | format!( |
| 1601 | "{}: cannot read {} output {}: {}", |
| 1602 | source.display(), |
| 1603 | kind, |
| 1604 | out.display(), |
| 1605 | e |
| 1606 | ) |
| 1607 | })?; |
| 1608 | let _ = fs::remove_file(&out); |
| 1609 | let _ = fs::remove_dir_all(&compile_sandbox); |
| 1610 | Ok(bytes) |
| 1611 | } |
| 1612 | |
| 1613 | fn compile_and_run_snapshot( |
| 1614 | compiler: &Path, |
| 1615 | source: &Path, |
| 1616 | opt_flag: &str, |
| 1617 | filename: &str, |
| 1618 | ) -> Result<RunSnapshot, String> { |
| 1619 | let source_path = fs::canonicalize(source) |
| 1620 | .map_err(|e| format!("{}: cannot canonicalize source path: {}", filename, e))?; |
| 1621 | let stem = source.file_stem().unwrap().to_str().unwrap(); |
| 1622 | let level = opt_flag.trim_start_matches('-'); |
| 1623 | let binary = unique_temp_path("test_bin", stem, level, ""); |
| 1624 | let sandbox = unique_temp_path("test_sandbox", stem, &format!("{}_opt_eq", level), ""); |
| 1625 | let compile_sandbox = |
| 1626 | unique_temp_path("compile_sandbox", stem, &format!("{}_opt_eq", level), ""); |
| 1627 | fs::create_dir_all(&compile_sandbox).map_err(|e| { |
| 1628 | format!( |
| 1629 | "{}: cannot create OPT_EQ compile sandbox {}: {}", |
| 1630 | filename, |
| 1631 | compile_sandbox.display(), |
| 1632 | e |
| 1633 | ) |
| 1634 | })?; |
| 1635 | |
| 1636 | let compile = Command::new(compiler) |
| 1637 | .current_dir(&compile_sandbox) |
| 1638 | .args([ |
| 1639 | source_path.to_str().unwrap(), |
| 1640 | opt_flag, |
| 1641 | "-o", |
| 1642 | binary.to_str().unwrap(), |
| 1643 | ]) |
| 1644 | .output() |
| 1645 | .map_err(|e| format!("{}: cannot run compiler: {}", filename, e))?; |
| 1646 | if !compile.status.success() { |
| 1647 | let stderr = String::from_utf8_lossy(&compile.stderr); |
| 1648 | let _ = fs::remove_dir_all(&compile_sandbox); |
| 1649 | let _ = fs::remove_file(&binary); |
| 1650 | return Err(format!( |
| 1651 | "{} [{}]: OPT_EQ comparison compile failed:\n{}", |
| 1652 | filename, opt_flag, stderr |
| 1653 | )); |
| 1654 | } |
| 1655 | |
| 1656 | fs::create_dir_all(&sandbox).map_err(|e| { |
| 1657 | format!( |
| 1658 | "{}: cannot create OPT_EQ sandbox dir {}: {}", |
| 1659 | filename, |
| 1660 | sandbox.display(), |
| 1661 | e |
| 1662 | ) |
| 1663 | })?; |
| 1664 | let snapshot = run_binary_in_sandbox(&binary, &sandbox, filename)?; |
| 1665 | let _ = fs::remove_file(&binary); |
| 1666 | let _ = fs::remove_dir_all(&compile_sandbox); |
| 1667 | let _ = fs::remove_dir_all(&sandbox); |
| 1668 | Ok(snapshot) |
| 1669 | } |
| 1670 | |
| 1671 | fn render_opt_eq_components(components: &[OptEqComponent]) -> String { |
| 1672 | components |
| 1673 | .iter() |
| 1674 | .map(|component| match component { |
| 1675 | OptEqComponent::Stdout => "stdout", |
| 1676 | OptEqComponent::Stderr => "stderr", |
| 1677 | OptEqComponent::Exit => "exit", |
| 1678 | OptEqComponent::Asm => "asm", |
| 1679 | }) |
| 1680 | .collect::<Vec<_>>() |
| 1681 | .join("|") |
| 1682 | } |
| 1683 | |
| 1684 | fn compare_opt_eq_runtime_components( |
| 1685 | baseline: &RunSnapshot, |
| 1686 | other: &RunSnapshot, |
| 1687 | components: &[OptEqComponent], |
| 1688 | ) -> Option<String> { |
| 1689 | for component in components { |
| 1690 | match component { |
| 1691 | OptEqComponent::Exit if baseline.exit_code != other.exit_code => { |
| 1692 | return Some(format!( |
| 1693 | "exit mismatch: baseline {}, other {}", |
| 1694 | baseline.exit_code, other.exit_code |
| 1695 | )) |
| 1696 | } |
| 1697 | OptEqComponent::Stdout if baseline.stdout != other.stdout => { |
| 1698 | return Some(format!( |
| 1699 | "stdout mismatch:\nbaseline:\n{}\nother:\n{}", |
| 1700 | baseline.stdout, other.stdout |
| 1701 | )) |
| 1702 | } |
| 1703 | OptEqComponent::Stderr if baseline.stderr != other.stderr => { |
| 1704 | return Some(format!( |
| 1705 | "stderr mismatch:\nbaseline:\n{}\nother:\n{}", |
| 1706 | baseline.stderr, other.stderr |
| 1707 | )) |
| 1708 | } |
| 1709 | _ => {} |
| 1710 | } |
| 1711 | } |
| 1712 | None |
| 1713 | } |
| 1714 | |
/// Execute every OPT_EQ rule whose baseline opt level matches `opt_flag`.
///
/// Runtime components (stdout/stderr/exit) are compared against a fresh
/// compile+run of each non-baseline level; the `asm` component compares
/// `-S` output bytes. The first mismatch is returned as an error.
fn run_opt_eq_rules(
    compiler: &Path,
    source: &Path,
    opt_flag: &str,
    filename: &str,
    baseline_snapshot: &RunSnapshot,
    rules: &[OptEqRule],
) -> Result<(), String> {
    for rule in rules {
        // A rule only fires when this compile's opt level is the rule's
        // declared baseline (first entry in the list).
        if rule.opt_flags.first().map(String::as_str) != Some(opt_flag) {
            continue;
        }

        // Baseline assembly is produced once and reused for every
        // comparison level in the rule.
        let baseline_asm = if rule.components.contains(&OptEqComponent::Asm) {
            Some(compile_stage_bytes(
                compiler,
                source,
                opt_flag,
                ReproStage::Asm,
            )?)
        } else {
            None
        };

        for compare_opt in rule.opt_flags.iter().skip(1) {
            // Only compile+run the comparison level when some runtime
            // component (anything other than asm) was requested.
            if rule
                .components
                .iter()
                .any(|component| *component != OptEqComponent::Asm)
            {
                let other = compile_and_run_snapshot(compiler, source, compare_opt, filename)?;
                if let Some(detail) =
                    compare_opt_eq_runtime_components(baseline_snapshot, &other, &rule.components)
                {
                    return Err(format!(
                        "{} [{}]: OPT_EQ({} => {}) failed comparing {} to {}: {}",
                        filename,
                        opt_flag,
                        rule.opt_flags
                            .iter()
                            .map(|flag| flag.trim_start_matches('-'))
                            .collect::<Vec<_>>()
                            .join(","),
                        render_opt_eq_components(&rule.components),
                        opt_flag,
                        compare_opt,
                        detail,
                    ));
                }
            }

            // Assembly comparison is byte-exact on the `-S` output.
            if let Some(baseline_asm) = &baseline_asm {
                let other_asm =
                    compile_stage_bytes(compiler, source, compare_opt, ReproStage::Asm)?;
                if baseline_asm != &other_asm {
                    return Err(format!(
                        "{} [{}]: OPT_EQ({} => {}) failed comparing {} to {}: assembly output differed",
                        filename,
                        opt_flag,
                        rule
                            .opt_flags
                            .iter()
                            .map(|flag| flag.trim_start_matches('-'))
                            .collect::<Vec<_>>()
                            .join(","),
                        render_opt_eq_components(&rule.components),
                        opt_flag,
                        compare_opt,
                    ));
                }
            }
        }
    }

    Ok(())
}
| 1791 | |
| 1792 | fn render_phase_surfaces(surfaces: &[PhaseSurface]) -> String { |
| 1793 | surfaces |
| 1794 | .iter() |
| 1795 | .map(|surface| match surface { |
| 1796 | PhaseSurface::Ir => "ir", |
| 1797 | PhaseSurface::Asm => "asm", |
| 1798 | PhaseSurface::Obj => "obj", |
| 1799 | PhaseSurface::Clean => "clean", |
| 1800 | PhaseSurface::Repro => "repro", |
| 1801 | }) |
| 1802 | .collect::<Vec<_>>() |
| 1803 | .join("|") |
| 1804 | } |
| 1805 | |
| 1806 | fn run_phase_triangulation( |
| 1807 | compiler: &Path, |
| 1808 | source: &Path, |
| 1809 | opt_flag: &str, |
| 1810 | filename: &str, |
| 1811 | triangulation: &PhaseTriangulation, |
| 1812 | declared_runtime_paths: &BTreeMap<String, String>, |
| 1813 | ) -> Result<(), String> { |
| 1814 | let require_clean = triangulation.surfaces.contains(&PhaseSurface::Clean); |
| 1815 | let require_repro = triangulation.surfaces.contains(&PhaseSurface::Repro); |
| 1816 | for surface in &triangulation.surfaces { |
| 1817 | match surface { |
| 1818 | PhaseSurface::Ir | PhaseSurface::Asm | PhaseSurface::Obj => { |
| 1819 | let artifact = |
| 1820 | compile_phase_artifact(compiler, source, opt_flag, *surface, filename)?; |
| 1821 | if artifact.output_bytes.is_empty() { |
| 1822 | let surface_name = match surface { |
| 1823 | PhaseSurface::Ir => "IR", |
| 1824 | PhaseSurface::Asm => "assembly", |
| 1825 | PhaseSurface::Obj => "object", |
| 1826 | PhaseSurface::Clean => unreachable!(), |
| 1827 | PhaseSurface::Repro => unreachable!(), |
| 1828 | }; |
| 1829 | return Err(format!( |
| 1830 | "{}:{}: PHASE_TRIANGULATE({}) produced empty {} output at {}", |
| 1831 | filename, |
| 1832 | triangulation.line_num, |
| 1833 | render_phase_surfaces(&triangulation.surfaces), |
| 1834 | surface_name, |
| 1835 | opt_flag, |
| 1836 | )); |
| 1837 | } |
| 1838 | |
| 1839 | if require_clean { |
| 1840 | let file_keys: Vec<&str> = artifact |
| 1841 | .sandbox_files |
| 1842 | .keys() |
| 1843 | .map(|key| key.as_str()) |
| 1844 | .collect(); |
| 1845 | if file_keys != vec![artifact.output_rel_path.as_str()] { |
| 1846 | let runtime_leaks = declared_runtime_paths |
| 1847 | .iter() |
| 1848 | .filter(|(path, _)| artifact.sandbox_files.contains_key(path.as_str())) |
| 1849 | .map(|(path, directive)| format!("{} ({})", path, directive)) |
| 1850 | .collect::<Vec<_>>(); |
| 1851 | if !runtime_leaks.is_empty() { |
| 1852 | return Err(format!( |
| 1853 | "{}:{}: PHASE_TRIANGULATE({}) failed at {}: compile-only phase created runtime side-effect files: {}", |
| 1854 | filename, |
| 1855 | triangulation.line_num, |
| 1856 | render_phase_surfaces(&triangulation.surfaces), |
| 1857 | opt_flag, |
| 1858 | runtime_leaks.join(", "), |
| 1859 | )); |
| 1860 | } |
| 1861 | return Err(format!( |
| 1862 | "{}:{}: PHASE_TRIANGULATE({}) failed at {}: compile-only phase left unexpected files {:?} (expected only '{}')", |
| 1863 | filename, |
| 1864 | triangulation.line_num, |
| 1865 | render_phase_surfaces(&triangulation.surfaces), |
| 1866 | opt_flag, |
| 1867 | file_keys, |
| 1868 | artifact.output_rel_path, |
| 1869 | )); |
| 1870 | } |
| 1871 | } |
| 1872 | if require_repro { |
| 1873 | let second_artifact = |
| 1874 | compile_phase_artifact(compiler, source, opt_flag, *surface, filename)?; |
| 1875 | if require_clean { |
| 1876 | let second_file_keys: Vec<&str> = second_artifact |
| 1877 | .sandbox_files |
| 1878 | .keys() |
| 1879 | .map(|key| key.as_str()) |
| 1880 | .collect(); |
| 1881 | if second_file_keys != vec![second_artifact.output_rel_path.as_str()] { |
| 1882 | return Err(format!( |
| 1883 | "{}:{}: PHASE_TRIANGULATE({}) failed at {}: repeated compile-only phase left unexpected files {:?} (expected only '{}')", |
| 1884 | filename, |
| 1885 | triangulation.line_num, |
| 1886 | render_phase_surfaces(&triangulation.surfaces), |
| 1887 | opt_flag, |
| 1888 | second_file_keys, |
| 1889 | second_artifact.output_rel_path, |
| 1890 | )); |
| 1891 | } |
| 1892 | } |
| 1893 | if artifact.output_bytes != second_artifact.output_bytes { |
| 1894 | let surface_name = match surface { |
| 1895 | PhaseSurface::Ir => "IR", |
| 1896 | PhaseSurface::Asm => "assembly", |
| 1897 | PhaseSurface::Obj => "object", |
| 1898 | PhaseSurface::Clean => unreachable!(), |
| 1899 | PhaseSurface::Repro => unreachable!(), |
| 1900 | }; |
| 1901 | return Err(format!( |
| 1902 | "{}:{}: PHASE_TRIANGULATE({}) failed at {}: {} output changed across repeated compile-only runs", |
| 1903 | filename, |
| 1904 | triangulation.line_num, |
| 1905 | render_phase_surfaces(&triangulation.surfaces), |
| 1906 | opt_flag, |
| 1907 | surface_name, |
| 1908 | )); |
| 1909 | } |
| 1910 | } |
| 1911 | } |
| 1912 | PhaseSurface::Clean | PhaseSurface::Repro => {} |
| 1913 | } |
| 1914 | } |
| 1915 | Ok(()) |
| 1916 | } |
| 1917 | |
| 1918 | fn describe_run_difference(first: &RunSnapshot, second: &RunSnapshot) -> String { |
| 1919 | if first.exit_code != second.exit_code { |
| 1920 | return format!( |
| 1921 | "exit code mismatch: first {}, second {}", |
| 1922 | first.exit_code, second.exit_code |
| 1923 | ); |
| 1924 | } |
| 1925 | if first.stdout != second.stdout { |
| 1926 | return format!( |
| 1927 | "stdout mismatch:\nfirst:\n{}\nsecond:\n{}", |
| 1928 | first.stdout, second.stdout |
| 1929 | ); |
| 1930 | } |
| 1931 | if first.stderr != second.stderr { |
| 1932 | return format!( |
| 1933 | "stderr mismatch:\nfirst:\n{}\nsecond:\n{}", |
| 1934 | first.stderr, second.stderr |
| 1935 | ); |
| 1936 | } |
| 1937 | if first.files.keys().collect::<Vec<_>>() != second.files.keys().collect::<Vec<_>>() { |
| 1938 | return format!( |
| 1939 | "sandbox file set mismatch: first {:?}, second {:?}", |
| 1940 | first.files.keys().collect::<Vec<_>>(), |
| 1941 | second.files.keys().collect::<Vec<_>>() |
| 1942 | ); |
| 1943 | } |
| 1944 | for (path, first_bytes) in &first.files { |
| 1945 | let second_bytes = &second.files[path]; |
| 1946 | if first_bytes != second_bytes { |
| 1947 | return format!("sandbox file contents differ for '{}'", path); |
| 1948 | } |
| 1949 | } |
| 1950 | "unknown runtime observation mismatch".to_string() |
| 1951 | } |
| 1952 | |
/// What happened when we ran a test program.
///
/// The XFAIL axis is deliberately explicit: known-bug programs are
/// tracked loudly (`Xfail`/`Xpass`) rather than silently skipped, so a
/// fixed bug surfaces as an `Xpass` failure instead of going unnoticed.
#[derive(Debug)]
enum TestOutcome {
    /// Compiled, ran, all CHECKs matched. No XFAIL annotation present.
    Pass,
    /// Marked XFAIL and failed somewhere — this is the expected
    /// outcome for an open audit finding. The reason is the XFAIL
    /// annotation text plus the underlying failure detail.
    Xfail(String),
    /// Marked XFAIL but unexpectedly succeeded. Loud failure: the
    /// underlying bug is fixed and the XFAIL annotation should be
    /// removed so the program becomes a regular regression test.
    Xpass(String),
    /// No XFAIL annotation, and the program failed somewhere.
    Fail(String),
}
| 1969 | |
/// Run a single test program: compile at the given optimization level,
/// execute, check output. Honors `! XFAIL:` annotations.
///
/// Pipeline: extract annotations → validate annotation combinations →
/// compile (multi-file split or single file) → run in a per-(file,level)
/// sandbox → match runtime oracles → IR/ASM shape checks → OPT_EQ /
/// PHASE_TRIANGULATE cross-checks → fold the result through the XFAIL
/// disposition at the very end.
fn run_test(compiler: &Path, source: &Path, opt_flag: &str) -> TestOutcome {
    let filename = source.file_name().unwrap().to_str().unwrap();
    let source_text = match fs::read_to_string(source) {
        Ok(s) => s,
        Err(e) => return TestOutcome::Fail(format!("{}: cannot read: {}", filename, e)),
    };

    // Extract every harness annotation up front. A malformed annotation
    // is reported as an ordinary test failure, never a panic.
    let xfail_reason = extract_xfail(&source_text);
    let error_expected = extract_error_expected(&source_text);
    let error_span = match extract_error_span(&source_text, filename) {
        Ok(span) => span,
        Err(e) => return TestOutcome::Fail(e),
    };
    let checks = extract_checks(&source_text);
    let stderr_checks = extract_stderr_checks(&source_text);
    let expected_exit_code = match extract_exit_code(&source_text, filename) {
        Ok(code) => code,
        Err(e) => return TestOutcome::Fail(e),
    };
    let ir_checks = extract_ir_checks(&source_text);
    let asm_checks = extract_asm_checks(&source_text);
    let file_checks =
        match extract_file_checks(&source_text, filename, "! FILE_CHECK:", "! FILE_NOT:") {
            Ok(checks) => checks,
            Err(e) => return TestOutcome::Fail(e),
        };
    let file_presence_checks = match extract_file_presence_checks(&source_text, filename) {
        Ok(checks) => checks,
        Err(e) => return TestOutcome::Fail(e),
    };
    let file_line_count_checks = match extract_file_line_count_checks(&source_text, filename) {
        Ok(checks) => checks,
        Err(e) => return TestOutcome::Fail(e),
    };
    let file_rerun_mode_checks = match extract_file_rerun_mode_checks(&source_text, filename) {
        Ok(checks) => checks,
        Err(e) => return TestOutcome::Fail(e),
    };
    let file_set_exact = match extract_file_set_exact(&source_text, filename) {
        Ok(check) => check,
        Err(e) => return TestOutcome::Fail(e),
    };
    let repro_checks = match extract_repro_checks(&source_text, filename) {
        Ok(checks) => checks,
        Err(e) => return TestOutcome::Fail(e),
    };
    let opt_eq_rules = match extract_opt_eq_rules(&source_text, filename) {
        Ok(rules) => rules,
        Err(e) => return TestOutcome::Fail(e),
    };
    let phase_triangulation = match extract_phase_triangulation(&source_text, filename) {
        Ok(rule) => rule,
        Err(e) => return TestOutcome::Fail(e),
    };
    if checks.is_empty()
        && stderr_checks.is_empty()
        && ir_checks.is_empty()
        && asm_checks.is_empty()
        && file_checks.is_empty()
        && file_presence_checks.is_empty()
        && file_line_count_checks.is_empty()
        && file_rerun_mode_checks.is_empty()
        && file_set_exact.is_none()
        && repro_checks.is_empty()
        && opt_eq_rules.is_empty()
        && phase_triangulation.is_none()
        && expected_exit_code.is_none()
        && error_span.is_none()
        && xfail_reason.is_none()
        && error_expected.is_none()
    {
        // Programs with no runtime or shape assertions, no XFAIL marker,
        // and no ERROR marker are mis-configured tests, not test failures.
        return TestOutcome::Fail(format!(
            "{}: no CHECK / STDERR_CHECK / EXIT_CODE / IR_CHECK / ASM_CHECK / FILE_CHECK / FILE_EXISTS / FILE_MISSING / FILE_LINE_COUNT / FILE_RERUN_MODE / FILE_SET_EXACT / REPRO_CHECK / XFAIL / ERROR_EXPECTED / ERROR_SPAN annotations",
            filename,
        ));
    }
    // Reject annotation combinations that cannot be satisfied together.
    if error_span.is_some() && error_expected.is_none() {
        return TestOutcome::Fail(format!(
            "{}: ERROR_SPAN requires ERROR_EXPECTED so the harness knows which compile failure to validate",
            filename,
        ));
    }
    if phase_triangulation.is_some() && error_expected.is_some() {
        return TestOutcome::Fail(format!(
            "{}: PHASE_TRIANGULATE is for successful compile/run tests and cannot be combined with ERROR_EXPECTED",
            filename,
        ));
    }
    if !file_rerun_mode_checks.is_empty() && repro_checks.contains(&ReproStage::RunSameSandbox) {
        return TestOutcome::Fail(format!(
            "{}: FILE_RERUN_MODE is a same-sandbox rerun oracle and cannot be combined with REPRO_CHECK(run_same_sandbox)",
            filename,
        ));
    }

    // Try the compile/run/check pipeline. Any failure path returns
    // an Err with a message; success returns Ok.
    let multifile_segments = split_multifile_segments(&source_text);
    let multifile_link_order = extract_multifile_link(&source_text);

    let inner = || -> Result<(), String> {
        // Use a per-(file,level) binary path so concurrent jobs
        // and successive runs at different levels don't stomp each other.
        let stem = source.file_stem().unwrap().to_str().unwrap();
        let level = opt_flag.trim_start_matches('-');
        let binary = unique_temp_path("test_bin", stem, level, "");

        // ---- Multi-file path: split, compile each to .o, link ----
        if let Some(segments) = &multifile_segments {
            let build_dir = unique_temp_path("multifile_build", stem, level, "");
            fs::create_dir_all(&build_dir)
                .map_err(|e| format!("{}: cannot create multifile build dir: {}", filename, e))?;

            // Write each segment to its own .f90.
            for seg in segments {
                let seg_path = build_dir.join(&seg.name);
                fs::write(&seg_path, &seg.source).map_err(|e| {
                    format!("{}: cannot write segment {}: {}", filename, seg.name, e)
                })?;
            }

            // Determine compilation order from MULTIFILE_LINK or declaration order.
            let ordered_names: Vec<&str> = if let Some(link_order) = &multifile_link_order {
                link_order.iter().map(|s| s.as_str()).collect()
            } else {
                segments.iter().map(|s| s.name.as_str()).collect()
            };

            // Compile each in order (dependencies first). The first
            // compile error stops the loop and is interpreted below
            // against ERROR_EXPECTED.
            let mut objects = Vec::new();
            let mut compile_error: Option<String> = None;
            for name in &ordered_names {
                let seg_f90 = build_dir.join(name);
                if !seg_f90.exists() {
                    let msg = format!(
                        "{}: MULTIFILE_LINK references '{}' but no !--- file: segment defines it",
                        filename, name,
                    );
                    compile_error = Some(msg);
                    break;
                }
                let seg_o = build_dir.join(format!(
                    "{}.o",
                    Path::new(name).file_stem().unwrap().to_str().unwrap()
                ));
                if let Err(e) = compile_to_object(compiler, &seg_f90, &seg_o, opt_flag, &build_dir)
                {
                    compile_error = Some(format!("{} [{}]: {}", filename, opt_flag, e));
                    break;
                }
                objects.push(seg_o);
            }

            // Handle ERROR_EXPECTED for multi-file tests.
            if let Some(err_msg) = compile_error {
                let _ = fs::remove_dir_all(&build_dir);
                if let Some(expected) = &error_expected {
                    if err_msg.contains(expected.as_str()) {
                        return Ok(());
                    }
                    return Err(format!(
                        "{} [{}]: ERROR_EXPECTED({}) but compile error did not contain it.\n\
                         Actual error:\n{}",
                        filename, opt_flag, expected, err_msg,
                    ));
                }
                return Err(err_msg);
            }

            // ERROR_EXPECTED but compilation succeeded — that's a failure.
            if error_expected.is_some() {
                let _ = fs::remove_dir_all(&build_dir);
                return Err(format!(
                    "{} [{}]: ERROR_EXPECTED but all segments compiled successfully",
                    filename, opt_flag,
                ));
            }

            // Link all .o files into a binary.
            link_objects(&objects, &binary)
                .map_err(|e| format!("{} [{}]: {}", filename, opt_flag, e))?;

            let _ = fs::remove_dir_all(&build_dir);
        } else {
            // ---- Single-file path ----
            // Canonicalize so the compiler gets an absolute source path
            // even though it runs from inside the scratch sandbox below.
            let source_path = fs::canonicalize(source).map_err(|e| {
                format!(
                    "{}: cannot canonicalize source {}: {}",
                    filename,
                    source.display(),
                    e
                )
            })?;
            let compile_sandbox = unique_temp_path("compile_sandbox", stem, level, "");
            fs::create_dir_all(&compile_sandbox).map_err(|e| {
                format!(
                    "{}: cannot create compile sandbox dir {}: {}",
                    filename,
                    compile_sandbox.display(),
                    e
                )
            })?;
            // Run the compiler with its cwd inside the scratch dir so any
            // relative-path outputs it creates land there, not in the repo.
            let compile = Command::new(compiler)
                .current_dir(&compile_sandbox)
                .args([
                    source_path.to_str().unwrap(),
                    opt_flag,
                    "-o",
                    binary.to_str().unwrap(),
                ])
                .output()
                .map_err(|e| format!("{}: cannot run compiler: {}", filename, e))?;

            // ERROR_EXPECTED branch: compilation MUST fail with the
            // expected stderr substring. CHECKs are ignored.
            if let Some(expected) = &error_expected {
                if compile.status.success() {
                    let _ = fs::remove_dir_all(&compile_sandbox);
                    let _ = fs::remove_file(&binary);
                    return Err(format!(
                        "{} [{}]: ERROR_EXPECTED({}) but compilation succeeded",
                        filename, opt_flag, expected,
                    ));
                }
                let stderr = String::from_utf8_lossy(&compile.stderr);
                if !stderr.contains(expected.as_str()) {
                    let _ = fs::remove_dir_all(&compile_sandbox);
                    return Err(format!(
                        "{} [{}]: ERROR_EXPECTED({}) but stderr did not contain it.\n\
                         Full stderr:\n{}",
                        filename, opt_flag, expected, stderr,
                    ));
                }
                // Optional ERROR_SPAN: the diagnostic must also point at
                // the annotated line:column location.
                if let Some(expected_span) = error_span {
                    if !diagnostic_contains_span(&stderr, expected_span) {
                        let _ = fs::remove_dir_all(&compile_sandbox);
                        return Err(format!(
                            "{} [{}]: ERROR_SPAN({}:{}) but stderr did not contain that location.\n\
                             Full stderr:\n{}",
                            filename, opt_flag, expected_span.line, expected_span.col, stderr,
                        ));
                    }
                }
                let _ = fs::remove_dir_all(&compile_sandbox);
                return Ok(());
            }

            if !compile.status.success() {
                let stderr = String::from_utf8_lossy(&compile.stderr);
                let _ = fs::remove_dir_all(&compile_sandbox);
                return Err(format!(
                    "{} [{}]: compilation failed:\n{}",
                    filename, opt_flag, stderr,
                ));
            }
            let _ = fs::remove_dir_all(&compile_sandbox);
        }

        // Per-(file,level) sandbox directory. Test programs that touch the
        // filesystem (open(file=...)) write into this directory via relative
        // paths, which keeps the parallel test_programs_end_to_end_o*
        // threads from racing on shared paths.
        let sandbox = unique_temp_path("test_sandbox", stem, level, "");
        fs::create_dir_all(&sandbox).map_err(|e| {
            format!(
                "{}: cannot create sandbox dir {}: {}",
                filename,
                sandbox.display(),
                e
            )
        })?;

        let snapshot = run_binary_in_sandbox(&binary, &sandbox, filename)?;

        // Exit-code oracle: the EXIT_CODE annotation if present, else 0.
        let actual_exit_code = snapshot.exit_code;
        let stderr = &snapshot.stderr;
        let expected_exit_code = expected_exit_code.unwrap_or(0);
        if actual_exit_code != expected_exit_code {
            let _ = fs::remove_file(&binary);
            let _ = fs::remove_dir_all(&sandbox);
            return Err(format!(
                "{} [{}]: execution exit mismatch: expected {}, got {}\n\
                 stderr:\n{}",
                filename, opt_flag, expected_exit_code, actual_exit_code, stderr,
            ));
        }

        // Runtime oracles against the captured snapshot. Every failure
        // path removes the binary and sandbox before returning.
        let stdout = &snapshot.stdout;
        let label = format!("{} [{}]", filename, opt_flag);
        if let Err(e) = match_checks(&checks, &stdout, &label, "CHECK") {
            let _ = fs::remove_file(&binary);
            let _ = fs::remove_dir_all(&sandbox);
            return Err(e);
        }
        if let Err(e) = match_checks(&stderr_checks, &stderr, &label, "STDERR_CHECK") {
            let _ = fs::remove_file(&binary);
            let _ = fs::remove_dir_all(&sandbox);
            return Err(e);
        }
        if let Err(e) = match_file_checks(&file_checks, &snapshot.files, &label) {
            let _ = fs::remove_file(&binary);
            let _ = fs::remove_dir_all(&sandbox);
            return Err(e);
        }
        if let Err(e) = match_file_presence_checks(&file_presence_checks, &snapshot.files, &label) {
            let _ = fs::remove_file(&binary);
            let _ = fs::remove_dir_all(&sandbox);
            return Err(e);
        }
        if let Err(e) =
            match_file_line_count_checks(&file_line_count_checks, &snapshot.files, &label)
        {
            let _ = fs::remove_file(&binary);
            let _ = fs::remove_dir_all(&sandbox);
            return Err(e);
        }
        if let Some(check) = &file_set_exact {
            if let Err(e) = match_file_set_exact(check, &snapshot.files, &label) {
                let _ = fs::remove_file(&binary);
                let _ = fs::remove_dir_all(&sandbox);
                return Err(e);
            }
        }
        // FILE_RERUN_MODE: run the binary a second time in the SAME
        // sandbox. The observable streams must be stable across reruns
        // before the per-file mode oracles compare the two snapshots.
        if !file_rerun_mode_checks.is_empty() {
            let second = run_binary_in_sandbox(&binary, &sandbox, filename)?;
            if snapshot.exit_code != second.exit_code {
                let _ = fs::remove_file(&binary);
                let _ = fs::remove_dir_all(&sandbox);
                return Err(format!(
                    "{} [{}]: FILE_RERUN_MODE rerun exit mismatch: first {}, second {}",
                    filename, opt_flag, snapshot.exit_code, second.exit_code
                ));
            }
            if snapshot.stdout != second.stdout {
                let _ = fs::remove_file(&binary);
                let _ = fs::remove_dir_all(&sandbox);
                return Err(format!(
                    "{} [{}]: FILE_RERUN_MODE rerun stdout mismatch:\nfirst:\n{}\nsecond:\n{}",
                    filename, opt_flag, snapshot.stdout, second.stdout
                ));
            }
            if snapshot.stderr != second.stderr {
                let _ = fs::remove_file(&binary);
                let _ = fs::remove_dir_all(&sandbox);
                return Err(format!(
                    "{} [{}]: FILE_RERUN_MODE rerun stderr mismatch:\nfirst:\n{}\nsecond:\n{}",
                    filename, opt_flag, snapshot.stderr, second.stderr
                ));
            }
            if let Err(e) =
                match_file_rerun_mode_checks(&file_rerun_mode_checks, &snapshot, &second, &label)
            {
                let _ = fs::remove_file(&binary);
                let _ = fs::remove_dir_all(&sandbox);
                return Err(e);
            }
        }
        // REPRO_CHECK stages: recompile (asm/obj) or rerun (run /
        // run_same_sandbox) and demand identical observations.
        for stage in &repro_checks {
            match stage {
                ReproStage::Asm | ReproStage::Obj => {
                    let first = compile_stage_bytes(compiler, source, opt_flag, *stage)?;
                    let second = compile_stage_bytes(compiler, source, opt_flag, *stage)?;
                    if first != second {
                        let stage_name = match stage {
                            ReproStage::Asm => "asm",
                            ReproStage::Obj => "obj",
                            ReproStage::Run => unreachable!(),
                            ReproStage::RunSameSandbox => unreachable!(),
                        };
                        let _ = fs::remove_file(&binary);
                        let _ = fs::remove_dir_all(&sandbox);
                        return Err(format!(
                            "{} [{}]: REPRO_CHECK({}) failed: two compilations produced different {} bytes",
                            filename, opt_flag, stage_name, stage_name
                        ));
                    }
                }
                ReproStage::Run => {
                    // Fresh sandbox: the rerun must reproduce the original
                    // observations from a clean directory.
                    let repro_sandbox =
                        unique_temp_path("test_sandbox", stem, &format!("{}_repro", level), "");
                    fs::create_dir_all(&repro_sandbox).map_err(|e| {
                        format!(
                            "{}: cannot create repro sandbox dir {}: {}",
                            filename,
                            repro_sandbox.display(),
                            e
                        )
                    })?;
                    let second = run_binary_in_sandbox(&binary, &repro_sandbox, filename)?;
                    let _ = fs::remove_dir_all(&repro_sandbox);
                    if snapshot != second {
                        let detail = describe_run_difference(&snapshot, &second);
                        let _ = fs::remove_file(&binary);
                        let _ = fs::remove_dir_all(&sandbox);
                        return Err(format!(
                            "{} [{}]: REPRO_CHECK(run) failed: {}",
                            filename, opt_flag, detail
                        ));
                    }
                }
                ReproStage::RunSameSandbox => {
                    let second = run_binary_in_sandbox(&binary, &sandbox, filename)?;
                    if snapshot != second {
                        let detail = describe_run_difference(&snapshot, &second);
                        let _ = fs::remove_file(&binary);
                        let _ = fs::remove_dir_all(&sandbox);
                        return Err(format!(
                            "{} [{}]: REPRO_CHECK(run_same_sandbox) failed: {}",
                            filename, opt_flag, detail
                        ));
                    }
                }
            }
        }
        // Runtime phase done: drop the binary and sandbox. The checks
        // below recompile from source and manage their own temp files.
        let _ = fs::remove_file(&binary);
        let _ = fs::remove_dir_all(&sandbox);

        // IR shape assertions: only at -O0, where the IR is
        // stable. Optimization passes (mem2reg, LICM, CSE, etc.)
        // erase the very shape we want to pin, so running these
        // at -O1+ would always fail. The runtime CHECKs above
        // continue to run at every level.
        if !ir_checks.is_empty() && opt_flag == "-O0" {
            let ir_dest = unique_temp_path("test_ir", stem, "o0", ".txt");
            let ir_compile = Command::new(compiler)
                .args([
                    source.to_str().unwrap(),
                    "-O0",
                    "--emit-ir",
                    "-o",
                    ir_dest.to_str().unwrap(),
                ])
                .output()
                .map_err(|e| format!("{}: cannot run --emit-ir: {}", filename, e))?;
            if !ir_compile.status.success() {
                let stderr = String::from_utf8_lossy(&ir_compile.stderr);
                return Err(format!(
                    "{} [{}]: --emit-ir compilation failed:\n{}",
                    filename, opt_flag, stderr,
                ));
            }
            let ir_text = fs::read_to_string(&ir_dest)
                .map_err(|e| format!("{}: cannot read IR: {}", filename, e))?;
            let _ = fs::remove_file(&ir_dest);
            match_ir_checks(&ir_checks, &ir_text, &label)?;
        }

        // ASM shape assertions run at every level (no -O0 gate).
        if !asm_checks.is_empty() {
            let asm_dest = unique_temp_path("test_asm", stem, level, ".s");
            let asm_compile = Command::new(compiler)
                .args([
                    source.to_str().unwrap(),
                    opt_flag,
                    "-S",
                    "-o",
                    asm_dest.to_str().unwrap(),
                ])
                .output()
                .map_err(|e| format!("{}: cannot run -S: {}", filename, e))?;
            if !asm_compile.status.success() {
                let stderr = String::from_utf8_lossy(&asm_compile.stderr);
                return Err(format!(
                    "{} [{}]: -S compilation failed:\n{}",
                    filename, opt_flag, stderr,
                ));
            }
            let asm_text = fs::read_to_string(&asm_dest)
                .map_err(|e| format!("{}: cannot read assembly: {}", filename, e))?;
            let _ = fs::remove_file(&asm_dest);
            match_asm_checks(&asm_checks, &asm_text, &label)?;
        }

        // Cross-level equivalence rules anchored at this level.
        run_opt_eq_rules(
            compiler,
            source,
            opt_flag,
            filename,
            &snapshot,
            &opt_eq_rules,
        )?;

        if let Some(triangulation) = &phase_triangulation {
            // Paths the FILE_* oracles declared as runtime outputs;
            // run_phase_triangulation uses them to label 'clean'
            // violations that are really runtime side-effect leaks.
            let declared_runtime_paths = collect_declared_runtime_paths(
                &file_checks,
                &file_presence_checks,
                &file_line_count_checks,
                &file_rerun_mode_checks,
                file_set_exact.as_ref(),
            );
            run_phase_triangulation(
                compiler,
                source,
                opt_flag,
                filename,
                triangulation,
                &declared_runtime_paths,
            )?;
        }

        Ok(())
    };

    // Fold the pipeline result through the XFAIL disposition.
    let result = inner();
    match (xfail_reason, result) {
        (None, Ok(())) => TestOutcome::Pass,
        (None, Err(e)) => TestOutcome::Fail(e),
        (Some(reason), Err(e)) => TestOutcome::Xfail(format!("{}: {}", reason, e)),
        (Some(reason), Ok(())) => TestOutcome::Xpass(format!(
            "{} [{}]: marked XFAIL ({}) but unexpectedly passed — \
             remove the XFAIL annotation",
            filename, opt_flag, reason,
        )),
    }
}
| 2488 | |
| 2489 | /// Discover the test programs and run each at every supported opt level. |
| 2490 | /// This enforces the correctness invariant: same source must produce |
| 2491 | /// the same output regardless of optimization level. |
| 2492 | fn run_all_at(opt_flag: &str) -> Result<(), String> { |
| 2493 | let compiler = find_compiler(); |
| 2494 | let test_dir = find_test_programs(); |
| 2495 | |
| 2496 | let mut sources: Vec<PathBuf> = fs::read_dir(&test_dir) |
| 2497 | .expect("cannot read test_programs/") |
| 2498 | .filter_map(|e| e.ok()) |
| 2499 | .map(|e| e.path()) |
| 2500 | .filter(|p| is_test_program_source(p)) |
| 2501 | .collect(); |
| 2502 | sources.sort(); |
| 2503 | |
| 2504 | assert!( |
| 2505 | !sources.is_empty(), |
| 2506 | "no Fortran sources found in test_programs/" |
| 2507 | ); |
| 2508 | |
| 2509 | let mut failures = Vec::new(); |
| 2510 | let mut passed = 0; |
| 2511 | let mut xfailed = 0; |
| 2512 | |
| 2513 | for source in &sources { |
| 2514 | let name = source.file_name().unwrap().to_str().unwrap(); |
| 2515 | match run_test(&compiler, source, opt_flag) { |
| 2516 | TestOutcome::Pass => { |
| 2517 | passed += 1; |
| 2518 | eprintln!(" PASS [{}]: {}", opt_flag, name); |
| 2519 | } |
| 2520 | TestOutcome::Xfail(detail) => { |
| 2521 | xfailed += 1; |
| 2522 | // Print the first line of the detail so we know what |
| 2523 | // the underlying failure looked like, in case the bug |
| 2524 | // class shifts. |
| 2525 | let one_line = detail.lines().next().unwrap_or(""); |
| 2526 | eprintln!(" XFAIL [{}]: {} — {}", opt_flag, name, one_line); |
| 2527 | } |
| 2528 | TestOutcome::Xpass(msg) => { |
| 2529 | eprintln!(" XPASS [{}]: {}", opt_flag, name); |
| 2530 | failures.push(msg); |
| 2531 | } |
| 2532 | TestOutcome::Fail(msg) => { |
| 2533 | eprintln!(" FAIL [{}]: {}", opt_flag, name); |
| 2534 | failures.push(msg); |
| 2535 | } |
| 2536 | } |
| 2537 | } |
| 2538 | |
| 2539 | eprintln!( |
| 2540 | "\n[{}] {} passed, {} xfailed, {} failed out of {} test programs", |
| 2541 | opt_flag, |
| 2542 | passed, |
| 2543 | xfailed, |
| 2544 | failures.len(), |
| 2545 | sources.len(), |
| 2546 | ); |
| 2547 | |
| 2548 | if failures.is_empty() { |
| 2549 | Ok(()) |
| 2550 | } else { |
| 2551 | Err(failures.join("\n\n")) |
| 2552 | } |
| 2553 | } |
| 2554 | |
#[test]
fn test_programs_end_to_end() {
    // Baseline level: the unoptimized build must satisfy every oracle.
    match run_all_at("-O0") {
        Ok(()) => {}
        Err(msg) => panic!("Test failures at -O0:\n\n{}", msg),
    }
}
| 2561 | |
#[test]
fn test_programs_end_to_end_o1() {
    // Run the full suite at -O1.
    run_all_at("-O1").unwrap_or_else(|msg| panic!("Test failures at -O1:\n\n{}", msg));
}
| 2568 | |
#[test]
fn test_programs_end_to_end_o2() {
    // Run the full suite at -O2.
    run_all_at("-O2").unwrap_or_else(|msg| panic!("Test failures at -O2:\n\n{}", msg));
}
| 2575 | |
#[test]
fn test_programs_end_to_end_o3() {
    // Run the full suite at -O3.
    run_all_at("-O3").unwrap_or_else(|msg| panic!("Test failures at -O3:\n\n{}", msg));
}
| 2582 | |
#[test]
fn test_programs_end_to_end_os() {
    // Run the full suite at -Os (size optimization).
    run_all_at("-Os").unwrap_or_else(|msg| panic!("Test failures at -Os:\n\n{}", msg));
}
| 2589 | |
#[test]
fn test_programs_end_to_end_ofast() {
    // Run the full suite at -Ofast.
    run_all_at("-Ofast").unwrap_or_else(|msg| panic!("Test failures at -Ofast:\n\n{}", msg));
}
| 2596 | |
#[test]
fn test_program_source_filter_accepts_fixed_form_extensions() {
    // Fixed-form (.f/.for/.ftn) and free-form (.f90) sources are all
    // picked up by discovery; unrelated files are not.
    for accepted in ["hello.f", "legacy.for", "solver.ftn", "main.f90"] {
        assert!(is_test_program_source(Path::new(accepted)));
    }
    assert!(!is_test_program_source(Path::new("notes.txt")));
}
| 2605 | |
/// Compile `source` with `-S` at the given opt level and return the
/// raw bytes of the emitted assembly file. The temporary `.s` file is
/// removed before returning. Panics if the compiler cannot be
/// launched, the compile fails, or the emitted file cannot be read.
///
/// Helper for the determinism regression tests below: codegen
/// non-determinism (HashMap iteration order, stale spill-victim
/// entries, sort tie-breaking) caused flakes during the mem2reg work,
/// so those tests compile twice and require byte-identical output.
fn compile_to_asm(compiler: &Path, source: &Path, opt: &str) -> Vec<u8> {
    // Unique temp name built from the source stem and the opt level
    // (leading '-' stripped) so parallel tests don't collide.
    let asm_path = unique_temp_path(
        "det_asm",
        source.file_stem().unwrap().to_str().unwrap(),
        opt.trim_start_matches('-'),
        ".s",
    );
    let status = Command::new(compiler)
        .args([
            source.to_str().unwrap(),
            opt,
            "-S",
            "-o",
            asm_path.to_str().unwrap(),
        ])
        .status()
        .expect("compiler launch failed");
    assert!(status.success(), "-S compile failed");
    // Read the bytes before deleting; best-effort cleanup afterwards.
    let bytes = fs::read(&asm_path).expect("cannot read emitted .s");
    let _ = fs::remove_file(&asm_path);
    bytes
}
| 2634 | |
/// Determinism regression: compiling the same program twice at -O2
/// must yield byte-identical assembly. Any divergence means codegen
/// depends on non-deterministic state (typically HashMap iteration
/// order), which caused intermittent flakes during the mem2reg work.
#[test]
fn codegen_is_deterministic_at_o2() {
    let compiler = find_compiler();
    let source = find_test_programs().join("two_loops.f90");
    assert!(
        source.exists(),
        "two_loops.f90 missing — needed for determinism check"
    );

    let runs: Vec<Vec<u8>> = (0..2)
        .map(|_| compile_to_asm(&compiler, &source, "-O2"))
        .collect();
    assert_eq!(
        runs[0], runs[1],
        "two compilations of the same source produced different assembly — \
         determinism regression. This usually means a HashMap iteration \
         order leak in codegen."
    );
}
| 2654 | |
/// Determinism regression for programs that import module globals.
/// Audit B-3: `install_globals_as_locals` iterated a HashMap, so the
/// emitted `global_addr` instructions landed in non-deterministic
/// positions, and liveness/regalloc then produced different .s
/// output. Pins the fix at every opt level that runs a register
/// allocator.
#[test]
fn codegen_is_deterministic_with_module_globals() {
    let compiler = find_compiler();
    let source = find_test_programs().join("module_init.f90");
    assert!(
        source.exists(),
        "module_init.f90 missing — needed for determinism check"
    );

    for opt in ["-O0", "-O1", "-O2", "-O3"] {
        let run_a = compile_to_asm(&compiler, &source, opt);
        let run_b = compile_to_asm(&compiler, &source, opt);
        assert_eq!(
            run_a, run_b,
            "two compilations of module_init.f90 produced different assembly at {} — \
             this usually means install_globals_as_locals is iterating a HashMap \
             in non-deterministic order.",
            opt,
        );
    }
}
| 2683 | |
#[test]
fn extract_exit_code_accepts_integer_annotation() {
    // A single EXIT_CODE annotation parses to its integer value.
    let parsed = extract_exit_code("! EXIT_CODE: 17\nprogram t\nend program t\n", "inline.f90");
    assert_eq!(parsed.unwrap(), Some(17));
}
| 2689 | |
#[test]
fn extract_error_span_accepts_line_and_column() {
    // ERROR_SPAN: <line>:<col> parses into an ExpectedSpan that also
    // records the annotation's own line number in the source.
    let source = "! ERROR_EXPECTED: hidden\n! ERROR_SPAN: 13:19\nprogram t\nend program t\n";
    let expected = ExpectedSpan {
        line_num: 2,
        line: 13,
        col: 19,
    };
    assert_eq!(extract_error_span(source, "inline.f90").unwrap(), Some(expected));
}
| 2702 | |
#[test]
fn extract_file_checks_accepts_relative_path_and_pattern() {
    // FILE_CHECK is a positive match, FILE_NOT a negative one; each
    // carries a relative path and a pattern.
    let source = "! FILE_CHECK: out.txt => hello\n! FILE_NOT: out.txt => goodbye\n";
    let checks = extract_file_checks(source, "inline.f90", "! FILE_CHECK:", "! FILE_NOT:").unwrap();
    assert_eq!(checks.len(), 2);
    let positive = &checks[0];
    assert_eq!(positive.rel_path, "out.txt");
    assert_eq!(positive.pattern, "hello");
    assert!(!positive.negative);
    assert!(checks[1].negative);
}
| 2713 | |
#[test]
fn extract_file_presence_checks_accepts_exists_and_missing() {
    // FILE_EXISTS asserts a file is present after the run;
    // FILE_MISSING asserts it is absent.
    let source = "! FILE_EXISTS: out.txt\n! FILE_MISSING: ghost.txt\n";
    let checks = extract_file_presence_checks(source, "inline.f90").unwrap();
    assert_eq!(checks.len(), 2);
    let (exists, missing) = (&checks[0], &checks[1]);
    assert_eq!(exists.rel_path, "out.txt");
    assert!(exists.should_exist);
    assert_eq!(missing.rel_path, "ghost.txt");
    assert!(!missing.should_exist);
}
| 2724 | |
#[test]
fn extract_file_line_count_checks_accepts_relative_path_and_count() {
    // FILE_LINE_COUNT pairs a relative path with an expected line count.
    let checks =
        extract_file_line_count_checks("! FILE_LINE_COUNT: out.txt => 1000\n", "inline.f90")
            .unwrap();
    assert_eq!(checks.len(), 1);
    let check = &checks[0];
    assert_eq!(check.rel_path, "out.txt");
    assert_eq!(check.expected_lines, 1000);
}
| 2733 | |
#[test]
fn extract_file_rerun_mode_checks_accepts_stable_and_append() {
    // Both rerun modes parse, producing one check per annotation line.
    let source = "! FILE_RERUN_MODE: out.txt => stable\n! FILE_RERUN_MODE: log.txt => append\n";
    let checks = extract_file_rerun_mode_checks(source, "inline.f90").unwrap();
    assert_eq!(checks.len(), 2);
    let (stable, append) = (&checks[0], &checks[1]);
    assert_eq!(stable.rel_path, "out.txt");
    assert_eq!(stable.mode, FileRerunMode::Stable);
    assert_eq!(append.rel_path, "log.txt");
    assert_eq!(append.mode, FileRerunMode::Append);
}
| 2744 | |
#[test]
fn extract_file_set_exact_accepts_relative_paths() {
    // FILE_SET_EXACT parses its comma-separated list into sorted paths.
    let parsed = extract_file_set_exact("! FILE_SET_EXACT: out.txt, log.txt\n", "inline.f90");
    let check = parsed.unwrap().unwrap();
    assert_eq!(check.rel_paths, vec!["log.txt", "out.txt"]);
}
| 2752 | |
#[test]
fn file_rerun_mode_matcher_accepts_strict_append_growth() {
    // An append-mode file may strictly grow between runs; the matcher
    // must accept a second snapshot whose file extends the first.
    let checks = vec![FileRerunModeCheck {
        line_num: 1,
        rel_path: "log.txt".into(),
        mode: FileRerunMode::Append,
    }];
    // Both runs share exit code and streams; only the log file grows.
    let snapshot = |log_bytes: &[u8]| RunSnapshot {
        exit_code: 0,
        stdout: "7\n".into(),
        stderr: String::new(),
        files: BTreeMap::from([("log.txt".into(), log_bytes.to_vec())]),
    };
    let first = snapshot(b" 7\n");
    let second = snapshot(b" 7\n 7\n");

    match_file_rerun_mode_checks(&checks, &first, &second, "inline.f90 [O0]").unwrap();
}
| 2775 | |
#[test]
fn extract_repro_checks_rejects_unknown_stage() {
    // Unknown stages are rejected with a message listing the valid set.
    let err = extract_repro_checks("! REPRO_CHECK: ir\n", "inline.f90").unwrap_err();
    assert!(err.contains("asm, obj, run, run_same_sandbox"));
}
| 2782 | |
#[test]
fn extract_repro_checks_accepts_run_same_sandbox_stage() {
    // run_same_sandbox is an accepted repro stage.
    let checks = extract_repro_checks("! REPRO_CHECK: run_same_sandbox\n", "inline.f90").unwrap();
    assert_eq!(checks, vec![ReproStage::RunSameSandbox]);
}
| 2789 | |
#[test]
fn extract_opt_eq_rules_accepts_runtime_and_asm_components() {
    // OPT_EQ lists opt levels (normalized to flags) and the output
    // components that must agree across them, in written order.
    let source = "! OPT_EQ: O0,Os,O2 => stdout|stderr|exit|asm\n";
    let rules = extract_opt_eq_rules(source, "inline.f90").unwrap();
    assert_eq!(rules.len(), 1);
    let rule = &rules[0];
    assert_eq!(rule.opt_flags, vec!["-O0", "-Os", "-O2"]);
    let expected = vec![
        OptEqComponent::Stdout,
        OptEqComponent::Stderr,
        OptEqComponent::Exit,
        OptEqComponent::Asm,
    ];
    assert_eq!(rule.components, expected);
}
| 2806 | |
#[test]
fn extract_opt_eq_rules_rejects_unknown_component() {
    // Unknown components are rejected, naming the valid ones.
    let err = extract_opt_eq_rules("! OPT_EQ: O0,O1 => ir\n", "inline.f90").unwrap_err();
    assert!(err.contains("stdout, stderr, exit, or asm"));
}
| 2813 | |
#[test]
fn extract_phase_triangulation_accepts_ir_asm_obj_clean_and_repro() {
    // All five surfaces parse, preserving the order written.
    let source = "! PHASE_TRIANGULATE: ir|asm|obj|clean|repro\n";
    let rule = extract_phase_triangulation(source, "inline.f90")
        .unwrap()
        .unwrap();
    let expected = vec![
        PhaseSurface::Ir,
        PhaseSurface::Asm,
        PhaseSurface::Obj,
        PhaseSurface::Clean,
        PhaseSurface::Repro,
    ];
    assert_eq!(rule.surfaces, expected);
}
| 2831 | |
#[test]
fn extract_phase_triangulation_rejects_unknown_surface() {
    // "run" is not a triangulation surface; the error names valid ones.
    let err = extract_phase_triangulation("! PHASE_TRIANGULATE: run\n", "inline.f90").unwrap_err();
    assert!(err.contains("ir, asm, obj, clean, or repro"));
}
| 2838 | |
#[test]
fn extract_phase_triangulation_rejects_clean_only() {
    // clean/repro are policy modifiers; at least one real surface is
    // required alongside them.
    let err = extract_phase_triangulation("! PHASE_TRIANGULATE: clean|repro\n", "inline.f90")
        .unwrap_err();
    assert!(err.contains("policy-only annotations"));
}
| 2845 | |
#[test]
fn extract_exit_code_rejects_multiple_annotations() {
    // Duplicate EXIT_CODE annotations are a harness-usage error.
    let err = extract_exit_code(
        "! EXIT_CODE: 1\n! EXIT_CODE: 2\nprogram t\nend program t\n",
        "inline.f90",
    )
    .unwrap_err();
    assert!(err.contains("multiple EXIT_CODE annotations"));
}
| 2852 | |
#[test]
fn match_checks_reports_stderr_check_failures_by_name() {
    // A failed check is reported under the caller-supplied label so
    // stderr mismatches are distinguishable from stdout ones.
    let checks = vec![Check {
        line_num: 1,
        pattern: "ERROR STOP".into(),
    }];
    let err = match_checks(&checks, "different stderr", "inline.f90 [O0]", "STDERR_CHECK")
        .unwrap_err();
    assert!(err.contains("STDERR_CHECK failed"));
}
| 2868 | |
#[test]
fn diagnostic_contains_span_matches_line_and_column_fragment() {
    // A "line:col:" fragment inside the diagnostic satisfies the span.
    let span = ExpectedSpan {
        line_num: 1,
        line: 13,
        col: 19,
    };
    let stderr = "armfortas: error: 13:19: hidden is not accessible";
    assert!(diagnostic_contains_span(stderr, span));
}
| 2881 | |
#[test]
fn stderr_and_exit_code_annotations_allow_error_stop() {
    // ERROR STOP programs pass when their stderr/exit-code annotations
    // match what the runtime produces.
    let compiler = find_compiler();
    let source = find_test_programs().join("error_stop_status.f90");
    assert!(
        source.exists(),
        "error_stop_status.f90 missing — needed for stderr/exit-code harness coverage"
    );

    let outcome = run_test(&compiler, &source, "-O0");
    if !matches!(outcome, TestOutcome::Pass) {
        panic!("error_stop_status.f90 should pass, got {:?}", outcome);
    }
}
| 2897 | |
#[test]
fn error_expected_and_span_match_hidden_use_only_error() {
    // ERROR_EXPECTED plus ERROR_SPAN together pass when the compiler
    // rejects the program at the annotated location.
    let compiler = find_compiler();
    let source = find_test_programs().join("audit6_filter_associate.f90");
    assert!(
        source.exists(),
        "audit6_filter_associate.f90 missing — needed for ERROR_SPAN coverage"
    );

    let outcome = run_test(&compiler, &source, "-O0");
    if !matches!(outcome, TestOutcome::Pass) {
        panic!(
            "audit6_filter_associate.f90 should pass with ERROR_EXPECTED + ERROR_SPAN, got {:?}",
            outcome
        );
    }
}
| 2916 | |
#[test]
fn file_checks_allow_file_roundtrip() {
    // FILE_CHECK/FILE_NOT annotations on a program that writes a file
    // are honored end to end.
    let compiler = find_compiler();
    let source = find_test_programs().join("file_io.f90");
    assert!(
        source.exists(),
        "file_io.f90 missing — needed for FILE_CHECK coverage"
    );

    let outcome = run_test(&compiler, &source, "-O0");
    if !matches!(outcome, TestOutcome::Pass) {
        panic!(
            "file_io.f90 should pass with FILE_CHECK coverage, got {:?}",
            outcome
        );
    }
}
| 2935 | |
#[test]
fn file_presence_checks_allow_rewind_side_effects() {
    // FILE_EXISTS/FILE_MISSING annotations verify side-effect files
    // after a REWIND-exercising program runs.
    let compiler = find_compiler();
    let source = find_test_programs().join("io_rewind.f90");
    assert!(
        source.exists(),
        "io_rewind.f90 missing — needed for FILE_EXISTS/FILE_MISSING coverage"
    );

    let outcome = run_test(&compiler, &source, "-O0");
    if !matches!(outcome, TestOutcome::Pass) {
        panic!(
            "io_rewind.f90 should pass with FILE_EXISTS/FILE_MISSING coverage, got {:?}",
            outcome
        );
    }
}
| 2954 | |
#[test]
fn file_set_exact_allows_rewind_single_output() {
    // FILE_SET_EXACT accepts a program whose only sandbox output is
    // the single listed file.
    let compiler = find_compiler();
    let source = find_test_programs().join("io_rewind.f90");
    assert!(
        source.exists(),
        "io_rewind.f90 missing — needed for FILE_SET_EXACT coverage"
    );

    let outcome = run_test(&compiler, &source, "-O0");
    if !matches!(outcome, TestOutcome::Pass) {
        panic!(
            "io_rewind.f90 should pass with FILE_SET_EXACT coverage, got {:?}",
            outcome
        );
    }
}
| 2973 | |
#[test]
fn file_rerun_mode_append_fixture_passes_and_keeps_append_coverage() {
    // The append-log fixture keeps FILE_RERUN_MODE(append) exercised
    // by the regular harness path.
    let compiler = find_compiler();
    let source = find_test_programs().join("io_append_log.f90");
    assert!(
        source.exists(),
        "io_append_log.f90 missing — needed for FILE_RERUN_MODE append coverage"
    );

    let outcome = run_test(&compiler, &source, "-O0");
    if !matches!(outcome, TestOutcome::Pass) {
        panic!(
            "io_append_log.f90 should pass while still exercising FILE_RERUN_MODE(append), got {:?}",
            outcome
        );
    }
}
| 2992 | |
#[test]
fn file_line_count_and_same_sandbox_repro_allow_flush_stress() {
    // The flush-stress fixture exercises FILE_LINE_COUNT together with
    // REPRO_CHECK(run_same_sandbox).
    let compiler = find_compiler();
    let source = find_test_programs().join("io_flush_stress.f90");
    assert!(
        source.exists(),
        "io_flush_stress.f90 missing — needed for FILE_LINE_COUNT and run_same_sandbox coverage"
    );

    let outcome = run_test(&compiler, &source, "-O0");
    if !matches!(outcome, TestOutcome::Pass) {
        panic!(
            "io_flush_stress.f90 should pass with FILE_LINE_COUNT and REPRO_CHECK(run_same_sandbox), got {:?}",
            outcome
        );
    }
}
| 3011 | |
#[test]
fn repro_checks_allow_hello_stage_repro() {
    // The hello fixture carries REPRO_CHECK annotations; it must pass
    // through the normal harness path.
    let compiler = find_compiler();
    let source = find_test_programs().join("hello.f90");
    assert!(
        source.exists(),
        "hello.f90 missing — needed for REPRO_CHECK coverage"
    );

    let outcome = run_test(&compiler, &source, "-O0");
    if !matches!(outcome, TestOutcome::Pass) {
        panic!(
            "hello.f90 should pass with REPRO_CHECK coverage, got {:?}",
            outcome
        );
    }
}
| 3030 | |
#[test]
fn opt_eq_annotations_allow_hello_cross_opt_invariant() {
    // The hello fixture's OPT_EQ annotation (cross-opt-level output
    // equivalence) must hold under the normal harness path.
    let compiler = find_compiler();
    let source = find_test_programs().join("hello.f90");
    assert!(
        source.exists(),
        "hello.f90 missing — needed for OPT_EQ coverage"
    );

    let outcome = run_test(&compiler, &source, "-O0");
    if !matches!(outcome, TestOutcome::Pass) {
        panic!(
            "hello.f90 should pass with OPT_EQ coverage, got {:?}",
            outcome
        );
    }
}
| 3049 | |
#[test]
fn phase_triangulation_allows_function_call_pipeline_surfaces() {
    // The function-call fixture carries PHASE_TRIANGULATE surfaces and
    // must pass through the normal harness path.
    let compiler = find_compiler();
    let source = find_test_programs().join("function_call.f90");
    assert!(
        source.exists(),
        "function_call.f90 missing — needed for PHASE_TRIANGULATE coverage"
    );

    let outcome = run_test(&compiler, &source, "-O0");
    if !matches!(outcome, TestOutcome::Pass) {
        panic!(
            "function_call.f90 should pass with PHASE_TRIANGULATE coverage, got {:?}",
            outcome
        );
    }
}
| 3068 | |
#[test]
fn phase_triangulation_repro_keeps_function_call_compile_surfaces_stable() {
    // PHASE_TRIANGULATE(repro) on the function-call fixture must pass,
    // i.e. its compile surfaces reproduce identically.
    let compiler = find_compiler();
    let source = find_test_programs().join("function_call.f90");
    assert!(
        source.exists(),
        "function_call.f90 missing — needed for PHASE_TRIANGULATE(repro) coverage"
    );

    let outcome = run_test(&compiler, &source, "-O0");
    if !matches!(outcome, TestOutcome::Pass) {
        panic!(
            "function_call.f90 should pass with PHASE_TRIANGULATE(repro) coverage, got {:?}",
            outcome
        );
    }
}
| 3087 | |
#[test]
fn phase_triangulation_clean_keeps_compile_phases_free_of_runtime_files() {
    // PHASE_TRIANGULATE(clean) on the flush-stress fixture must pass,
    // i.e. compile-only phases leave no runtime output files behind.
    let compiler = find_compiler();
    let source = find_test_programs().join("io_flush_stress.f90");
    assert!(
        source.exists(),
        "io_flush_stress.f90 missing — needed for PHASE_TRIANGULATE(clean) coverage"
    );

    let outcome = run_test(&compiler, &source, "-O0");
    if !matches!(outcome, TestOutcome::Pass) {
        panic!(
            "io_flush_stress.f90 should pass with PHASE_TRIANGULATE(clean) coverage, got {:?}",
            outcome
        );
    }
}
| 3106 | |
#[test]
fn split_multifile_segments_parses_markers() {
    // `!--- file:` markers split one source into named segments, each
    // carrying the lines that follow its marker.
    let src = "\
! CHECK: 42
! MULTIFILE_LINK: mod.f90 main.f90
!--- file: mod.f90
module m
integer :: x = 42
end module
!--- file: main.f90
program p
use m
print *, x
end program
";
    let segs = split_multifile_segments(src).unwrap();
    assert_eq!(segs.len(), 2);
    let (module_seg, main_seg) = (&segs[0], &segs[1]);
    assert_eq!(module_seg.name, "mod.f90");
    assert!(module_seg.source.contains("module m"));
    assert_eq!(main_seg.name, "main.f90");
    assert!(main_seg.source.contains("program p"));
}
| 3129 | |
#[test]
fn split_multifile_segments_returns_none_for_single_file() {
    // Without `!--- file:` markers there is nothing to split.
    let single = "program t\n print *, 1\nend program\n";
    assert!(split_multifile_segments(single).is_none());
}
| 3135 | |
#[test]
fn extract_multifile_link_parses_order() {
    // MULTIFILE_LINK preserves the listed compile/link order.
    let order =
        extract_multifile_link("! MULTIFILE_LINK: a.f90 b.f90 c.f90\n! CHECK: ok\n").unwrap();
    assert_eq!(order, vec!["a.f90", "b.f90", "c.f90"]);
}
| 3142 | |
#[test]
fn multifile_basic_module_runs_at_o0() {
    // A two-segment module+program fixture compiles, links, and runs.
    let compiler = find_compiler();
    let source = find_test_programs().join("multifile_basic_module.f90");
    assert!(source.exists(), "multifile_basic_module.f90 missing");

    let outcome = run_test(&compiler, &source, "-O0");
    if !matches!(outcome, TestOutcome::Pass) {
        panic!(
            "multifile_basic_module.f90 should pass at -O0, got {:?}",
            outcome
        );
    }
}
| 3157 | |
#[test]
fn multifile_three_modules_runs_at_o0() {
    // A three-module fixture exercises deeper multifile dependencies.
    let compiler = find_compiler();
    let source = find_test_programs().join("multifile_three_modules.f90");
    assert!(source.exists(), "multifile_three_modules.f90 missing");

    let outcome = run_test(&compiler, &source, "-O0");
    if !matches!(outcome, TestOutcome::Pass) {
        panic!(
            "multifile_three_modules.f90 should pass at -O0, got {:?}",
            outcome
        );
    }
}
| 3172 | |
#[test]
fn multifile_error_circular_direct_detected() {
    // A direct A-uses-B-uses-A cycle must be rejected; the fixture's
    // ERROR_EXPECTED annotation turns that rejection into a pass.
    let compiler = find_compiler();
    let source = find_test_programs().join("error_circular_use_direct.f90");
    assert!(source.exists(), "error_circular_use_direct.f90 missing");

    let outcome = run_test(&compiler, &source, "-O0");
    if !matches!(outcome, TestOutcome::Pass) {
        panic!(
            "circular use direct should pass (ERROR_EXPECTED match), got {:?}",
            outcome
        );
    }
}
| 3187 | |
#[test]
fn multifile_error_circular_direct_detected_at_o1() {
    // Same direct-cycle rejection, verified at -O1.
    let compiler = find_compiler();
    let source = find_test_programs().join("error_circular_use_direct.f90");
    assert!(source.exists(), "error_circular_use_direct.f90 missing");

    let outcome = run_test(&compiler, &source, "-O1");
    if !matches!(outcome, TestOutcome::Pass) {
        panic!(
            "circular use direct should pass (ERROR_EXPECTED match) at -O1, got {:?}",
            outcome
        );
    }
}
| 3202 | |
#[test]
fn multifile_error_circular_indirect_detected() {
    // An indirect use-cycle (through an intermediate module) must also
    // be rejected; ERROR_EXPECTED turns the rejection into a pass.
    let compiler = find_compiler();
    let source = find_test_programs().join("error_circular_use_indirect.f90");
    assert!(source.exists(), "error_circular_use_indirect.f90 missing");

    let outcome = run_test(&compiler, &source, "-O0");
    if !matches!(outcome, TestOutcome::Pass) {
        panic!(
            "circular use indirect should pass (ERROR_EXPECTED match), got {:?}",
            outcome
        );
    }
}
| 3217 | |
#[test]
fn multifile_error_circular_indirect_detected_at_o1() {
    // Same indirect-cycle rejection, verified at -O1.
    let compiler = find_compiler();
    let source = find_test_programs().join("error_circular_use_indirect.f90");
    assert!(source.exists(), "error_circular_use_indirect.f90 missing");

    let outcome = run_test(&compiler, &source, "-O1");
    if !matches!(outcome, TestOutcome::Pass) {
        panic!(
            "circular use indirect should pass (ERROR_EXPECTED match) at -O1, got {:?}",
            outcome
        );
    }
}
| 3232 | |
#[test]
fn single_file_module_program_does_not_leave_root_amod() {
    // Regression: a single-file module program must not drop a .amod
    // artifact into the repo root when run through the harness.
    let compiler = find_compiler();
    let source = find_test_programs().join("module_global_host_assoc.f90");
    assert!(source.exists(), "module_global_host_assoc.f90 missing");

    let leaked = PathBuf::from("module_global_host_assoc_mod.amod");
    // Start from a clean slate so a pre-existing leak can't mask one.
    let _ = fs::remove_file(&leaked);

    let outcome = run_test(&compiler, &source, "-O0");
    if !matches!(outcome, TestOutcome::Pass) {
        panic!(
            "module_global_host_assoc.f90 should pass at -O0 without leaking .amod, got {:?}",
            outcome
        );
    }
    assert!(
        !leaked.exists(),
        "single-file run_test should not leak {} into the repo root",
        leaked.display()
    );
}
| 3254 |