Add linker job limit

Authored by: mfwolffe <wolffemf@dukes.jmu.edu>
SHA: 47f6d741bd9d4dbec1daa3367e65c33e3996ab6f (short: 47f6d74)
Parent: 4096883
Tree: a2adda6

| Status | File | + | - |
|---|---|---|---|
| M | src/args.rs | 60 | 0 |
| M | src/lib.rs | 42 | 11 |
| M | src/main.rs | 1 | 0 |
| M | src/resolve.rs | 21 | 12 |
| M | tests/determinism.rs | 16 | 0 |
| M | tests/resolve_integration.rs | 2 | 2 |
| M | tests/snapshots/help.txt | 1 | 0 |
#### src/args.rs (modified)

@@ -55,6 +55,7 @@ const KNOWN_FLAGS: &[&str] = &[
| 55 | 55 | "-dylib", |
| 56 | 56 | "-all_load", |
| 57 | 57 | "-force_load", |
| 58 | + "-j", | |
| 58 | 59 | "--dump", |
| 59 | 60 | "--dump-archive", |
| 60 | 61 | "--dump-dylib", |
@@ -141,6 +142,24 @@ fn parse_version_component(flag: &str, value: &str) -> Result<u32, ArgsError> { | ||
| 141 | 142 | Ok((major << 16) | ((minor & 0xff) << 8) | (patch & 0xff)) |
| 142 | 143 | } |
| 143 | 144 | |
| 145 | +fn parse_jobs(value: &str) -> Result<usize, ArgsError> { | |
| 146 | + let jobs = value | |
| 147 | + .parse::<usize>() | |
| 148 | + .map_err(|_| ArgsError::InvalidValue { | |
| 149 | + flag: "-j".into(), | |
| 150 | + value: value.to_string(), | |
| 151 | + expected: "positive integer job count".into(), | |
| 152 | + })?; | |
| 153 | + if jobs == 0 { | |
| 154 | + return Err(ArgsError::InvalidValue { | |
| 155 | + flag: "-j".into(), | |
| 156 | + value: value.to_string(), | |
| 157 | + expected: "positive integer job count".into(), | |
| 158 | + }); | |
| 159 | + } | |
| 160 | + Ok(jobs) | |
| 161 | +} | |
| 162 | + | |
| 144 | 163 | pub fn parse(argv: &[String]) -> Result<LinkOptions, ArgsError> { |
| 145 | 164 | let normalized = normalize_wl(argv); |
| 146 | 165 | let mut opts = LinkOptions::default(); |
@@ -395,6 +414,12 @@ pub fn parse(argv: &[String]) -> Result<LinkOptions, ArgsError> { | ||
| 395 | 414 | ArgsError::MissingValue("-force_load".into()) |
| 396 | 415 | })?)); |
| 397 | 416 | } |
| 417 | + "-j" => { | |
| 418 | + let value = it | |
| 419 | + .next() | |
| 420 | + .ok_or_else(|| ArgsError::MissingValue("-j".into()))?; | |
| 421 | + opts.jobs = Some(parse_jobs(value)?); | |
| 422 | + } | |
| 398 | 423 | "--dump" => { |
| 399 | 424 | opts.dump = Some(PathBuf::from( |
| 400 | 425 | it.next() |
@@ -785,6 +810,41 @@ mod tests { | ||
| 785 | 810 | assert_eq!(opts.inputs, vec![PathBuf::from("main.o")]); |
| 786 | 811 | } |
| 787 | 812 | |
| 813 | + #[test] | |
| 814 | + fn jobs_flag_records_positive_worker_limit() { | |
| 815 | + let opts = parse(&argv(&["-j", "1", "main.o"])).unwrap(); | |
| 816 | + assert_eq!(opts.jobs, Some(1)); | |
| 817 | + assert_eq!(opts.inputs, vec![PathBuf::from("main.o")]); | |
| 818 | + } | |
| 819 | + | |
| 820 | + #[test] | |
| 821 | + fn jobs_flag_rejects_zero_or_non_numeric_values() { | |
| 822 | + let err = parse(&argv(&["-j", "0"])).unwrap_err(); | |
| 823 | + assert!(matches!( | |
| 824 | + err, | |
| 825 | + ArgsError::InvalidValue { | |
| 826 | + ref flag, | |
| 827 | + ref value, | |
| 828 | + .. | |
| 829 | + } if flag == "-j" && value == "0" | |
| 830 | + )); | |
| 831 | + let err = parse(&argv(&["-j", "many"])).unwrap_err(); | |
| 832 | + assert!(matches!( | |
| 833 | + err, | |
| 834 | + ArgsError::InvalidValue { | |
| 835 | + ref flag, | |
| 836 | + ref value, | |
| 837 | + .. | |
| 838 | + } if flag == "-j" && value == "many" | |
| 839 | + )); | |
| 840 | + } | |
| 841 | + | |
| 842 | + #[test] | |
| 843 | + fn missing_jobs_value_errors() { | |
| 844 | + let err = parse(&argv(&["-j"])).unwrap_err(); | |
| 845 | + assert!(matches!(err, ArgsError::MissingValue(ref f) if f == "-j")); | |
| 846 | + } | |
| 847 | + | |
| 788 | 848 | #[test] |
| 789 | 849 | fn missing_force_load_value_errors() { |
| 790 | 850 | let err = parse(&argv(&["-force_load"])).unwrap_err(); |
#### src/lib.rs (modified)

@@ -125,6 +125,7 @@ pub struct LinkOptions {
| 125 | 125 | pub fixup_chains: bool, |
| 126 | 126 | pub all_load: bool, |
| 127 | 127 | pub force_load_archives: Vec<PathBuf>, |
| 128 | + pub jobs: Option<usize>, | |
| 128 | 129 | pub kind: OutputKind, |
| 129 | 130 | /// When set, afs-ld operates in dump mode and prints the given file's |
| 130 | 131 | /// header + load commands instead of linking. |
@@ -176,6 +177,7 @@ impl Default for LinkOptions { | ||
| 176 | 177 | fixup_chains: false, |
| 177 | 178 | all_load: false, |
| 178 | 179 | force_load_archives: Vec::new(), |
| 180 | + jobs: None, | |
| 179 | 181 | kind: OutputKind::Executable, |
| 180 | 182 | dump: None, |
| 181 | 183 | dump_archive: None, |
@@ -185,6 +187,18 @@ impl Default for LinkOptions { | ||
| 185 | 187 | } |
| 186 | 188 | } |
| 187 | 189 | |
| 190 | +impl LinkOptions { | |
| 191 | + pub fn parallel_jobs(&self) -> usize { | |
| 192 | + self.jobs | |
| 193 | + .unwrap_or_else(|| { | |
| 194 | + thread::available_parallelism() | |
| 195 | + .map(usize::from) | |
| 196 | + .unwrap_or(1) | |
| 197 | + }) | |
| 198 | + .max(1) | |
| 199 | + } | |
| 200 | +} | |
| 201 | + | |
| 188 | 202 | #[derive(Debug)] |
| 189 | 203 | pub enum LinkError { |
| 190 | 204 | /// No input files were provided on the command line. |
@@ -445,6 +459,7 @@ impl Linker { | ||
| 445 | 459 | if opts.inputs.is_empty() && opts.library_names.is_empty() && opts.frameworks.is_empty() { |
| 446 | 460 | return Err(LinkError::NoInputs); |
| 447 | 461 | } |
| 462 | + let parallel_jobs = opts.parallel_jobs(); | |
| 448 | 463 | |
| 449 | 464 | if let Some(arch) = &opts.arch { |
| 450 | 465 | if arch != "arm64" { |
@@ -513,7 +528,7 @@ impl Linker { | ||
| 513 | 528 | } |
| 514 | 529 | initial_loads.push((load_order, path.clone())); |
| 515 | 530 | } |
| 516 | - for loaded in load_initial_inputs(initial_loads)? { | |
| 531 | + for loaded in load_initial_inputs(initial_loads, parallel_jobs)? { | |
| 517 | 532 | let timings = register_loaded_initial_input(&mut inputs, loaded); |
| 518 | 533 | phases.add_input_load(timings); |
| 519 | 534 | } |
@@ -540,13 +555,24 @@ impl Linker { | ||
| 540 | 555 | |
| 541 | 556 | let mut force_report = DrainReport::default(); |
| 542 | 557 | if opts.all_load { |
| 543 | - force_load_all(&mut inputs, &mut sym_table, &mut force_report)?; | |
| 558 | + force_load_all( | |
| 559 | + &mut inputs, | |
| 560 | + &mut sym_table, | |
| 561 | + &mut force_report, | |
| 562 | + parallel_jobs, | |
| 563 | + )?; | |
| 544 | 564 | } |
| 545 | 565 | for archive_path in &opts.force_load_archives { |
| 546 | 566 | let Some(archive_id) = find_archive_by_path(&inputs, archive_path) else { |
| 547 | 567 | return Err(LinkError::ForceLoadNotArchive(archive_path.clone())); |
| 548 | 568 | }; |
| 549 | - force_load_archive(&mut inputs, &mut sym_table, archive_id, &mut force_report)?; | |
| 569 | + force_load_archive( | |
| 570 | + &mut inputs, | |
| 571 | + &mut sym_table, | |
| 572 | + archive_id, | |
| 573 | + &mut force_report, | |
| 574 | + parallel_jobs, | |
| 575 | + )?; | |
| 550 | 576 | } |
| 551 | 577 | if opts.trace_inputs { |
| 552 | 578 | for path in &force_report.loaded_paths { |
@@ -561,7 +587,12 @@ impl Linker { | ||
| 561 | 587 | return Err(LinkError::DuplicateSymbols(msg)); |
| 562 | 588 | } |
| 563 | 589 | |
| 564 | - let drain_report = drain_fetches(&mut inputs, &mut sym_table, seed_report.pending_fetches)?; | |
| 590 | + let drain_report = drain_fetches( | |
| 591 | + &mut inputs, | |
| 592 | + &mut sym_table, | |
| 593 | + seed_report.pending_fetches, | |
| 594 | + parallel_jobs, | |
| 595 | + )?; | |
| 565 | 596 | if opts.trace_inputs { |
| 566 | 597 | for path in &drain_report.loaded_paths { |
| 567 | 598 | eprintln!("afs-ld: loading {}", path.display()); |
@@ -991,7 +1022,10 @@ struct InitialLoadError { | ||
| 991 | 1022 | error: LinkError, |
| 992 | 1023 | } |
| 993 | 1024 | |
| 994 | -fn load_initial_inputs(loads: Vec<(usize, PathBuf)>) -> Result<Vec<LoadedInitialInput>, LinkError> { | |
| 1025 | +fn load_initial_inputs( | |
| 1026 | + loads: Vec<(usize, PathBuf)>, | |
| 1027 | + parallel_jobs: usize, | |
| 1028 | +) -> Result<Vec<LoadedInitialInput>, LinkError> { | |
| 995 | 1029 | let mut results = Vec::new(); |
| 996 | 1030 | let mut object_jobs = Vec::new(); |
| 997 | 1031 | for (load_order, path) in loads { |
@@ -1001,7 +1035,7 @@ fn load_initial_inputs(loads: Vec<(usize, PathBuf)>) -> Result<Vec<LoadedInitial | ||
| 1001 | 1035 | object_jobs.push((load_order, path)); |
| 1002 | 1036 | } |
| 1003 | 1037 | } |
| 1004 | - results.extend(load_objects_parallel(object_jobs)); | |
| 1038 | + results.extend(load_objects_parallel(object_jobs, parallel_jobs)); | |
| 1005 | 1039 | results.sort_by_key(|result| match result { |
| 1006 | 1040 | Ok(input) => input.load_order(), |
| 1007 | 1041 | Err(error) => error.load_order, |
@@ -1019,15 +1053,12 @@ fn load_initial_inputs(loads: Vec<(usize, PathBuf)>) -> Result<Vec<LoadedInitial | ||
| 1019 | 1053 | |
| 1020 | 1054 | fn load_objects_parallel( |
| 1021 | 1055 | jobs: Vec<(usize, PathBuf)>, |
| 1056 | + parallel_jobs: usize, | |
| 1022 | 1057 | ) -> Vec<Result<LoadedInitialInput, InitialLoadError>> { |
| 1023 | 1058 | if jobs.is_empty() { |
| 1024 | 1059 | return Vec::new(); |
| 1025 | 1060 | } |
| 1026 | - let job_count = thread::available_parallelism() | |
| 1027 | - .map(usize::from) | |
| 1028 | - .unwrap_or(1) | |
| 1029 | - .min(jobs.len()) | |
| 1030 | - .max(1); | |
| 1061 | + let job_count = parallel_jobs.max(1).min(jobs.len()).max(1); | |
| 1031 | 1062 | if job_count == 1 { |
| 1032 | 1063 | return jobs |
| 1033 | 1064 | .into_iter() |
#### src/main.rs (modified)

@@ -45,6 +45,7 @@ Options:
| 45 | 45 | Select chained fixups vs classic dyld info |
| 46 | 46 | -all_load Force-load every archive member |
| 47 | 47 | -force_load <archive> Force-load one archive |
| 48 | + -j <jobs> Limit parallel worker jobs (`1` disables parallelism) | |
| 48 | 49 | -Wl,<arg,arg,...> Normalize comma-separated driver flags |
| 49 | 50 | --dump <path> Dump a Mach-O file summary |
| 50 | 51 | --dump-archive <path> Dump an archive summary |
#### src/resolve.rs (modified)

@@ -1215,16 +1215,13 @@ fn make_archive_member_jobs<'a>(
| 1215 | 1215 | fn load_archive_members_parallel( |
| 1216 | 1216 | inputs: &Inputs, |
| 1217 | 1217 | keys: Vec<ArchiveMemberKey>, |
| 1218 | + parallel_jobs: usize, | |
| 1218 | 1219 | ) -> Vec<(ArchiveMemberKey, Result<LoadedArchiveMember, FetchError>)> { |
| 1219 | 1220 | let jobs = make_archive_member_jobs(inputs, keys); |
| 1220 | 1221 | if jobs.is_empty() { |
| 1221 | 1222 | return Vec::new(); |
| 1222 | 1223 | } |
| 1223 | - let job_count = thread::available_parallelism() | |
| 1224 | - .map(usize::from) | |
| 1225 | - .unwrap_or(1) | |
| 1226 | - .min(jobs.len()) | |
| 1227 | - .max(1); | |
| 1224 | + let job_count = parallel_jobs.max(1).min(jobs.len()).max(1); | |
| 1228 | 1225 | if job_count == 1 { |
| 1229 | 1226 | return jobs |
| 1230 | 1227 | .into_iter() |
@@ -1346,11 +1343,12 @@ fn load_and_ingest_member( | ||
| 1346 | 1343 | table: &mut SymbolTable, |
| 1347 | 1344 | key: ArchiveMemberKey, |
| 1348 | 1345 | report: &mut DrainReport, |
| 1346 | + parallel_jobs: usize, | |
| 1349 | 1347 | ) -> Result<Vec<PendingFetch>, FetchError> { |
| 1350 | 1348 | if archive_member_is_fetched(inputs, key) { |
| 1351 | 1349 | return Ok(Vec::new()); |
| 1352 | 1350 | } |
| 1353 | - let loaded = load_archive_members_parallel(inputs, vec![key]) | |
| 1351 | + let loaded = load_archive_members_parallel(inputs, vec![key], parallel_jobs) | |
| 1354 | 1352 | .into_iter() |
| 1355 | 1353 | .next() |
| 1356 | 1354 | .expect("single archive member load should produce one result") |
@@ -1366,12 +1364,19 @@ fn fetch_and_ingest_one( | ||
| 1366 | 1364 | table: &mut SymbolTable, |
| 1367 | 1365 | pending: PendingFetch, |
| 1368 | 1366 | report: &mut DrainReport, |
| 1367 | + parallel_jobs: usize, | |
| 1369 | 1368 | ) -> Result<Vec<PendingFetch>, FetchError> { |
| 1370 | 1369 | let slot_is_still_lazy = matches!(table.get(pending.id), Symbol::LazyArchive { .. }); |
| 1371 | 1370 | if !slot_is_still_lazy { |
| 1372 | 1371 | return Ok(Vec::new()); |
| 1373 | 1372 | } |
| 1374 | - load_and_ingest_member(inputs, table, archive_member_key(pending), report) | |
| 1373 | + load_and_ingest_member( | |
| 1374 | + inputs, | |
| 1375 | + table, | |
| 1376 | + archive_member_key(pending), | |
| 1377 | + report, | |
| 1378 | + parallel_jobs, | |
| 1379 | + ) | |
| 1375 | 1380 | } |
| 1376 | 1381 | |
| 1377 | 1382 | /// Pull every member of one archive (bypasses demand tracking). Respects |
@@ -1382,6 +1387,7 @@ pub fn force_load_archive( | ||
| 1382 | 1387 | table: &mut SymbolTable, |
| 1383 | 1388 | archive_id: ArchiveId, |
| 1384 | 1389 | report: &mut DrainReport, |
| 1390 | + parallel_jobs: usize, | |
| 1385 | 1391 | ) -> Result<(), FetchError> { |
| 1386 | 1392 | let member_offsets: Vec<u32> = { |
| 1387 | 1393 | let ai = &inputs.archives[archive_id.0 as usize]; |
@@ -1399,12 +1405,12 @@ pub fn force_load_archive( | ||
| 1399 | 1405 | }) |
| 1400 | 1406 | .collect(); |
| 1401 | 1407 | let mut queue: Vec<PendingFetch> = Vec::new(); |
| 1402 | - for (_, loaded) in load_archive_members_parallel(inputs, keys) { | |
| 1408 | + for (_, loaded) in load_archive_members_parallel(inputs, keys, parallel_jobs) { | |
| 1403 | 1409 | let new = ingest_loaded_member(inputs, table, loaded?, report)?; |
| 1404 | 1410 | queue.extend(new); |
| 1405 | 1411 | } |
| 1406 | 1412 | while let Some(p) = queue.pop() { |
| 1407 | - let new = fetch_and_ingest_one(inputs, table, p, report)?; | |
| 1413 | + let new = fetch_and_ingest_one(inputs, table, p, report, parallel_jobs)?; | |
| 1408 | 1414 | queue.extend(new); |
| 1409 | 1415 | } |
| 1410 | 1416 | Ok(()) |
@@ -1416,9 +1422,10 @@ pub fn force_load_all( | ||
| 1416 | 1422 | inputs: &mut Inputs, |
| 1417 | 1423 | table: &mut SymbolTable, |
| 1418 | 1424 | report: &mut DrainReport, |
| 1425 | + parallel_jobs: usize, | |
| 1419 | 1426 | ) -> Result<(), FetchError> { |
| 1420 | 1427 | for i in 0..inputs.archives.len() { |
| 1421 | - force_load_archive(inputs, table, ArchiveId(i as u32), report)?; | |
| 1428 | + force_load_archive(inputs, table, ArchiveId(i as u32), report, parallel_jobs)?; | |
| 1422 | 1429 | } |
| 1423 | 1430 | Ok(()) |
| 1424 | 1431 | } |
@@ -1697,6 +1704,7 @@ pub fn drain_fetches( | ||
| 1697 | 1704 | inputs: &mut Inputs, |
| 1698 | 1705 | table: &mut SymbolTable, |
| 1699 | 1706 | initial: Vec<PendingFetch>, |
| 1707 | + parallel_jobs: usize, | |
| 1700 | 1708 | ) -> Result<DrainReport, FetchError> { |
| 1701 | 1709 | let mut queue = initial; |
| 1702 | 1710 | let mut prepared = HashMap::new(); |
@@ -1711,7 +1719,7 @@ pub fn drain_fetches( | ||
| 1711 | 1719 | // Parse siblings ahead of time, but only ingest the current stack |
| 1712 | 1720 | // entry after re-checking its lazy slot. This keeps member order stable. |
| 1713 | 1721 | if !prepared.contains_key(&key) { |
| 1714 | - preparse_pending_fetches(inputs, table, p, &queue, &mut prepared); | |
| 1722 | + preparse_pending_fetches(inputs, table, p, &queue, &mut prepared, parallel_jobs); | |
| 1715 | 1723 | } |
| 1716 | 1724 | let Some(loaded) = prepared.remove(&key) else { |
| 1717 | 1725 | continue; |
@@ -1733,6 +1741,7 @@ fn preparse_pending_fetches( | ||
| 1733 | 1741 | current: PendingFetch, |
| 1734 | 1742 | queue: &[PendingFetch], |
| 1735 | 1743 | prepared: &mut HashMap<ArchiveMemberKey, Result<LoadedArchiveMember, FetchError>>, |
| 1744 | + parallel_jobs: usize, | |
| 1736 | 1745 | ) { |
| 1737 | 1746 | let mut seen = HashSet::new(); |
| 1738 | 1747 | let mut keys = Vec::new(); |
@@ -1747,7 +1756,7 @@ fn preparse_pending_fetches( | ||
| 1747 | 1756 | } |
| 1748 | 1757 | keys.push(key); |
| 1749 | 1758 | } |
| 1750 | - for (key, result) in load_archive_members_parallel(inputs, keys) { | |
| 1759 | + for (key, result) in load_archive_members_parallel(inputs, keys, parallel_jobs) { | |
| 1751 | 1760 | prepared.insert(key, result); |
| 1752 | 1761 | } |
| 1753 | 1762 | } |
#### tests/determinism.rs (modified)

@@ -150,6 +150,12 @@ fn repeated_parallel_archive_fetches_are_byte_identical() {
| 150 | 150 | fn assert_repeated_links_identical(inputs: Vec<PathBuf>, root: &Path, label: &str) { |
| 151 | 151 | let baseline = link_once(&inputs, root, &format!("{label}-baseline")) |
| 152 | 152 | .expect("baseline deterministic link"); |
| 153 | + let serial = link_once_with_jobs(&inputs, root, &format!("{label}-serial"), Some(1)) | |
| 154 | + .expect("single-worker deterministic link"); | |
| 155 | + assert_eq!( | |
| 156 | + serial, baseline, | |
| 157 | + "{label}: single-worker link differed from default parallel link" | |
| 158 | + ); | |
| 153 | 159 | let run_count = determinism_run_count(); |
| 154 | 160 | let jobs = determinism_jobs(run_count); |
| 155 | 161 | let queue = Arc::new(Mutex::new((0..run_count).collect::<VecDeque<_>>())); |
@@ -200,6 +206,15 @@ fn assert_repeated_links_identical(inputs: Vec<PathBuf>, root: &Path, label: &st | ||
| 200 | 206 | } |
| 201 | 207 | |
| 202 | 208 | fn link_once(inputs: &[PathBuf], root: &Path, run_name: &str) -> Result<Vec<u8>, String> { |
| 209 | + link_once_with_jobs(inputs, root, run_name, None) | |
| 210 | +} | |
| 211 | + | |
| 212 | +fn link_once_with_jobs( | |
| 213 | + inputs: &[PathBuf], | |
| 214 | + root: &Path, | |
| 215 | + run_name: &str, | |
| 216 | + jobs: Option<usize>, | |
| 217 | +) -> Result<Vec<u8>, String> { | |
| 203 | 218 | let dir = root.join(run_name); |
| 204 | 219 | fs::create_dir_all(&dir).map_err(|e| format!("create {}: {e}", dir.display()))?; |
| 205 | 220 | let out = dir.join("deterministic.out"); |
@@ -207,6 +222,7 @@ fn link_once(inputs: &[PathBuf], root: &Path, run_name: &str) -> Result<Vec<u8>, | ||
| 207 | 222 | inputs: inputs.to_vec(), |
| 208 | 223 | output: Some(out.clone()), |
| 209 | 224 | kind: OutputKind::Executable, |
| 225 | + jobs, | |
| 210 | 226 | ..LinkOptions::default() |
| 211 | 227 | }; |
| 212 | 228 | Linker::run(&opts).map_err(|e| format!("link {}: {e}", out.display()))?; |
#### tests/resolve_integration.rs (modified)

@@ -164,8 +164,8 @@ fn resolve_pipeline_pulls_archive_member_and_flags_missing() {
| 164 | 164 | "unexpected duplicates in seeding: {:?}", |
| 165 | 165 | seed_report.duplicates |
| 166 | 166 | ); |
| 167 | - let drain_report = | |
| 168 | - drain_fetches(&mut inputs, &mut table, seed_report.pending_fetches).expect("drain_fetches"); | |
| 167 | + let drain_report = drain_fetches(&mut inputs, &mut table, seed_report.pending_fetches, 1) | |
| 168 | + .expect("drain_fetches"); | |
| 169 | 169 | assert!( |
| 170 | 170 | drain_report.fetched_members >= 1, |
| 171 | 171 | "expected at least one archive member fetched; got {}", |
#### tests/snapshots/help.txt (modified)

@@ -39,6 +39,7 @@ Options:
| 39 | 39 | Select chained fixups vs classic dyld info |
| 40 | 40 | -all_load Force-load every archive member |
| 41 | 41 | -force_load <archive> Force-load one archive |
| 42 | + -j <jobs> Limit parallel worker jobs (`1` disables parallelism) | |
| 42 | 43 | -Wl,<arg,arg,...> Normalize comma-separated driver flags |
| 43 | 44 | --dump <path> Dump a Mach-O file summary |
| 44 | 45 | --dump-archive <path> Dump an archive summary |