-------------------------
Cargo.toml
-------------------------
[package]
name = "stream_fixer"
version = "0.1.0"
edition = "2021"

[dependencies]
libz-sys = { version = "1", features = ["static"] }
rayon = "1"
serde = { version = "1", features = ["derive"] }
serde_json = "1"

[profile.release]
opt-level = 3
lto = true
-------------------------

-------------------------
main.rs
-------------------------
//! Fast zlib stream fixer using Rust + Rayon parallelism.
//!
//! Takes a base64 file + stream byte offsets, finds b64 character changes
//! that make the zlib stream decompress. Uses raw inflate (ignoring checksum)
//! so the caller can do Adler-32 repair afterward.
//!
//! Usage: stream_fixer <b64_file> <stream_start> <stream_end> [options]
//! Options:
//!   --max-iter N    Max greedy iterations (default 30)
//!   --max-nodes N   Max DFS nodes (default 100000)
//!   --window N      Search window in b64 chars (default 600)
//!   --branch N      DFS branching factor (default 3)
//!   --max-depth N   DFS max depth (default 20)
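//!
//! Example invocation (file name and offsets are hypothetical, shown only to
//! illustrate the argument order above):
//!   stream_fixer page.b64 81920 131072 --window 800 --max-depth 12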

use libz_sys::*;
use rayon::prelude::*;
use serde::Serialize;
use std::mem::MaybeUninit;
use std::os::raw::c_int;
use std::{env, fs, process};

// ── Base64 ──────────────────────────────────────────────────────────

const B64: &[u8; 64] = b"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";

/// Reverse lookup table: ASCII base64 character -> 6-bit value
/// (non-alphabet bytes map to 0).
fn b64_table() -> [u8; 128] {
    let mut t = [0u8; 128];
    for (i, &c) in B64.iter().enumerate() {
        t[c as usize] = i as u8;
    }
    t
}

/// Decode one 4-character base64 group starting at `pos` into its 3 bytes.
#[inline]
fn decode_group(b64: &[u8], pos: usize, t: &[u8; 128]) -> [u8; 3] {
    let a = t[b64[pos] as usize] as u32;
    let b = t[b64[pos + 1] as usize] as u32;
    let c = t[b64[pos + 2] as usize] as u32;
    let d = t[b64[pos + 3] as usize] as u32;
    [
        ((a << 2) | (b >> 4)) as u8,
        (((b & 0xF) << 4) | (c >> 2)) as u8,
        (((c & 3) << 6) | d) as u8,
    ]
}

/// Map an offset in the decoded byte stream to the base64 offset of the
/// 4-character group containing it.
#[inline]
fn pdf_to_b64(pdf_pos: usize) -> usize {
    (pdf_pos / 3) * 4
}

/// Decode the base64 covering decoded-byte range [start, end) and return
/// those bytes (clamped to what the input actually covers).
fn decode_range(b64: &[u8], start: usize, end: usize, t: &[u8; 128]) -> Vec<u8> {
    let b64_start = pdf_to_b64(start);
    let b64_end = (pdf_to_b64(end) + 4).min(b64.len());
    let mut decoded = Vec::with_capacity((b64_end - b64_start) / 4 * 3 + 3);
    let mut pos = b64_start;
    while pos + 3 < b64.len() && pos < b64_end {
        let g = decode_group(b64, pos, t);
        decoded.extend_from_slice(&g);
        pos += 4;
    }
    let offset = start - (b64_start / 4) * 3;
    let len = end - start;
    let actual_len = len.min(decoded.len().saturating_sub(offset));
    decoded[offset..offset + actual_len].to_vec()
}

// ── Inflate helpers (raw libz via MaybeUninit for safety) ───────────

/// Create a zeroed z_stream via MaybeUninit (avoids the zeroed() panic on fn ptrs).
/// Returns a raw pointer to a stack-allocated z_stream. Caller must inflateEnd.
macro_rules! zstream_init {
    ($strm:ident) => {
        let mut $strm = MaybeUninit::<z_stream>::uninit();
        std::ptr::write_bytes($strm.as_mut_ptr(), 0, 1);
        let $strm = $strm.as_mut_ptr();
    };
}

/// Test if data decompresses as zlib (header + checksum).
fn can_inflate(data: &[u8]) -> bool {
    unsafe {
        zstream_init!(strm);
        if inflateInit_(strm, zlibVersion(), std::mem::size_of::<z_stream>() as c_int) != Z_OK {
            return false;
        }
        (*strm).next_in = data.as_ptr() as *mut _;
        (*strm).avail_in = data.len() as u32;
        let mut out = [0u8; 65536];
        loop {
            (*strm).next_out = out.as_mut_ptr();
            (*strm).avail_out = out.len() as u32;
            let ret = inflate(strm, Z_FINISH);
            match ret {
                Z_STREAM_END => { inflateEnd(strm); return true; }
                Z_OK => continue,
                _ => { inflateEnd(strm); return false; }
            }
        }
    }
}

/// Test if a zlib stream's deflate payload decompresses (the 2-byte zlib
/// header is skipped). Tries stripping 0/2/4 bytes from the end (checksum
/// area) to avoid false negatives from checksum corruption.
fn can_inflate_raw(data: &[u8]) -> bool {
    if data.len() < 6 { return false; }
    for trim in [4usize, 2, 0] {
        let end = data.len().saturating_sub(trim);
        if end <= 2 { continue; }
        if try_raw_inflate(&data[2..end]) { return true; }
    }
    false
}

fn try_raw_inflate(raw: &[u8]) -> bool {
    if raw.is_empty() { return false; }
    unsafe {
        zstream_init!(strm);
        if inflateInit2_(strm, -15, zlibVersion(), std::mem::size_of::<z_stream>() as c_int) != Z_OK {
            return false;
        }
        (*strm).next_in = raw.as_ptr() as *mut _;
        (*strm).avail_in = raw.len() as u32;
        let mut out = [0u8; 65536];
        loop {
            (*strm).next_out = out.as_mut_ptr();
            (*strm).avail_out = out.len() as u32;
            let ret = inflate(strm, Z_FINISH);
            match ret {
                Z_STREAM_END => { inflateEnd(strm); return true; }
                Z_OK => continue,
                _ => { inflateEnd(strm); return false; }
            }
        }
    }
}

/// Check if error is in the checksum area (last 6 bytes). If so, the data
/// is likely OK and just needs Adler-32 repair.
fn is_checksum_error(data: &[u8]) -> bool {
    let err = find_corruption_offset(data);
    err + 6 >= data.len()
}

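// Illustrative sketch only: the Adler-32 repair mentioned above is left to the
// caller, but per RFC 1950 it is simply the checksum below, computed over the
// inflated payload and written big-endian into the last 4 bytes of the zlib
// stream. This helper is not called anywhere in the fixer itself.
#[allow(dead_code)]
fn adler32_sketch(data: &[u8]) -> u32 {
    const MOD_ADLER: u32 = 65521;
    let mut a: u32 = 1;
    let mut b: u32 = 0;
    for &byte in data {
        a = (a + byte as u32) % MOD_ADLER;
        b = (b + a) % MOD_ADLER;
    }
    (b << 16) | a
}
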
/// Test if a prefix of raw deflate data inflates without data error.
fn try_raw_prefix(raw: &[u8], len: usize) -> bool {
    unsafe {
        zstream_init!(strm);
        if inflateInit2_(strm, -15, zlibVersion(), std::mem::size_of::<z_stream>() as c_int) != Z_OK {
            return false;
        }
        (*strm).next_in = raw.as_ptr() as *mut _;
        (*strm).avail_in = len as u32;
        let mut out = [0u8; 65536];
        loop {
            (*strm).next_out = out.as_mut_ptr();
            (*strm).avail_out = out.len() as u32;
            let ret = inflate(strm, Z_SYNC_FLUSH);
            match ret {
                Z_OK => {
                    if (*strm).avail_in == 0 { inflateEnd(strm); return true; }
                }
                Z_STREAM_END => { inflateEnd(strm); return true; }
                Z_BUF_ERROR => { inflateEnd(strm); return true; }
                _ => { inflateEnd(strm); return false; }
            }
        }
    }
}

/// Binary search for the corruption offset in a zlib stream.
fn find_corruption_offset(data: &[u8]) -> usize {
    if data.len() < 3 { return 0; }
    let raw = &data[2..];
    let mut lo = 0usize;
    let mut hi = raw.len();
    while lo + 1 < hi {
        let mid = (lo + hi) / 2;
        if try_raw_prefix(raw, mid) {
            lo = mid;
        } else {
            hi = mid;
        }
    }
    lo + 2
}

/// Check if a change passes the known error position.
fn passes_err_pos(data: &[u8], err_pos: usize) -> bool {
    if err_pos < 3 || data.len() < 3 { return false; }
    let test_len = (data.len() - 2).min(err_pos + 3);
    try_raw_prefix(&data[2..], test_len)
}

// ── Confusion pairs ─────────────────────────────────────────────────

/// Visually-confusable substitutions to try first for a given base64 character.
fn confusion_subs(ch: u8) -> &'static [u8] {
    match ch {
        b'l' => &[b'1', b'I'],
        b'1' => &[b'l', b'I'],
        b'I' => &[b'l', b'1'],
        b'O' => &[b'0'],
        b'0' => &[b'O'],
        b'5' => &[b'S'],
        b'S' => &[b'5'],
        b'8' => &[b'B'],
        b'B' => &[b'8'],
        _ => &[],
    }
}

// ── Candidate generation ────────────────────────────────────────────

/// One candidate single-character edit to the base64 text.
#[derive(Clone)]
struct Change {
    b64_pos: usize,
    new_char: u8,
    /// (stream_data_offset, new_byte_value)
    byte_changes: Vec<(usize, u8)>,
}

/// Enumerate single-character base64 edits in [b64_lo, b64_hi) and the decoded
/// byte changes each one causes inside the stream. With `confusion_only`, only
/// visually-confusable substitutions are tried; otherwise every other base64
/// character is tried as well.
fn generate_candidates(
    b64: &[u8], t: &[u8; 128],
    stream_start: usize, stream_end: usize,
    b64_lo: usize, b64_hi: usize,
    confusion_only: bool,
) -> Vec<Change> {
    let mut candidates = Vec::new();
    for b64_pos in b64_lo..b64_hi {
        if b64_pos + 3 >= b64.len() { break; }
        let orig = b64[b64_pos];
        let group_start = (b64_pos / 4) * 4;
        if group_start + 3 >= b64.len() { continue; }
        let pdf_start = (group_start / 4) * 3;
        let old_bytes = decode_group(b64, group_start, t);

        let subs = confusion_subs(orig);
        let chars_to_try: Vec<u8> = if confusion_only {
            subs.to_vec()
        } else {
            let mut v = subs.to_vec();
            for &c in B64 {
                if c != orig && !v.contains(&c) {
                    v.push(c);
                }
            }
            v
        };

        for new_char in chars_to_try {
            let idx_in_group = b64_pos - group_start;
            let mut group = [b64[group_start], b64[group_start + 1], b64[group_start + 2], b64[group_start + 3]];
            group[idx_in_group] = new_char;
            // Re-decode the modified 4-char group into its 3 bytes.
            let new_bytes = [
                ((t[group[0] as usize] as u32) << 2 | (t[group[1] as usize] as u32) >> 4) as u8,
                (((t[group[1] as usize] as u32) & 0xF) << 4 | (t[group[2] as usize] as u32) >> 2) as u8,
                (((t[group[2] as usize] as u32) & 3) << 6 | t[group[3] as usize] as u32) as u8,
            ];
            let mut byte_changes = Vec::new();
            for k in 0..3 {
                let off = pdf_start + k;
                if off >= stream_start && off < stream_end && old_bytes[k] != new_bytes[k] {
                    byte_changes.push((off - stream_start, new_bytes[k]));
                }
            }
            if byte_changes.is_empty() { continue; }
            candidates.push(Change { b64_pos, new_char, byte_changes });
        }
    }
    candidates
}

// ── Parallel evaluation ─────────────────────────────────────────────

/// Evaluate candidates in parallel. Returns (index, new_error_pos) pairs.
/// new_error_pos = usize::MAX means fully fixed.
fn eval_candidates(
    stream_data: &[u8],
    candidates: &[Change],
    err_pos: usize,
) -> Vec<(usize, usize)> {
    candidates.par_iter().enumerate().filter_map(|(idx, cand)| {
        let mut data = stream_data.to_vec();
        for &(off, val) in &cand.byte_changes {
            if off < data.len() { data[off] = val; }
        }
        if can_inflate_raw(&data) {
            return Some((idx, usize::MAX));
        }
        if passes_err_pos(&data, err_pos) {
            let new_err = find_corruption_offset(&data);
            if new_err > err_pos {
                return Some((idx, new_err));
            }
        }
        None
    }).collect()
}

// ── Iterative greedy search ─────────────────────────────────────────

fn iterative_search(
    b64: &mut Vec<u8>, t: &[u8; 128],
    stream_start: usize, stream_end: usize,
    max_iter: usize, window: usize,
) -> SearchResult {
    let mut stream_data = decode_range(b64, stream_start, stream_end, t);
    let mut changes: Vec<(usize, u8, u8)> = Vec::new();

    for iter in 0..max_iter {
        if can_inflate_raw(&stream_data) {
            return SearchResult::success(&changes, format!("{} fixes in {} iter", changes.len(), iter));
        }

        let err_pos = find_corruption_offset(&stream_data);
        // Error in checksum area = raw data is fixed, caller does Adler-32 repair
        if err_pos + 6 >= stream_data.len() {
            eprintln!();
            return SearchResult::success(&changes,
                format!("{} fixes + checksum needed", changes.len()));
        }

        let b64_center = pdf_to_b64(stream_start + err_pos);
        let b64_lo = pdf_to_b64(stream_start).max(b64_center.saturating_sub(window));
        let b64_hi = (pdf_to_b64(stream_end) + 4).min(b64_center + 20).min(b64.len());

        let candidates = generate_candidates(b64, t, stream_start, stream_end, b64_lo, b64_hi, false);
        if candidates.is_empty() { break; }

        let results = eval_candidates(&stream_data, &candidates, err_pos);
        if results.is_empty() { break; }

        // Greedily apply the candidate that pushes the error position furthest.
        let &(best_idx, best_err) = results.iter().max_by_key(|r| r.1).unwrap();
        let cand = &candidates[best_idx];

        let old_char = b64[cand.b64_pos];
        b64[cand.b64_pos] = cand.new_char;
        for &(off, val) in &cand.byte_changes {
            stream_data[off] = val;
        }
        changes.push((cand.b64_pos, old_char, cand.new_char));

        eprint!("\r iter {}: err {} -> {}, b64[{}] '{}' -> '{}'",
            iter, err_pos,
            if best_err == usize::MAX { "OK".to_string() } else { best_err.to_string() },
            cand.b64_pos, old_char as char, cand.new_char as char);

        if best_err == usize::MAX {
            eprintln!();
            return SearchResult::success(&changes, format!("{} fixes in {} iter", changes.len(), iter + 1));
        }
    }

    // Final check
    if can_inflate_raw(&stream_data) {
        eprintln!();
        return SearchResult::success(&changes, format!("{} fixes", changes.len()));
    }

    // Revert all applied edits so later phases see the original base64.
    for &(pos, old, _) in changes.iter().rev() {
        b64[pos] = old;
    }
    eprintln!();
    SearchResult::fail(format!("iterative: {} partial reverted", changes.len()))
}

// ── DFS with backtracking ───────────────────────────────────────────

struct DfsCtx<'a> {
    b64: &'a mut Vec<u8>,
    t: &'a [u8; 128],
    stream_data: Vec<u8>,
    stream_start: usize,
    stream_end: usize,
    applied: Vec<(usize, u8, u8, Vec<(usize, u8)>)>, // (b64_pos, old_char, new_char, old_bytes)
    node_count: usize,
    max_nodes: usize,
    max_depth: usize,
    window: usize,
    branch: usize,
}

impl<'a> DfsCtx<'a> {
    fn search(&mut self, depth: usize) -> bool {
        if can_inflate_raw(&self.stream_data) { return true; }
        if depth >= self.max_depth || self.node_count >= self.max_nodes { return false; }
        self.node_count += 1;

        let err_pos = find_corruption_offset(&self.stream_data);
        // Error in checksum area = raw data is OK
        if err_pos + 6 >= self.stream_data.len() { return true; }

        let b64_center = pdf_to_b64(self.stream_start + err_pos);

        // Pass 1: confusion pairs in wide window
        let b64_lo = pdf_to_b64(self.stream_start).max(b64_center.saturating_sub(self.window));
        let b64_hi = (pdf_to_b64(self.stream_end) + 4).min(b64_center + 20).min(self.b64.len());
        let mut candidates = generate_candidates(self.b64, self.t, self.stream_start, self.stream_end, b64_lo, b64_hi, true);

        // Pass 2: all chars in tight window if no confusion candidates
        if candidates.is_empty() {
            let tight_lo = pdf_to_b64(self.stream_start).max(b64_center.saturating_sub(60));
            let tight_hi = (pdf_to_b64(self.stream_end) + 4).min(b64_center + 10).min(self.b64.len());
            candidates = generate_candidates(self.b64, self.t, self.stream_start, self.stream_end, tight_lo, tight_hi, false);
        }
        if candidates.is_empty() { return false; }

        let results = eval_candidates(&self.stream_data, &candidates, err_pos);
        if results.is_empty() { return false; }

        // Explore the most promising candidates (largest error-position advance) first.
        let mut sorted: Vec<(usize, usize)> = results;
        sorted.sort_by(|a, b| b.1.cmp(&a.1));

        for &(idx, improvement) in sorted.iter().take(self.branch) {
            if self.node_count >= self.max_nodes { break; }

            let cand = candidates[idx].clone();
            let old_char = self.b64[cand.b64_pos];

            // Save old bytes for revert
            let old_bytes: Vec<(usize, u8)> = cand.byte_changes.iter()
                .map(|&(off, _)| (off, self.stream_data[off]))
                .collect();

            // Apply
            self.b64[cand.b64_pos] = cand.new_char;
            for &(off, val) in &cand.byte_changes {
                self.stream_data[off] = val;
            }
            self.applied.push((cand.b64_pos, old_char, cand.new_char, old_bytes.clone()));

            if improvement == usize::MAX || self.search(depth + 1) {
                return true;
            }

            // Revert
            let (pos, old, _, bytes) = self.applied.pop().unwrap();
            self.b64[pos] = old;
            for (off, val) in bytes {
                self.stream_data[off] = val;
            }
        }

        false
    }
}

fn dfs_search(
    b64: &mut Vec<u8>, t: &[u8; 128],
    stream_start: usize, stream_end: usize,
    max_nodes: usize, max_depth: usize, window: usize, branch: usize,
) -> SearchResult {
    let stream_data = decode_range(b64, stream_start, stream_end, t);
    if can_inflate_raw(&stream_data) {
        return SearchResult::success(&[], "already OK".into());
    }

    let mut ctx = DfsCtx {
        b64, t, stream_data, stream_start, stream_end,
        applied: Vec::new(),
        node_count: 0, max_nodes, max_depth, window, branch,
    };

    if ctx.search(0) {
        let changes: Vec<(usize, u8, u8)> = ctx.applied.iter()
            .map(|(pos, old, new, _)| (*pos, *old, *new))
            .collect();
        eprintln!(" DFS: {} fixes, {} nodes explored", changes.len(), ctx.node_count);
        SearchResult::success(&changes, format!("DFS {} fixes ({} nodes)", changes.len(), ctx.node_count))
    } else {
        // Revert (should already be reverted by backtracking, but just in case)
        for (pos, old, _, bytes) in ctx.applied.iter().rev() {
            ctx.b64[*pos] = *old;
            for &(off, val) in bytes {
                ctx.stream_data[off] = val;
            }
        }
        eprintln!(" DFS: exhausted ({} nodes)", ctx.node_count);
        SearchResult::fail(format!("DFS exhausted ({} nodes)", ctx.node_count))
    }
}

// ── Output ──────────────────────────────────────────────────────────

#[derive(Serialize)]
struct ChangeOutput {
    b64_pos: usize,
    old_char: String,
    new_char: String,
}

#[derive(Serialize)]
struct SearchResult {
    fixed: bool,
    changes: Vec<ChangeOutput>,
    desc: String,
}

impl SearchResult {
    fn success(changes: &[(usize, u8, u8)], desc: String) -> Self {
        SearchResult {
            fixed: true,
            changes: changes.iter().map(|&(pos, old, new)| ChangeOutput {
                b64_pos: pos,
                old_char: String::from(old as char),
                new_char: String::from(new as char),
            }).collect(),
            desc,
        }
    }

    fn fail(desc: String) -> Self {
        SearchResult { fixed: false, changes: vec![], desc }
    }
}

// ── Main ────────────────────────────────────────────────────────────

fn main() {
    // Rayon threads need enough stack for zlib's internal state + output buffers
    rayon::ThreadPoolBuilder::new()
        .stack_size(4 * 1024 * 1024) // 4MB per thread
        .build_global()
        .ok();

    let args: Vec<String> = env::args().collect();
    if args.len() < 4 {
        eprintln!("Usage: stream_fixer <b64_file> <stream_start> <stream_end> [options]");
        eprintln!("Options: --max-iter N --max-nodes N --window N --branch N --max-depth N");
        process::exit(1);
    }

    let b64_file = &args[1];
    let stream_start: usize = args[2].parse().expect("invalid stream_start");
    let stream_end: usize = args[3].parse().expect("invalid stream_end");

    // Parse options
    let mut max_iter = 30usize;
    let mut max_nodes = 100_000usize;
    let mut window = 600usize;
    let mut branch = 3usize;
    let mut max_depth = 20usize;

    let mut i = 4;
    while i < args.len() {
        match args[i].as_str() {
            "--max-iter" => { i += 1; max_iter = args[i].parse().unwrap(); }
            "--max-nodes" => { i += 1; max_nodes = args[i].parse().unwrap(); }
            "--window" => { i += 1; window = args[i].parse().unwrap(); }
            "--branch" => { i += 1; branch = args[i].parse().unwrap(); }
            "--max-depth" => { i += 1; max_depth = args[i].parse().unwrap(); }
            _ => { eprintln!("Unknown option: {}", args[i]); process::exit(1); }
        }
        i += 1;
    }

    eprintln!("stream_fixer: stream [{}, {}), {}b, window={}, max_iter={}, max_nodes={}, branch={}, depth={}",
        stream_start, stream_end, stream_end - stream_start,
        window, max_iter, max_nodes, branch, max_depth);

    // Read base64
    let raw = fs::read_to_string(b64_file).expect("cannot read b64 file");
    let clean: String = raw.chars().filter(|c| !c.is_whitespace()).collect();
    let mut b64 = clean.into_bytes();
    let t = b64_table();

    // Check current state
    let stream_data = decode_range(&b64, stream_start, stream_end, &t);
    let err_pos = find_corruption_offset(&stream_data);
    let raw_ok = can_inflate_raw(&stream_data);
    let full_ok = can_inflate(&stream_data);
    eprintln!(" initial: raw_ok={}, full_ok={}, err_pos={}/{}", raw_ok, full_ok, err_pos, stream_data.len());

    if full_ok {
        let result = SearchResult::success(&[], "already OK".into());
        println!("{}", serde_json::to_string(&result).unwrap());
        return;
    }

    if raw_ok {
        let result = SearchResult::success(&[], "raw OK, needs checksum only".into());
        println!("{}", serde_json::to_string(&result).unwrap());
        return;
    }

    // Phase 1: Iterative greedy
    eprintln!(" Phase 1: iterative greedy...");
    let result = iterative_search(&mut b64, &t, stream_start, stream_end, max_iter, window);
    if result.fixed {
        println!("{}", serde_json::to_string(&result).unwrap());
        return;
    }

    // Phase 2: DFS with backtracking
    eprintln!(" Phase 2: DFS backtracking...");
    let result = dfs_search(&mut b64, &t, stream_start, stream_end, max_nodes, max_depth, window, branch);
    println!("{}", serde_json::to_string(&result).unwrap());
}

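// Minimal sanity-check sketch for the pure helpers above, using the classic
// base64 example "TWFu" -> "Man" and the well-known Adler-32 value for
// "Wikipedia". Illustrative only, not an exhaustive test suite.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn decode_group_matches_known_base64() {
        let t = b64_table();
        assert_eq!(decode_group(b"TWFu", 0, &t), *b"Man");
    }

    #[test]
    fn decode_range_extracts_inner_bytes() {
        let t = b64_table();
        // "TWFuTWFu" decodes to "ManMan"; bytes [1, 5) are "anMa".
        assert_eq!(decode_range(b"TWFuTWFu", 1, 5, &t), b"anMa".to_vec());
    }

    #[test]
    fn pdf_to_b64_maps_to_group_start() {
        assert_eq!(pdf_to_b64(0), 0);
        assert_eq!(pdf_to_b64(3), 4);
        assert_eq!(pdf_to_b64(7), 8);
    }

    #[test]
    fn adler32_sketch_matches_known_value() {
        assert_eq!(adler32_sketch(b"Wikipedia"), 0x11E60398);
    }
}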