  1. #!/usr/bin/env python3
  2.  
  3. import os
  4. import sys
  5. import math
  6. import logging
  7. import requests
  8. import subprocess
  9. import shlex
  10. import time
  11. import asyncio
  12. import json
  13. import platform
  14. import tempfile
  15. import glob
  16. import argparse
  17. import shutil
  18. import pycountry
  19. from uuid import uuid4
  20. from shutil import which
  21. from types import SimpleNamespace
  22. from pathlib import Path
  23. from datetime import datetime
  24.  
  25. from dataclasses import dataclass
  26. from typing import Tuple, Dict, Any, Optional, List
  27.  
  28. # Configure the logger
  29. logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
  30. logger = logging.getLogger(__name__)
  31.  
  32. # Attempt to import Telethon
  33. try:
  34.     from telethon import TelegramClient
  35.     from telethon.tl.types import DocumentAttributeVideo, DocumentAttributeFilename
  36.     from telethon.errors import FloodWaitError, FilePartsInvalidError
  37.     TELETHON_AVAILABLE = True
  38.     print("✅ Telethon importato correttamente")
  39. except ImportError as e:
  40.     TELETHON_AVAILABLE = False
  41.     logger.error(f"❌ Telethon non disponibile: {e}")
  42.  
  43. ########## CONFIGURATIONS ##########
  44. # Telegram
  45. telegram_bot_token = "***"
  46. owner_chatid = "***"
  47.  
  48. # Radarr
  49. radarr_api = "***"
  50. radarr_host = "***"
  51. radarr_port = "***"
  52.  
  53. # Sonarr
  54. sonarr_api = "***"
  55. sonarr_host = "***"
  56. sonarr_port = "***"
  57. ########## CONFIGURATIONS ##########
  58.  
  59. keyword = "searcharr"
  60.  
  61. chunk_bytes = 512 * 1024 # 512 KiB, Telethon/Telegram upload-part size limit
  62. max_parts_default = 4000 # typical server-side part limit (non-Premium)
  63. safe_limit_factor = 1
  64. safe_limit_bytes = int(chunk_bytes * max_parts_default * safe_limit_factor)
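# Illustrative only: with the defaults above, safe_limit_bytes = 512 KiB * 4000 * 1
# = 2_097_152_000 bytes (about 1.95 GiB), i.e. the largest file that fits in 4000 upload parts.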
  65.  
  66. REENCODE_OUTPUT_EXT = ".mkv"
  67.  
  68. FORCE_MP4_PARTS = False  # if True and MP4Box is missing, remux the .mkv parts to .mp4 via ffmpeg -c copy + faststart
  69.  
  70. docker_mount_point = "/srv/dev-disk-by-uuid-ecc70552-d999-49f3-b5e4-9a0250e1c559/Sharing:/Sharing"
  71. whisperai_model_mount_point = "/srv/dev-disk-by-uuid-ecc70552-d999-49f3-b5e4-9a0250e1c559/Sharing/Telegram-bot/audiomedia-models:/models"
  72.  
  73. ENV_DUMP = False # True/False
  74. ENV_DUMP_FILE = "/Sharing/Telegram-bot/Sonarr-env.txt" # output file path
  75.  
  76. # --- HEVC / quality policies ---
  77. # Minimum H.265 floors for live-action movies/series (kbps). Tune them as you like.
  78. H265_FLOOR_KBPS = {
  79.     2160: 12000,
  80.     1440:  8100,
  81.     1080:  1000,
  82.     720:    750,
  83.     576:    450,
  84.     480:    420,
  85. }
  86.  
  87. MAX_TARGET_HEIGHT = 1080
  88.  
  89. # "Safe" size estimate (stay within the target), already used by the code below
  90. RETRY_ON_OVERSHOOT = True       # a single retry lowering video_kbps if the output exceeds safe_limit_bytes
  91. RETRY_SAFETY_PCT = 0.005
  92. FUDGE_VBR_FIRST_TRY = 0.0135
  93.  
  94. # Audio bitrate per track (used when the real bitrate cannot be read)
  95. AUDIO_KBPS_PER_TRACK = 96
  96. FORCE_STEREO = True              # always force 2 channels
  97.  
  98. # Technical minimum video bitrate (guard-rail)
  99. MIN_VIDEO_KBPS = 100
  100.  
  101. # Preserve 10-bit/HDR on output (uses p010le + main10)
  102. NVENC_PRESERVE_10BIT = True
  103.  
  104. def dump_env():
  105.     if not ENV_DUMP:
  106.         return None
  107.     # create the output directory if needed
  108.     d = os.path.dirname(ENV_DUMP_FILE)
  109.     if d:
  110.         os.makedirs(d, exist_ok=True)
  111.     # use "a" instead of "w" if you want to accumulate multiple runs
  112.     with open(ENV_DUMP_FILE, "w", encoding="utf-8") as f:
  113.         for k, v in sorted(os.environ.items()):
  114.             f.write(f"{k}={v}\n")
  115.     return ENV_DUMP_FILE
  116.  
  117. def _safe_run(cmd: list[str]) -> None:
  118.     try:
  119.         subprocess.run(cmd, check=False, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
  120.     except Exception:
  121.         pass
  122.  
  123. def update_mkv_metadata_final(out_path: Path, src_path: Path, eff_n_audio: int) -> None:
  124.     """
  125.     Runs ONLY when the final file is within the size limit.
  126.     - Resets and regenerates the Track Statistics Tags (MediaInfo fix: correct bitrate/bytes/duration)
  127.     - Segment title = source file name (without extension)
  128.     - First audio track set as default; the others as non-default
  129.     - Comment/encoded_by tags
  130.     Requires mkvpropedit in PATH; if missing, it exits silently.
  131.     """
  132.     if out_path.suffix.lower() != '.mkv':
  133.         return
  134.     if shutil.which('mkvpropedit') is None:
  135.         return
  136.  
  137.     # 1) Track Statistics Tags: remove any leftovers and regenerate from the current content
  138.     _safe_run(['mkvpropedit', str(out_path), '--delete-track-statistics-tags'])
  139.     _safe_run(['mkvpropedit', str(out_path), '--add-track-statistics-tags'])
  140.  
  141.     # 2) Segment title
  142.     title = src_path.stem
  143.     _safe_run(['mkvpropedit', str(out_path), '--set', f'title={title}'])
  144.  
  145.     # 3) Default-audio flags: a1 default, the rest not (mkvpropedit counts from 1)
  146.     if eff_n_audio > 0:
  147.         _safe_run(['mkvpropedit', str(out_path), '--edit', 'track:a1', '--set', 'flag-default=1'])
  148.         for i in range(2, eff_n_audio+1):
  149.             _safe_run(['mkvpropedit', str(out_path), '--edit', f'track:a{i}', '--set', 'flag-default=0'])
  150.  
  151.     # 4) Global comments (optional)
  152.     comment = f'Encoded HEVC NVENC + AAC LC @ {AUDIO_KBPS_PER_TRACK} kbps per track'
  153.     _safe_run(['mkvpropedit', str(out_path), '--edit', 'info', '--set', f'comment={comment}'])
  154.     _safe_run(['mkvpropedit', str(out_path), '--edit', 'info', '--set', 'encoded_by=auto-compressor'])
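# Illustrative only: for an accepted "Movie.encode.mkv", the helper above issues the equivalent of
# these mkvpropedit calls (file names and values depend on the actual source):
#   mkvpropedit Movie.encode.mkv --delete-track-statistics-tags
#   mkvpropedit Movie.encode.mkv --add-track-statistics-tags
#   mkvpropedit Movie.encode.mkv --set title=Movie
#   mkvpropedit Movie.encode.mkv --edit track:a1 --set flag-default=1
#   mkvpropedit Movie.encode.mkv --edit track:a2 --set flag-default=0
#   mkvpropedit Movie.encode.mkv --edit info --set comment=... --edit info --set encoded_by=auto-compressor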
  155.  
  156. def _analyze_audio_languages_with_docker(src: Path) -> list[dict]:
  157.     host_prefix, container_prefix = docker_mount_point.split(":", 1)
  158.     p = src.resolve().as_posix()
  159.     if p.startswith(host_prefix):
  160.         container_file = container_prefix + p[len(host_prefix):]
  161.     else:
  162.         container_file = p  # not under the mount: use the path as-is (no assert)
  163.  
  164.     cmd = [
  165.         "docker", "run", "--rm", "--gpus", "all",
  166.         "-v", docker_mount_point,
  167.         "-v", whisperai_model_mount_point,
  168.         "chryses/audiomedia-checker:latest",
  169.         "--gpu", "--check-all-tracks", "--dry-run", "--json",
  170.         "--file", container_file,
  171.     ]
  172.     res = subprocess.run(cmd, capture_output=True, text=True, check=True)
  173.     return json.loads(res.stdout.strip())  # e.g.: [{"track":1,"language":"ita"}, ...]
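# Illustrative only: with the docker_mount_point configured above, a host path such as
#   /srv/dev-disk-by-uuid-ecc70552-d999-49f3-b5e4-9a0250e1c559/Sharing/Movies/Film.mkv
# is rewritten to the in-container path
#   /Sharing/Movies/Film.mkv
# before being passed to the audiomedia-checker image via --file.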
  174.  
  175. def _decide_audio_selection(profile_language: str, langs: List[dict]) -> Tuple[bool, List[int], Optional[str], str]:
  176.     """
  177.     Returns:
  178.       - filter_enabled: bool → True if we must filter (i.e. drop some tracks)
  179.       - keep_relidx: 0-based list of relative audio indices to keep (valid for ffmpeg 0:a:{idx})
  180.       - notify_msg: optional message for owner_chatid
  181.       - summary_str: track summary such as "#1=ita, #2=eng"
  182.     Rules:
  183.       - 1 track: always keep it; if lang != profile or lang == "und" → notify.
  184.       - >1 track:
  185.           - if at least one track == profile → filter, keeping every == profile track plus every "und" track
  186.           - if none == profile → notify and do NOT filter (keep all)
  187.     """
  188.     langs_norm = []
  189.     for t in langs or []:
  190.         tr = int(t.get("track"))
  191.         lg = (t.get("language") or "").lower()
  192.         langs_norm.append({"track": tr, "language": lg})
  193.     langs_norm.sort(key=lambda x: x["track"])  # #1, #2, ...
  194.  
  195.     summary_str = ", ".join(f"#{t['track']}={t['language'] or 'und'}" for t in langs_norm)
  196.     notify_msg: Optional[str] = None
  197.  
  198.     if not langs_norm:
  199.         # Edge case: no audio detected → no filtering, just report it.
  200.         return (False, [], "Nessuna traccia audio rilevata dal checker.", summary_str)
  201.  
  202.     profile_language = (profile_language or "").lower()
  203.  
  204.     if len(langs_norm) == 1:
  205.         t = langs_norm[0]
  206.         lang = t["language"]
  207.         # always keep the single track
  208.         keep_relidx = [t["track"] - 1]  # not used for filtering (filter_enabled=False), kept for consistency
  209.         if (lang != profile_language) or (lang == "und"):
  210.             notify_msg = f"File con 1 sola traccia audio: #{t['track']}={lang} (profilo={profile_language})."
  211.         return (False, keep_relidx, notify_msg, summary_str)
  212.  
  213.     # more than one track
  214.     has_profile = any(t["language"] == profile_language for t in langs_norm)
  215.     if not has_profile:
  216.         # no match → keep everything, notify
  217.         notify_msg = f"Nessuna traccia in lingua profilo ({profile_language}). Rilevate: {summary_str}"
  218.         return (False, [], notify_msg, summary_str)
  219.  
  220.     # at least one match → filter
  221.     keep_relidx = [t["track"] - 1 for t in langs_norm if t["language"] == profile_language or t["language"] == "und"]
  222.     return (True, keep_relidx, None, summary_str)
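# Illustrative only (assumed inputs): with profile_language="ita" and
#   langs = [{"track": 1, "language": "eng"}, {"track": 2, "language": "ita"}, {"track": 3, "language": "und"}]
# the rules above give filter_enabled=True, keep_relidx=[1, 2] (ffmpeg maps 0:a:1 and 0:a:2),
# notify_msg=None and summary_str="#1=eng, #2=ita, #3=und".
# With a single English-only track it returns (False, [0], <notify message>, "#1=eng"): the track is kept
# but the owner is notified because it does not match the profile language.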
  223.  
  224. class EncodeNotFeasible(Exception):
  225.     """The minimum floors cannot be respected under the given constraints (limit/duration/audio)."""
  226.     pass
  227.  
  228. # === bucket helpers ===
  229.  
  230. def _sorted_buckets(floors: Dict[int, int]) -> List[int]:
  231.     return sorted(int(k) for k in floors.keys())
  232.  
  233. def _floor_value_min(floors: Dict[int, int]) -> int:
  234.     """Floor value of the lowest bucket (the 'smallest' one)."""
  235.     keys = _sorted_buckets(floors)
  236.     return int(floors[keys[0]])
  237.  
  238. def _bucket_floor_for_height(h: int, floors: Dict[int, int]) -> int:
  239.     """
  240.     Returns the highest bucket <= h; if h < the smallest bucket, returns the smallest bucket anyway.
  241.     Note: this does not imply upscaling; combine it with the MAX_TARGET_HEIGHT logic.
  242.     """
  243.     keys = _sorted_buckets(floors)
  244.     cand = keys[0]
  245.     for k in keys:
  246.         if k <= h:
  247.             cand = k
  248.         else:
  249.             break
  250.     return cand
  251.  
  252. def _cap_to_max_target(bucket_h: int, max_target_h: int, floors: Dict[int, int]) -> int:
  253.     """Caps the bucket at the maximum allowed (max_target_h), stepping down if necessary."""
  254.     keys = _sorted_buckets(floors)
  255.     allowed = [k for k in keys if k <= max_target_h]
  256.     return allowed[-1] if allowed else keys[0]
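# Illustrative only, using the H265_FLOOR_KBPS keys (480/576/720/1080/1440/2160):
#   _bucket_floor_for_height(800, H265_FLOOR_KBPS)  -> 720   (highest bucket <= 800)
#   _bucket_floor_for_height(400, H265_FLOOR_KBPS)  -> 480   (below the smallest bucket: clamp up)
#   _cap_to_max_target(2160, 1080, H265_FLOOR_KBPS) -> 1080  (clamped to MAX_TARGET_HEIGHT)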
  257.  
  258. # def _next_lower_bucket(curr: int, floors: Dict[int, int]) -> Optional[int]:
  259. #     keys = _sorted_buckets(floors)
  260. #     if curr not in keys:
  261. #         # normalizza al bucket più vicino >= curr, poi scendi
  262. #         for k in keys:
  263. #             if curr <= k:
  264. #                 curr = k
  265. #                 break
  266. #     idx = keys.index(curr)
  267. #     return keys[idx-1] if idx > 0 else None
  268.  
  269. # def _floor_for_bucket(bucket_h: int, floors: Dict[int,int]) -> int:
  270. #     return int(floors[int(bucket_h)])
  271.  
  272. def _select_bucket_for_v(v_kbps: int, start_bucket: int, floors: Dict[int, int]) -> Optional[int]:
  273.     """
  274.     Given a video budget and a starting bucket, steps down until it finds a floor <= v_kbps.
  275.     Returns None if not even the smallest bucket can be satisfied.
  276.     """
  277.     b = start_bucket
  278.     while True:
  279.         need = _floor_for_bucket(b, floors)
  280.         if v_kbps >= need:
  281.             return b
  282.         nxt = _next_lower_bucket(b, floors)
  283.         if not nxt:
  284.             return None
  285.         b = nxt
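# Illustrative only: with the floors above, a 900 kb/s video budget starting at the 1080 bucket
# (floor 1000 kb/s) does not fit, so the helper steps down to 720 (floor 750 kb/s) and returns 720;
# a 300 kb/s budget returns None, because even the 480 bucket needs 420 kb/s.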
  286.  
  287. # ================ Bucket/floor utilities =================
  288. def _height_bucket(h: int, floors: Dict[int, int]) -> int:
  289.     keys = sorted(int(k) for k in floors.keys())
  290.     for k in keys:
  291.         if h <= k:
  292.             return k
  293.     return keys[-1]
  294.  
  295. def _next_lower_bucket(curr: int, floors: Dict[int, int]) -> Optional[int]:
  296.     keys = sorted(int(k) for k in floors.keys())
  297.     if curr not in keys:
  298.         curr = _height_bucket(curr, floors)
  299.     idx = keys.index(curr)
  300.     return keys[idx-1] if idx > 0 else None
  301.  
  302. def _floor_for_bucket(bucket_h: int, floors: Dict[int,int]) -> int:
  303.     return int(floors[int(bucket_h)])
  304.  
  305. # ================= ffprobe =================
  306. def _ffprobe_json(path: str) -> Dict[str, Any]:
  307.     cmd = [
  308.         "ffprobe","-v","error","-print_format","json",
  309.         "-show_streams","-show_format", path
  310.     ]
  311.     p = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
  312.     if p.returncode != 0:
  313.         raise RuntimeError(f"ffprobe failed: {p.stderr.decode('utf-8', 'ignore')}")
  314.     return json.loads(p.stdout.decode('utf-8', 'ignore'))
  315.  
  316. def _fmt_bytes(b: int) -> str:
  317.     try:
  318.         if b >= 1024**3: return f"{b/(1024**3):.2f} GiB"
  319.         if b >= 1024**2: return f"{b/(1024**2):.0f} MiB"
  320.         if b >= 1024:    return f"{b/1024:.0f} KiB"
  321.         return f"{b} B"
  322.     except Exception:
  323.         return f"{b} B"
  324.  
  325. def _fmt_hms(sec: float) -> str:
  326.     s = int(round(sec))
  327.     h = s // 3600; m = (s % 3600) // 60; s = s % 60
  328.     return f"{h:02d}:{m:02d}:{s:02d}"
  329.  
  330. def _clone_plan_with_new_v(plan, new_v_kbps: int):
  331.     # Clone the plan keeping audio and vf_height; update v_kbps and safe_total_kbps
  332.     d = dict(plan.__dict__) if hasattr(plan, "__dict__") else dict(
  333.         safe_total_kbps=getattr(plan, "safe_total_kbps"),
  334.         target_audio_kbps_total=getattr(plan, "target_audio_kbps_total"),
  335.         v_kbps=getattr(plan, "v_kbps"),
  336.         vf_height=getattr(plan, "vf_height"),
  337.         source_video_kbps_est=getattr(plan, "source_video_kbps_est", None),
  338.     )
  339.     d["v_kbps"] = int(max(1, new_v_kbps))
  340.     d["safe_total_kbps"] = int(d["target_audio_kbps_total"] + d["v_kbps"])
  341.     return SimpleNamespace(**d)
  342.  
  343. def _compute_retry_v_kbps(duration_s: float, out1_bytes: int, limit_bytes: int,
  344.                           v1_kbps: int, safety_pct: float, floor_kbps: int = 200) -> int:
  345.     """
  346.     Spreads the excess bytes over the duration:
  347.       delta_tot_kbps = ceil( (bytes_over * 8) / duration_s / 1000 )
  348.       v2 = v1 - ceil(delta_tot_kbps * (1 + safety_pct))
  349.     Audio stays unchanged, so the whole reduction falls on the video.
  350.     """
  351.     if duration_s <= 0:
  352.         # conservative fallback: cut 10% plus the safety margin
  353.         dec = int(math.ceil(v1_kbps * (0.10 * (1.0 + safety_pct))))
  354.         return max(floor_kbps, v1_kbps - dec)
  355.  
  356.     bytes_over = max(0, out1_bytes - limit_bytes)
  357.     if bytes_over <= 0:
  358.         return v1_kbps  # should not happen; leave it unchanged
  359.  
  360.     delta_tot_kbps = int(math.ceil((bytes_over * 8) / duration_s / 1000.0))
  361.     delta_with_margin = int(math.ceil(delta_tot_kbps * (1.0 + safety_pct)))
  362.     v2 = max(floor_kbps, v1_kbps - delta_with_margin)
  363.     return v2
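# Illustrative only (assumed numbers): a 3600 s encode that came out 0.2 GiB over the limit needs
#   delta_tot_kbps = ceil(0.2 * 1024**3 * 8 / 3600 / 1000) = 478 kb/s
# and with safety_pct = 0.005 that becomes ceil(478 * 1.005) = 481, so v2 = v1 - 481 (clamped to floor_kbps).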
  364.  
  365. # ================ Context extraction =================
  366. @dataclass
  367. class EncodeContext:
  368.     file_path: str
  369.     ext_low: str
  370.     duration_s: float
  371.     width: int
  372.     height: int
  373.     height_bucket: int
  374.     is_10bit: bool
  375.     n_audio: int
  376.     source_total_kbps_est: int
  377.     source_video_kbps_est: int
  378.  
  379. def extract_encode_context(file_path: str) -> EncodeContext:
  380.     src = Path(file_path)
  381.     if not src.exists():
  382.         raise FileNotFoundError("File non trovato")
  383.  
  384.     info = _ffprobe_json(str(src))
  385.     fmt = info.get("format", {})
  386.     streams = info.get("streams", [])
  387.  
  388.     # duration
  389.     dur = None
  390.     if "duration" in fmt and fmt["duration"]:
  391.         try: dur = float(fmt["duration"])
  392.         except: pass
  393.     if not dur:
  394.         for st in streams:
  395.             if st.get("codec_type") == "video" and st.get("duration"):
  396.                 try: dur = float(st["duration"]); break
  397.                 except: pass
  398.     if not dur or dur <= 0:
  399.         raise ValueError("Durata non disponibile da ffprobe")
  400.  
  401.     # main video stream
  402.     v = next((s for s in streams if s.get("codec_type") == "video"), None)
  403.     if not v:
  404.         raise ValueError("Stream video non trovato")
  405.     w = int(v.get("width") or 0)
  406.     h = int(v.get("height") or 0)
  407.     if w <= 0 or h <= 0:
  408.         raise ValueError("Dimensioni video non valide")
  409.  
  410.     pix_fmt = (v.get("pix_fmt") or "").lower()
  411.     bits_raw = v.get("bits_per_raw_sample")
  412.     is_10 = False
  413.     if isinstance(bits_raw, str):
  414.         try: is_10 = int(bits_raw) >= 10
  415.         except: pass
  416.     if not is_10:
  417.         # heuristic based on the pixel format name
  418.         is_10 = any(tag in pix_fmt for tag in ("p10", "yuv420p10", "p010"))
  419.  
  420.     # audio tracks
  421.     a_streams = [s for s in streams if s.get("codec_type") == "audio"]
  422.     n_a = len(a_streams)
  423.  
  424.     # estimate of the source total bitrate
  425.     file_bytes = src.stat().st_size
  426.     total_kbps_est = max(1, int((file_bytes * 8) / dur / 1000))
  427.  
  428.     # estimate of the audio bitrate
  429.     audio_known = 0
  430.     for a in a_streams:
  431.         try:
  432.             br = int(a.get("bit_rate") or 0)
  433.             audio_known += br
  434.         except:
  435.             pass
  436.     audio_kbps_known = audio_known // 1000 if audio_known > 0 else 0
  437.     audio_kbps_guess = n_a * AUDIO_KBPS_PER_TRACK if audio_kbps_known == 0 else audio_kbps_known
  438.  
  439.     # estimate of the video bitrate
  440.     v_bit_rate = v.get("bit_rate")
  441.     if v_bit_rate:
  442.         try:
  443.             src_v_kbps = max(MIN_VIDEO_KBPS, int(int(v_bit_rate) / 1000))
  444.         except:
  445.             src_v_kbps = max(MIN_VIDEO_KBPS, total_kbps_est - audio_kbps_guess)
  446.     else:
  447.         src_v_kbps = max(MIN_VIDEO_KBPS, total_kbps_est - audio_kbps_guess)
  448.  
  449.     bucket = _height_bucket(h, H265_FLOOR_KBPS)
  450.  
  451.     return EncodeContext(
  452.         file_path=str(src),
  453.         ext_low=src.suffix.lower(),
  454.         duration_s=dur,
  455.         width=w,
  456.         height=h,
  457.         height_bucket=bucket,
  458.         is_10bit=is_10,
  459.         n_audio=n_a,
  460.         source_total_kbps_est=total_kbps_est,
  461.         source_video_kbps_est=src_v_kbps
  462.     )
  463.  
  464. # ================ Target computation (first/second attempt) =================
  465. @dataclass
  466. class TargetPlan:
  467.     vf_height: int
  468.     v_kbps: int
  469.     target_audio_kbps_total: int
  470.     safe_total_kbps: int
  471.     source_video_kbps_est: int
  472.  
  473. # def compute_target_for_size(ctx: EncodeContext, safe_limit_bytes: int, n_audio_override: Optional[int] = None) -> TargetPlan:
  474. #     safe_total_kbps = max(1, int((safe_limit_bytes * 8) / ctx.duration_s / 1000))
  475. #     used_n_audio = n_audio_override if (n_audio_override is not None) else ctx.n_audio
  476. #     audio_kbps = max(0, used_n_audio) * AUDIO_KBPS_PER_TRACK
  477. #     v_kbps = max(MIN_VIDEO_KBPS, safe_total_kbps - audio_kbps)
  478.  
  479. #     # cap al bitrate video della sorgente
  480. #     if v_kbps > ctx.source_video_kbps_est:
  481. #         v_kbps = ctx.source_video_kbps_est
  482.  
  483. #     vf_h = ctx.height_bucket  # come già facevi
  484.  
  485. #     return TargetPlan(
  486. #         safe_total_kbps=safe_total_kbps,
  487. #         target_audio_kbps_total=audio_kbps,
  488. #         v_kbps=v_kbps,
  489. #         vf_height=vf_h,
  490. #         source_video_kbps_est=ctx.source_video_kbps_est
  491. #     )
  492.  
  493. def compute_target_for_size(
  494.     ctx: EncodeContext,
  495.     safe_limit_bytes: int,
  496.     n_audio_override: Optional[int] = None,
  497. ) -> TargetPlan:
  498.     # 1) total "safe" budget
  499.     safe_total_kbps = max(1, int((safe_limit_bytes * 8) / ctx.duration_s / 1000))
  500.  
  501.     # 2) audio budget
  502.     used_n_audio = n_audio_override if (n_audio_override is not None) else ctx.n_audio
  503.     # if ctx.source_audio_kbps_known is available, prefer it when > 0
  504.     audio_kbps = max(0, used_n_audio) * AUDIO_KBPS_PER_TRACK
  505.  
  506.     # 3) candidate video budget
  507.     v_kbps_cand = max(MIN_VIDEO_KBPS, safe_total_kbps - audio_kbps)
  508.  
  509.     # 4) feasibility precheck against the smallest floor (not a hardcoded "480")
  510.     min_floor_val = _floor_value_min(H265_FLOOR_KBPS)
  511.     if v_kbps_cand < min_floor_val:
  512.         raise EncodeNotFeasible(
  513.             f"Video budget {v_kbps_cand} kb/s < min floor {min_floor_val} kb/s → split richiesto"
  514.         )
  515.  
  516.     # 5) starting bucket: floor of the source height (no upscale), clamped to the max target
  517.     src_h = int(ctx.height)
  518.     bucket_src_floor = _bucket_floor_for_height(src_h, H265_FLOOR_KBPS)
  519.     bucket_max = _cap_to_max_target(bucket_src_floor, MAX_TARGET_HEIGHT, H265_FLOOR_KBPS)
  520.     bucket_start = min(bucket_src_floor, bucket_max)
  521.  
  522.     # 6) enforce the floors (step down until floor <= v)
  523.     chosen_bucket = _select_bucket_for_v(v_kbps_cand, bucket_start, H265_FLOOR_KBPS)
  524.     if not chosen_bucket:
  525.         raise EncodeNotFeasible(
  526.             f"Nessun bucket soddisfa {v_kbps_cand} kb/s (min floor={min_floor_val}) → split richiesto"
  527.         )
  528.  
  529.     # 7) conditional cap to the source bitrate ONLY if there is no real downscale
  530.     # real downscale = target_height < min(src_h, effective MAX_TARGET_HEIGHT in pixels)
  531.     # the comparison uses real heights, not nominal buckets above src_h
  532.     downscale_real = chosen_bucket < min(src_h, MAX_TARGET_HEIGHT)
  533.     v_final = v_kbps_cand
  534.  
  535.     if not downscale_real and v_kbps_cand > ctx.source_video_kbps_est:
  536.         v_cap = ctx.source_video_kbps_est
  537.         # Apply the cap only if it does not break the floor of the chosen bucket.
  538.         if v_cap >= _floor_for_bucket(chosen_bucket, H265_FLOOR_KBPS):
  539.             v_final = v_cap
  540.         else:
  541.             # The cap would break the floor. In that case it is better to step down a bucket
  542.             # and NOT apply the cap (rule: after a downscale the cap does not apply).
  543.             lower_ok = _select_bucket_for_v(v_kbps_cand, _next_lower_bucket(chosen_bucket, H265_FLOOR_KBPS) or chosen_bucket, H265_FLOOR_KBPS)
  544.             if lower_ok:
  545.                 chosen_bucket = lower_ok
  546.                 # after the downscale, the cap does not apply
  547.                 v_final = v_kbps_cand
  548.             else:
  549.                 # Even stepping down does not satisfy the floor → not feasible
  550.                 raise EncodeNotFeasible(
  551.                     "Cap alla sorgente impedisce il rispetto del floor e non esistono bucket inferiori → split richiesto"
  552.                 )
  553.  
  554.     # 8) return the plan
  555.     return TargetPlan(
  556.         safe_total_kbps=safe_total_kbps,
  557.         target_audio_kbps_total=audio_kbps,
  558.         v_kbps=int(v_final),
  559.         vf_height=int(chosen_bucket),
  560.         source_video_kbps_est=ctx.source_video_kbps_est,
  561.     )
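# Illustrative only (assumed numbers): a 7200 s 1080p source with 2 audio tracks and the default
# safe_limit_bytes (2_097_152_000 B) gives safe_total_kbps = 2330, audio = 2 * 96 = 192 kb/s and a
# candidate video budget of 2138 kb/s; the 1080 bucket (floor 1000 kb/s) is satisfied, there is no
# real downscale, and the source cap only applies if the source video bitrate estimate is below 2138 kb/s.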
  562.  
  563. # === adjustment for attempt 2 (retry) ===
  564. def adjust_plan_after_retry(
  565.     ctx: EncodeContext,
  566.     prev_plan: TargetPlan,
  567.     new_video_kbps: int,
  568. ) -> TargetPlan:
  569.     """
  570.     Given the v_kbps recomputed by the retry mechanism (based on the overshoot),
  571.     re-applies the floors and, if needed, steps down a bucket. ALWAYS uses the same policy as attempt 1.
  572.     """
  573.     v2 = max(MIN_VIDEO_KBPS, int(new_video_kbps))
  574.     min_floor_val = _floor_value_min(H265_FLOOR_KBPS)
  575.     if v2 < min_floor_val:
  576.         raise EncodeNotFeasible(
  577.             f"Retry: v={v2} kb/s < min floor {min_floor_val} kb/s → split richiesto"
  578.         )
  579.  
  580.     # starting bucket = the one from the previous attempt (never go back up),
  581.     # and in any case not above MAX_TARGET_HEIGHT
  582.     start_bucket = min(prev_plan.vf_height, MAX_TARGET_HEIGHT)
  583.  
  584.     chosen_bucket = _select_bucket_for_v(v2, start_bucket, H265_FLOOR_KBPS)
  585.     if not chosen_bucket:
  586.         raise EncodeNotFeasible(
  587.             f"Retry: nessun bucket soddisfa {v2} kb/s (min floor={min_floor_val}) → split richiesto"
  588.         )
  589.  
  590.     # Real DOWNscale = comparison against the source, NOT against the max target.
  591.     downscale_real = chosen_bucket < ctx.height
  592.  
  593.     v_final = v2
  594.     if not downscale_real and v2 > ctx.source_video_kbps_est:
  595.         v_cap = ctx.source_video_kbps_est
  596.         if v_cap >= _floor_for_bucket(chosen_bucket, H265_FLOOR_KBPS):
  597.             v_final = v_cap
  598.         else:
  599.             # Step down one more bucket (if possible) and ignore the cap
  600.             lower_ok = _select_bucket_for_v(
  601.                 v2,
  602.                 _next_lower_bucket(chosen_bucket, H265_FLOOR_KBPS) or chosen_bucket,
  603.                 H265_FLOOR_KBPS
  604.             )
  605.             if lower_ok and lower_ok != chosen_bucket:
  606.                 chosen_bucket = lower_ok
  607.                 v_final = v2
  608.             else:
  609.                 raise EncodeNotFeasible(
  610.                     "Retry: cap romperebbe il floor e non esistono bucket inferiori → split richiesto"
  611.                 )
  612.  
  613.     return TargetPlan(
  614.         safe_total_kbps=int(v_final + prev_plan.target_audio_kbps_total),  # recomputed
  615.         target_audio_kbps_total=prev_plan.target_audio_kbps_total,
  616.         v_kbps=int(v_final),
  617.         vf_height=int(chosen_bucket),
  618.         source_video_kbps_est=ctx.source_video_kbps_est,
  619.     )
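# Illustrative only (assumed numbers): if attempt 1 used the 1080 bucket and the retry budget drops to
# v2 = 900 kb/s, the 1080 floor (1000 kb/s) is no longer met, so the plan steps down to the 720 bucket
# (floor 750 kb/s) and keeps v2 = 900 kb/s; if v2 fell below 420 kb/s (the smallest floor),
# EncodeNotFeasible would be raised and the caller would split the file instead.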
  620.  
  645. def adjust_safe_limit_for_overshoot(safe_limit_bytes: int, actual_bytes: int) -> int:
  646.     overs = max(0, actual_bytes - safe_limit_bytes)
  647.     if overs <= 0:
  648.         return safe_limit_bytes
  649.     new_limit = int(safe_limit_bytes - overs * (1.0 + float(RETRY_SAFETY_PCT)))
  650.     # do not go below 16 MiB, to avoid odd inputs
  651.     return max(new_limit, 16 * 1024 * 1024)
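# Illustrative only: with limit 2_097_152_000 B and an output of 2_200_000_000 B, the overshoot is
# 102_848_000 B, so the next limit becomes 2_097_152_000 - int(102_848_000 * 1.005) = 1_993_789_760 B.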
  652.  
  653. # ================= Docker/FFmpeg helpers (original functions, minimally adapted) =================
  654. def _docker_ffmpeg_args(cpus=None, cpuset=None, use_gpu=False):
  655.     args = ['docker','run','--rm',
  656.             '-e','DOCKER_CONFIG=/dev/null']
  657.     if cpus:   args += ['--cpus', str(cpus)]
  658.     if cpuset: args += ['--cpuset-cpus', str(cpuset)]
  659.     if use_gpu:
  660.         args += ['--gpus=all',
  661.                  '-e','NVIDIA_VISIBLE_DEVICES=all',
  662.                  '-e','NVIDIA_DRIVER_CAPABILITIES=all']
  663.     args += [
  664.         '--security-opt','seccomp=unconfined',
  665.         '--cap-add','SYS_NICE',
  666.         '-v', docker_mount_point,
  667.         'ghcr.io/aperim/nvidia-cuda-ffmpeg:latest',
  668.     ]
  669.     return args
  670.  
  671. def _nvenc_cmd(
  672.     file_path: str,
  673.     out_path: str,
  674.     vkbps: int,
  675.     vf_h: int,
  676.     is_10bit: bool,          # not used here
  677.     n_audio: int,
  678.     ext_low: str,            # compat
  679.     audio_map_relidx: list[int] | None = None,
  680.     force_stereo: bool = FORCE_STEREO,
  681.     use_dplii_matrix: bool = True,
  682.     keep_subtitles: bool = True,        # new: if True, keep the subtitles
  683. ) -> list[str]:
  684.     cmd = _docker_ffmpeg_args(use_gpu=True) + [
  685.         '-y','-nostdin','-hide_banner','-loglevel','error',
  686.         '-hwaccel','cuda',
  687.         '-i', file_path,
  688.         '-vf', f'scale=-2:{vf_h}:force_original_aspect_ratio=decrease',
  689.     ]
  690.  
  691.     # --- Mapping ---
  692.     # Main video stream
  693.     cmd += ['-map','0:v:0']
  694.  
  695.     # Audio: either an explicit selection or all tracks
  696.     if audio_map_relidx is not None:
  697.         cmd += ['-map','-0:a']  # drop all audio first
  698.         for idx in sorted(audio_map_relidx):
  699.             cmd += ['-map', f'0:a:{idx}']
  700.         eff_n_audio = len(audio_map_relidx)
  701.     else:
  702.         cmd += ['-map','0:a?']
  703.         eff_n_audio = n_audio
  704.  
  705.     # Subtitles (if requested)
  706.     if keep_subtitles:
  707.         cmd += ['-map','0:s?']
  708.         eff_n_subs = 1  # only needed for the copy flag; we do not address indices here
  709.     else:
  710.         eff_n_subs = 0
  711.  
  712.     # --- Video (as in the original NVENC settings) ---
  713.     cmd += [
  714.         '-c:v','hevc_nvenc',
  715.         '-rc','vbr_hq','-multipass','fullres',
  716.         '-b:v', f'{int(vkbps)}k',
  717.         '-maxrate', f'{int(vkbps*1.08)}k',
  718.         '-bufsize', f'{int(vkbps*2.0)}k',
  719.         '-rc-lookahead','32',
  720.         '-spatial_aq','1','-temporal_aq','1','-aq-strength','8',
  721.         '-bf','3','-b_ref_mode','middle',
  722.         '-g','240',
  723.     ]
  724.  
  725.     # --- Audio ---
  726.     if eff_n_audio > 0:
  727.         cmd += ['-c:a','libfdk_aac']
  728.         # Global default (covers any streams not addressed by index)
  729.         cmd += ['-b:a', f'{AUDIO_KBPS_PER_TRACK}k']
  730.
  731.         # Per-track audio settings
  732.         for i in range(eff_n_audio):
  733.             cmd += [f'-b:a:{i}', f'{AUDIO_KBPS_PER_TRACK}k']
  734.             if force_stereo:
  735.                 cmd += [f'-ac:a:{i}', '2']
  736.                 if use_dplii_matrix:
  737.                     # request Dolby Pro Logic II matrix encoding (useful metadata)
  738.                     cmd += [f'-filter:a:{i}', 'aresample=matrix_encoding=dplii']
  739.             # Clean track title
  740.             cmd += [f'-metadata:s:a:{i}', f'title=AAC LC 2.0 {AUDIO_KBPS_PER_TRACK} kbps']
  741.
  742.         # Disposition: first audio track default, the others non-default
  743.         cmd += ['-disposition:a:0','default']
  744.         for i in range(1, eff_n_audio):
  745.             cmd += [f'-disposition:a:{i}','0']  # clears default/forced flags if inherited
  746.     else:
  747.         cmd += ['-an']  # no audio tracks
  748.  
  749.     # --- Subtitles: copy "as-is" (MKV handles srt/ass/pgs/vobsub well) ---
  750.     if keep_subtitles and eff_n_subs:
  751.         cmd += ['-c:s','copy']
  752.         # optional: make sure no subtitle track is flagged "default"
  753.         # (avoids surprises if ffmpeg inherits the flags)
  754.         cmd += ['-disposition:s','0']
  755.  
  756.     # --- Container metadata/chapters ---
  757.     # Clean up BPS/NUMBER_OF_BYTES/encoder etc. (we re-set them ourselves after the size check)
  758.     cmd += ['-map_metadata','-1']
  759.
  760.     # Matroska container (we only work in mkv)
  761.     cmd.append(out_path)
  762.     return cmd
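# Illustrative only (abbreviated): for a 720p plan at 1500 kb/s with all audio kept, the list built
# above expands to roughly:
#   docker run --rm -e DOCKER_CONFIG=/dev/null --gpus=all ... ghcr.io/aperim/nvidia-cuda-ffmpeg:latest \
#     -y -nostdin -hide_banner -loglevel error -hwaccel cuda -i <input> \
#     -vf scale=-2:720:force_original_aspect_ratio=decrease \
#     -map 0:v:0 -map 0:a? -map 0:s? \
#     -c:v hevc_nvenc -rc vbr_hq -multipass fullres -b:v 1500k -maxrate 1620k -bufsize 3000k ... \
#     -c:a libfdk_aac -b:a 96k ... -c:s copy -disposition:s 0 -map_metadata -1 <output>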
  763.  
  764. def _run(cmd: List[str]) -> Tuple[int, float, str]:
  765.     start = time.time()
  766.     p = subprocess.run(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.PIPE)
  767.     elapsed = time.time() - start
  768.     return p.returncode, elapsed, (p.stderr.decode('utf-8', errors='ignore') if p.stderr else "")
  769.  
  770. def run_encode_once(out_path: str, ctx: EncodeContext, plan: TargetPlan, audio_map_relidx: Optional[List[int]] = None) -> Tuple[int, float, int]:
  771.     try:
  772.         if os.path.exists(out_path):
  773.             os.remove(out_path)
  774.     except Exception:
  775.         pass
  776.  
  777.     cmd = _nvenc_cmd(
  778.         file_path=ctx.file_path,
  779.         out_path=out_path,
  780.         vkbps=int(plan.v_kbps),
  781.         vf_h=int(plan.vf_height),
  782.         is_10bit=ctx.is_10bit,
  783.         n_audio=ctx.n_audio,
  784.         ext_low=Path(ctx.file_path).suffix.lower(),
  785.         audio_map_relidx=audio_map_relidx
  786.     )
  787.  
  788.     eff_n_audio = (len(audio_map_relidx) if audio_map_relidx is not None else ctx.n_audio)
  789.     logger.info(
  790.         "[encode] start: out=%s, v=%s kb/s, a_total=%s kb/s, total=%s kb/s, vf_h=%s, audio_streams=%d, dur=%s (%.0fs)",
  791.         out_path, plan.v_kbps, plan.target_audio_kbps_total, plan.safe_total_kbps,
  792.         plan.vf_height, eff_n_audio, _fmt_hms(ctx.duration_s), ctx.duration_s
  793.     )
  794.  
  795.     rc, elapsed, stderr = _run(cmd)
  796.     out_size = os.path.getsize(out_path) if (rc == 0 and os.path.exists(out_path)) else -1
  797.  
  798.     if rc != 0:
  799.         tail = (stderr[-400:] if stderr else "")
  800.         logger.error(
  801.             "[encode] failed: rc=%s, elapsed=%.1fs, out_exists=%s, stderr_tail=%s",
  802.             rc, elapsed, os.path.exists(out_path), tail
  803.         )
  804.     else:
  805.         # bitrate effettivo totale e fattore realtime
  806.         eff_total_kbps = int((out_size * 8) / max(1.0, ctx.duration_s) / 1000)
  807.         delta_pct = ( (eff_total_kbps / plan.safe_total_kbps - 1.0) * 100.0 ) if plan.safe_total_kbps else 0.0
  808.         rt_factor = (ctx.duration_s / max(1e-6, elapsed))
  809.         logger.info(
  810.             "[encode] done: rc=0, elapsed=%.1fs (%.2fx realtime), size=%s (%d B), eff_total=%d kb/s "
  811.             "(plan_total=%s, Δ=%.2f%%)",
  812.             elapsed, rt_factor, _fmt_bytes(out_size), out_size,
  813.             eff_total_kbps, plan.safe_total_kbps, delta_pct
  814.         )
  815.  
  816.     return rc, elapsed, out_size
  817.  
  818. # ===================== Split (original function, made more robust) =====================
  819. def split_into_streamable_parts(file_path: str, safe_limit_bytes: int, chat_id: int):
  820.     """
  821.     Size-based split using container-aware tools.
  822.     - MP4: MP4Box -splits XM (preferred). If MP4Box is missing: mkvmerge fallback (MKV), optional remux to MP4.
  823.     - MKV: mkvmerge --split size:XMiB
  824.     - Others: mkvmerge --split size:XM (optional remux to MP4 if FORCE_MP4_PARTS=True)
  825.     - No refinement loop: safe_limit_bytes is the maximum target per part. Small variations are possible.
  826.     Returns: list of paths (strings) of the created parts, or [original file] if under the threshold, or None on error.
  827.     Requires: logger, send_telegram_message, and optionally FORCE_MP4_PARTS and _remux_mkv_to_mp4.
  828.     """
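    # Illustrative only: for a 5 GiB "Show.S01E01.mkv" and the default ~2000 MB limit, the MKV branch
    # below runs:  mkvmerge -o Show.S01E01.split.mkv --split size:2000MiB Show.S01E01.mkv
    # which produces Show.S01E01.split-001.mkv, Show.S01E01.split-002.mkv, ...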
  829.  
  830.     def _log_info(msg: str):
  831.         try: logger.info(msg)
  832.         except NameError: print(msg)
  833.  
  834.     def _log_warn(msg: str):
  835.         try: logger.warning(msg)
  836.         except NameError: print(f"WARNING: {msg}")
  837.  
  838.     def _have(tool: str) -> bool:
  839.         return shutil.which(tool) is not None
  840.  
  841.     try:
  842.         src = Path(file_path)
  843.         if not src.exists():
  844.             send_telegram_message(chat_id, "❌ File non trovato per split")
  845.             return None
  846.  
  847.         if not safe_limit_bytes or safe_limit_bytes <= 0:
  848.             send_telegram_message(chat_id, "❌ Limite per parte non valido (safe_limit_bytes)")
  849.             return None
  850.  
  851.         total_size = src.stat().st_size
  852.         if total_size <= safe_limit_bytes:
  853.             return [str(src)]
  854.  
  855.         out_dir = src.parent
  856.         ext = src.suffix.lower()
  857.  
  858.         # Target size (whole MB, minimum 1)
  859.         size_mb = max(1, int(safe_limit_bytes // (1024 * 1024)))
  860.         size_mib = size_mb  # alias for mkvmerge "MiB"
  861.  
  862.         created: List[Path] = []
  863.  
  864.         # Pre-emptive cleanup of previous outputs matching known patterns
  865.         # MKV pattern: {stem}.split-001.mkv
  866.         for p in out_dir.glob(f"{src.stem}.split-*.mkv"):
  867.             try: p.unlink()
  868.             except Exception: pass
  869.         # MP4Box pattern: {stem}_0001.mp4
  870.         for p in out_dir.glob(f"{src.stem}_*.mp4"):
  871.             try: p.unlink()
  872.             except Exception: pass
  873.  
  874.         if ext == ".mkv":
  875.             if not _have("mkvmerge"):
  876.                 send_telegram_message(chat_id, "❌ mkvmerge non trovato (pacchetto mkvtoolnix)")
  877.                 return None
  878.             base = out_dir / f"{src.stem}.split.mkv"
  879.             cmd = ["mkvmerge", "-o", str(base), "--split", f"size:{size_mib}MiB", str(src)]
  880.             _log_info(f"[split] MKV → {' '.join(shlex.quote(c) for c in cmd)}")
  881.             run = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
  882.             rc = run.returncode
  883.             if rc != 0:
  884.                 err = (run.stderr.decode('utf-8', 'ignore') if run.stderr else "")
  885.                 logger.error("[split] mkvmerge rc=%s, stderr(last400)=%s", rc, err[-400:])
  886.                 send_telegram_message(chat_id, "❌ mkvmerge: split fallito")
  887.                 return None
  888.  
  889.             mkv_parts = sorted(out_dir.glob(f"{src.stem}.split-*.mkv"))
  890.             if not mkv_parts and base.exists():
  891.                 mkv_parts = [base]
  892.             if not mkv_parts:
  893.                 send_telegram_message(chat_id, "❌ Nessuna parte creata (mkvmerge)")
  894.                 return None
  895.  
  896.             try:
  897.                 if FORCE_MP4_PARTS:
  898.                     mp4_parts = _remux_mkv_to_mp4(mkv_parts, chat_id)
  899.                     if mp4_parts is None:
  900.                         return None
  901.                     created = [Path(p) for p in mp4_parts]
  902.                 else:
  903.                     created = mkv_parts
  904.             except NameError:
  905.                 created = mkv_parts
  906.  
  907.         elif ext == ".mp4":
  908.             if _have("MP4Box"):
  909.                 cmd = ["MP4Box", "-splits", f"{size_mb}M", str(src)]
  910.                 _log_info(f"[split] MP4 → {' '.join(shlex.quote(c) for c in cmd)}")
  911.                 rc = subprocess.call(cmd, cwd=str(out_dir))
  912.                 if rc != 0:
  913.                     send_telegram_message(chat_id, "❌ MP4Box: split fallito")
  914.                     return None
  915.                 created = sorted([p for p in out_dir.glob("*.mp4") if p.stem.startswith(src.stem + "_")])
  916.                 if not created:
  917.                     created = sorted(out_dir.glob(f"{src.stem}_*.mp4"))
  918.             elif _have("mkvmerge"):
  919.                 base = out_dir / f"{src.stem}.split.mkv"
  920.                 cmd = ["mkvmerge", "-o", str(base), "--split", f"size:{size_mb}M", str(src)]
  921.                 _log_info(f"[split] MP4→MKV (fallback) → {' '.join(shlex.quote(c) for c in cmd)}")
  922.                 rc = subprocess.call(cmd)
  923.                 if rc != 0:
  924.                     send_telegram_message(chat_id, "❌ mkvmerge: split fallito")
  925.                     return None
  926.                 mkv_parts = sorted(out_dir.glob(f"{src.stem}.split-*.mkv"))
  927.                 if not mkv_parts:
  928.                     send_telegram_message(chat_id, "❌ Nessuna parte creata (mkvmerge)")
  929.                     return None
  930.                 try:
  931.                     if FORCE_MP4_PARTS:
  932.                         mp4_parts = _remux_mkv_to_mp4(mkv_parts, chat_id)
  933.                         if mp4_parts is None:
  934.                             return None
  935.                         created = [Path(p) for p in mp4_parts]
  936.                     else:
  937.                         created = mkv_parts
  938.                 except NameError:
  939.                     created = mkv_parts
  940.             else:
  941.                 send_telegram_message(chat_id, "❌ Nessuno strumento per split MP4 (installa GPAC/MP4Box o mkvtoolnix)")
  942.                 return None
  943.  
  944.         elif ext in (".avi", ".mov", ".m4v", ".ts", ".m2ts", ".flv", ".webm", ".mpeg", ".mpg"):
  945.             if not _have("mkvmerge"):
  946.                 send_telegram_message(chat_id, f"❌ mkvmerge non trovato per split di {ext} (installa mkvtoolnix)")
  947.                 return None
  948.             base = out_dir / f"{src.stem}.split.mkv"
  949.             cmd = ["mkvmerge", "-o", str(base), "--split", f"size:{size_mb}M", str(src)]
  950.             _log_info(f"[split] {ext.upper()}→MKV → {' '.join(shlex.quote(c) for c in cmd)}")
  951.             rc = subprocess.call(cmd)
  952.             if rc != 0:
  953.                 send_telegram_message(chat_id, "❌ mkvmerge: split fallito")
  954.                 return None
  955.             mkv_parts = sorted(out_dir.glob(f"{src.stem}.split-*.mkv"))
  956.             if not mkv_parts:
  957.                 send_telegram_message(chat_id, "❌ Nessuna parte creata (mkvmerge)")
  958.                 return None
  959.             try:
  960.                 if FORCE_MP4_PARTS:
  961.                     mp4_parts = _remux_mkv_to_mp4(mkv_parts, chat_id)
  962.                     if mp4_parts is None:
  963.                         return None
  964.                     created = [Path(p) for p in mp4_parts]
  965.                 else:
  966.                     created = mkv_parts
  967.             except NameError:
  968.                 created = mkv_parts
  969.  
  970.         else:
  971.             send_telegram_message(chat_id, f"❌ Estensione non supportata per split: {ext}")
  972.             return None
  973.  
  974.         if not created:
  975.             send_telegram_message(chat_id, "❌ Split: nessun output rilevato")
  976.             return None
  977.  
  978.         overs = [p for p in created if p.stat().st_size > safe_limit_bytes]
  979.         if overs:
  980.             worst = max(overs, key=lambda p: p.stat().st_size)
  981.             _log_warn(f"[split] Una parte supera il target richiesto: {worst.name} = {worst.stat().st_size // (1024*1024)} MB")
  982.  
  983.         return [str(p) for p in created]
  984.  
  985.     except Exception as e:
  986.         try: logger.exception(f"[split] errore inatteso: {e}")
  987.         except NameError: print(f"[split] errore inatteso: {e}")
  988.         try: send_telegram_message(chat_id, f"❌ Errore nello split: {e}")
  989.         except NameError: pass
  990.         return None
  991.  
  992. # ===================== CHECK_FILE (complete) =====================
  993. def check_file(file_path: str, chat_id: int, item_id: int, app) -> dict:
  994.     """
  995.     - If the original is <= the limit: original_ok.
  996.     - Otherwise:
  997.         Attempt 1: compute the plan against a "fudged" limit (only here).
  998.         If OK (<= the real limit): replace and encoded_ok.
  999.         If it overshoots and RETRY_ON_OVERSHOOT=True: Attempt 2.
  1000.            - Keeps the audio and vf_height of attempt 1
  1001.            - Recomputes ONLY v_kbps, "spreading" the excess plus a safety margin.
  1002.        If attempt 2 is OK: replace and encoded_ok.
  1003.        Otherwise: too_big (the caller will do the split).
  1004.    """
  1005.     try:
  1006.         src = Path(file_path)
  1007.         if not src.exists():
  1008.             msg = "❌ File non trovato"
  1009.             try: send_telegram_message(chat_id, msg)
  1010.             except Exception: pass
  1011.             return {"status": "error", "msg": msg, "path": None}
  1012.  
  1013.         orig_size = src.stat().st_size
  1014.         if orig_size <= safe_limit_bytes:
  1015.             # logger.info("[check] Originale sotto soglia: %s <= %s", orig_size, safe_limit_bytes)
  1016.             logger.info(
  1017.                 "[check] originale <= limite: size=%s (%d B) <= limit=%s (%d B) — skip encode.",
  1018.                 _fmt_bytes(orig_size), orig_size, _fmt_bytes(safe_limit_bytes), safe_limit_bytes
  1019.             )
  1020.             return {"status": "original_ok", "path": str(src), "size": orig_size}
  1021.  
  1022.         if app == "sonarr":
  1023.             api_key = sonarr_api
  1024.             host = sonarr_host
  1025.             port = sonarr_port
  1026.         elif app == "radarr":
  1027.             api_key = radarr_api
  1028.             host = radarr_host
  1029.             port = radarr_port
  1030.         else:
  1031.             raise ValueError("app must be 'radarr' or 'sonarr'")
  1032.        
  1033.         profile_language = get_profile_language_iso639_2(app, api_key, host, port, item_id)
  1034.  
  1035.         langs = _analyze_audio_languages_with_docker(src)
  1036.         filter_enabled, keep_relidx, notify_msg, summary_str = _decide_audio_selection(profile_language, langs)
  1037.  
  1038.         # Log the list of detected tracks
  1039.         logger.info("[audio] Tracce: %s", summary_str)
  1040.  
  1041.         # Notify the owner, if the logic requests it
  1042.         if notify_msg:
  1043.             try:
  1044.                 send_telegram_message(owner_chatid, f"ℹ️ {src.name}: {notify_msg}")
  1045.             except Exception:
  1046.                 pass
  1047.  
  1048.         ctx = extract_encode_context(str(src))
  1049.  
  1050.         # Report keep/remove
  1051.         if filter_enabled:
  1052.             keep_1based = [i+1 for i in sorted(keep_relidx)]
  1053.             all_1based   = [int(t.get("track")) for t in langs]
  1054.             remove_1based = [t for t in all_1based if t not in keep_1based]
  1055.             # logger.info("[audio] Keep: %s | Remove: %s",
  1056.             #             ", ".join(f"#{i}" for i in keep_1based) if keep_1based else "(none)",
  1057.             #             ", ".join(f"#{i}" for i in remove_1based) if remove_1based else "(none)")
  1058.             logger.info(
  1059.                 "[audio] selection: keep=%s (%d/%d), remove=%s",
  1060.                 ", ".join(f"#{i}" for i in keep_1based) if keep_1based else "(none)",
  1061.                 len(keep_1based), len(langs),
  1062.                 ", ".join(f"#{i}" for i in remove_1based) if remove_1based else "(none)"
  1063.             )
  1064.         else:
  1065.             logger.info("[audio] selection: keep=all (%d/%d), remove=(none)", ctx.n_audio, ctx.n_audio)
  1066.  
  1067.         # Context and temporary output
  1068.         # ctx = extract_encode_context(str(src))
  1069.         out_tmp = str(src.with_name(f"{src.stem}.encode{REENCODE_OUTPUT_EXT}"))
  1070.         try:
  1071.             if os.path.exists(out_tmp):
  1072.                 os.remove(out_tmp)
  1073.         except Exception:
  1074.             pass
  1075.  
  1076.         dur_txt = _fmt_hms(ctx.duration_s)
  1077.         try:
  1078.             send_telegram_message(
  1079.                 chat_id,
  1080.                 "🗜️ Provo compressione GPU (HEVC NVENC, multipass)…"
  1081.             )
  1082.  
  1083.             logger.info(
  1084.                 "[check] source: name=%s, dur=%s (%.0fs), size=%s (%d B), limit=%s (%d B), app=%s, profile_lang=%s",
  1085.                 src.name, _fmt_hms(ctx.duration_s), ctx.duration_s,
  1086.                 _fmt_bytes(orig_size), orig_size,
  1087.                 _fmt_bytes(safe_limit_bytes), safe_limit_bytes,
  1088.                 app, profile_language or "n/a"
  1089.             )
  1090.  
  1091.             a_src_est = max(0, ctx.source_total_kbps_est - ctx.source_video_kbps_est)
  1092.             logger.info(
  1093.                 "[src] video: %dx%d, %s-bit, v_src≈%d kb/s",
  1094.                 ctx.width, ctx.height, ("10" if ctx.is_10bit else "8"), ctx.source_video_kbps_est
  1095.             )
  1096.             logger.info(
  1097.                 "[src] audio: tracks=%d, a_src_est≈%d kb/s",
  1098.                 ctx.n_audio, a_src_est
  1099.             )
  1100.             logger.info(
  1101.                 "[src] container: total_src≈%d kb/s, dur=%s (%.0fs), size=%s (%d B), ext=%s",
  1102.                 ctx.source_total_kbps_est, dur_txt, ctx.duration_s,
  1103.                 _fmt_bytes(orig_size), orig_size, ctx.ext_low
  1104.             )
  1105.  
  1106.         except Exception:
  1107.             pass
  1108.  
  1109.         # ---- Attempt 1 (total limit reduced by FUDGE_VBR_FIRST_TRY) ----
  1110.         limit_try1 = int(safe_limit_bytes * (1.0 - FUDGE_VBR_FIRST_TRY))
  1111.  
  1112.         n_audio_eff = len(keep_relidx) if filter_enabled else ctx.n_audio
  1113.  
  1114.         plan1 = compute_target_for_size(ctx, limit_try1, n_audio_override=n_audio_eff)
  1115.  
  1116.         # logger.info("[check] Tentativo 1 → target_kbps_tot=%s, audio=%s, video=%s, vf_h=%s",
  1117.         #             plan1.safe_total_kbps, plan1.target_audio_kbps_total, plan1.v_kbps, plan1.vf_height)
  1118.  
  1119.         logger.info(
  1120.             "[plan] try#1: total=%s kb/s (v=%s, a=%s), vf_h=%s, limit_fudged=%s (%d B, fudge=%.2f%%), dur=%s (%.0fs)",
  1121.             plan1.safe_total_kbps, plan1.v_kbps, plan1.target_audio_kbps_total, plan1.vf_height,
  1122.             _fmt_bytes(limit_try1), limit_try1, (FUDGE_VBR_FIRST_TRY * 100.0),
  1123.             _fmt_hms(ctx.duration_s), ctx.duration_s
  1124.         )
  1125.  
  1126.         # If we filter, pass the explicit map; otherwise None keeps the current behaviour
  1127.         audio_map = keep_relidx if filter_enabled else None
  1128.         rc1, el1, out1 = run_encode_once(out_tmp, ctx, plan1, audio_map_relidx=audio_map)
  1129.  
  1130.         logger.info("[encode] v_kbps=%s, vf_h=%s, audio_kbps_total=%s, safe_total_kbps=%s, src_v_kbps=%s",
  1131.                     plan1.v_kbps, plan1.vf_height, plan1.target_audio_kbps_total,
  1132.                     plan1.safe_total_kbps, getattr(plan1, "source_video_kbps_est", "n/a"))
  1133.  
  1134.         if rc1 != 0 or out1 <= 0 or not os.path.exists(out_tmp):
  1135.             try: send_telegram_message(chat_id, "❌ Compress. fallita (1° tentativo).")
  1136.             except Exception: pass
  1137.             try:
  1138.                 if os.path.exists(out_tmp):
  1139.                     os.remove(out_tmp)
  1140.             except Exception:
  1141.                 pass
  1142.             return {"status": "error", "path": str(src), "msg": "encode1_failed"}
  1143.  
  1144.         logger.info("[encode] OK rc=%s, size=%s bytes", rc1, out1)
  1145.  
  1146.         if out1 <= safe_limit_bytes:
  1147.             # SUCCESS → replace the original
  1148.             try:
  1149.                 dest_path = src.with_suffix(REENCODE_OUTPUT_EXT)
  1150.                 os.replace(out_tmp, dest_path)
  1151.                 if dest_path != src:
  1152.                     try:
  1153.                         os.remove(src)
  1154.                     except Exception:
  1155.                         pass
  1156.  
  1157.                 try:
  1158.                     eff_n_audio = (len(audio_map) if audio_map is not None else ctx.n_audio)
  1159.                     update_mkv_metadata_final(dest_path, src, eff_n_audio)
  1160.                     logger.info("[final] mkvpropedit: track statistics tags rigenerati + segment title/default audio impostati.")
  1161.                 except Exception:
  1162.                     pass
  1163.  
  1164.                 final_size = os.path.getsize(dest_path)
  1165.  
  1166.                 try:
  1167.                     eff_total_kbps_1 = int((final_size * 8) / max(1.0, ctx.duration_s) / 1000)
  1168.                     margin_pct_1 = (1.0 - (final_size / safe_limit_bytes)) * 100.0
  1169.                     logger.info(
  1170.                         "[final] try#1 OK: size=%s (%d B) ≤ limit=%s (%d B), margin=%.2f%%, "
  1171.                         "eff_total=%d kb/s vs plan_total=%s, v=%s, vf_h=%s, audio_streams=%d, dest=%s",
  1172.                         _fmt_bytes(final_size), final_size,
  1173.                         _fmt_bytes(safe_limit_bytes), safe_limit_bytes, margin_pct_1,
  1174.                         eff_total_kbps_1, plan1.safe_total_kbps, plan1.v_kbps, plan1.vf_height,
  1175.                         (len(audio_map) if filter_enabled else ctx.n_audio),
  1176.                         str(dest_path)
  1177.                     )
  1178.  
  1179.                     size_factor = (orig_size / final_size) if final_size > 0 else 0.0
  1180.                     size_redux_pct = 100.0 * (1.0 - (final_size / orig_size))
  1181.                     tot_redux_pct  = 100.0 * (1.0 - (eff_total_kbps_1 / max(1, ctx.source_total_kbps_est)))
  1182.                     v_redux_pct    = 100.0 * (1.0 - (plan1.v_kbps / max(1, ctx.source_video_kbps_est)))
  1183.  
  1184.                     logger.info(
  1185.                         "[final] delta#1: total≈%d→%d kb/s (Δ=%.2f%%), v≈%d→%d kb/s (plan, Δ=%.2f%%), "
  1186.                         "size %s→%s (x%.2f, Δ=%.2f%%), vf_h=%s, a_streams=%d",
  1187.                         ctx.source_total_kbps_est, eff_total_kbps_1, tot_redux_pct,
  1188.                         ctx.source_video_kbps_est, plan1.v_kbps, v_redux_pct,
  1189.                         _fmt_bytes(orig_size), _fmt_bytes(final_size), size_factor, size_redux_pct,
  1190.                         plan1.vf_height, (len(audio_map) if filter_enabled else ctx.n_audio)
  1191.                     )
  1192.  
  1193.                 except Exception:
  1194.                     pass
  1195.  
  1196.             except Exception:
  1197.                 try: os.remove(out_tmp)
  1198.                 except Exception: pass
  1199.                 final_size = out1  # best effort
  1200.  
  1201.             try:
  1202.                 send_telegram_message(chat_id, f"✅ Compressione ok: {_fmt_bytes(final_size)} (limite {_fmt_bytes(safe_limit_bytes)})")
  1203.             except Exception:
  1204.                 pass
  1205.  
  1206.             # return {"status": "encoded_ok", "path": file_path, "size": final_size,
  1207.             return {"status": "encoded_ok", "path": str(dest_path), "size": final_size,
  1208.                     "plan": {"vf_height": plan1.vf_height, "v_kbps": plan1.v_kbps,
  1209.                              "audio_kbps": plan1.target_audio_kbps_total,
  1210.                              "safe_total_kbps": plan1.safe_total_kbps,
  1211.                              "src_video_kbps_est": getattr(plan1, "source_video_kbps_est", None)}}
  1212.  
  1213.         # ---- Overshoot → evaluate retry ----
  1214.         if not RETRY_ON_OVERSHOOT:
  1215.             try:
  1216.                 send_telegram_message(chat_id, f"ℹ️ Output {_fmt_bytes(out1)} > limite ({_fmt_bytes(safe_limit_bytes)}). Retry disattivato.")
  1217.  
  1218.                 logger.info(
  1219.                     "[overshoot] try#1: size=%s (%d B) > limit=%s (%d B) by %.2f%% — retry disattivato.",
  1220.                     _fmt_bytes(out1), out1, _fmt_bytes(safe_limit_bytes), safe_limit_bytes,
  1221.                     ((out1 - safe_limit_bytes) / safe_limit_bytes) * 100.0
  1222.                 )
  1223.  
  1224.             except Exception:
  1225.                 pass
  1226.             try:
  1227.                 if os.path.exists(out_tmp):
  1228.                     os.remove(out_tmp)
  1229.             except Exception:
  1230.                 pass
  1231.             return {"status": "too_big", "path": str(src), "size": out1, "msg": "overshoot_no_retry"}
  1232.  
  1233.         overs_bytes = out1 - safe_limit_bytes
  1234.         pct = (overs_bytes / safe_limit_bytes) * 100.0
  1235.        
  1236.         min_floor = _floor_value_min(H265_FLOOR_KBPS)
  1237.  
  1238.         # v2 = _compute_retry_v_kbps(ctx.duration_s, out1, safe_limit_bytes, plan1.v_kbps, RETRY_SAFETY_PCT,
  1239.                                    # floor_kbps=max(64, int(getattr(plan1, "v_kbps_floor", 200))))
  1240.  
  1241.         v2 = _compute_retry_v_kbps(
  1242.             ctx.duration_s, out1, safe_limit_bytes, plan1.v_kbps, RETRY_SAFETY_PCT,
  1243.             floor_kbps=min_floor
  1244.         )
  1245.  
  1246.         try:
  1247.             plan2 = adjust_plan_after_retry(ctx, plan1, v2)  # usa H265_FLOOR_KBPS e MAX_TARGET_HEIGHT interni/globali
  1248.         except EncodeNotFeasible as e:
  1249.             # Retry not feasible (v2 below the lowest floor, or no compatible bucket) → ask for a split
  1250.             try:
  1251.                 send_telegram_message(chat_id,
  1252.                     f"❌ Retry impossibile: {e}. Suggerito split.")
  1253.             except Exception:
  1254.                 pass
  1255.             try:
  1256.                 if os.path.exists(out_tmp):
  1257.                     os.remove(out_tmp)
  1258.             except Exception:
  1259.                 pass
  1260.             return {"status": "too_big", "path": str(src), "size": out1, "msg": "overshoot_retry_unfeasible"}
  1261.  
  1262.         # CHANGE: the message now also reports a possible resolution change
  1263.         try:
  1264.             send_telegram_message(
  1265.                 chat_id,
  1266.                 "ℹ️ Overshoot: {} > limite ({}) di {:.2f}%.\n"
  1267.                 "Retry: video {} → {} kb/s; risoluzione {} → {}."
  1268.                 .format(_fmt_bytes(out1), _fmt_bytes(safe_limit_bytes), pct,
  1269.                         plan1.v_kbps, plan2.v_kbps, plan1.vf_height, plan2.vf_height)
  1270.             )
  1271.  
  1272.             logger.info(
  1273.                 "[overshoot] try#1: size=%s (%d B) > limit=%s (%d B) by %.2f%% — "
  1274.                 "retry plan: v=%s→%s kb/s, vf_h=%s→%s",
  1275.                 _fmt_bytes(out1), out1, _fmt_bytes(safe_limit_bytes), safe_limit_bytes, pct,
  1276.                 plan1.v_kbps, plan2.v_kbps, plan1.vf_height, plan2.vf_height
  1277.             )
  1278.         except Exception:
  1279.             pass
  1280.  
  1281.         logger.info(
  1282.             "[plan] try#2: total=%s kb/s (v=%s, a=%s), vf_h=%s",
  1283.             plan2.safe_total_kbps, plan2.v_kbps, plan2.target_audio_kbps_total, plan2.vf_height
  1284.         )
  1285.  
  1286.         rc2, el2, out2 = run_encode_once(out_tmp, ctx, plan2, audio_map_relidx=audio_map)
  1287.         logger.info("[encode] v_kbps=%s, vf_h=%s, audio_kbps_total=%s, safe_total_kbps=%s, src_v_kbps=%s",
  1288.                     plan2.v_kbps, plan2.vf_height, plan2.target_audio_kbps_total,
  1289.                     plan2.safe_total_kbps, getattr(plan2, "source_video_kbps_est", "n/a"))
  1290.  
  1291.         if rc2 != 0 or out2 <= 0 or not os.path.exists(out_tmp):
  1292.             try: send_telegram_message(chat_id, "❌ Compress. fallita (2° tentativo).")
  1293.             except Exception: pass
  1294.             try:
  1295.                 if os.path.exists(out_tmp):
  1296.                     os.remove(out_tmp)
  1297.             except Exception:
  1298.                 pass
  1299.             return {"status": "error", "path": str(src), "msg": "encode2_failed"}
  1300.  
  1301.         logger.info("[encode] OK rc=%s, size=%s bytes", rc2, out2)
  1302.  
  1303.         if out2 <= safe_limit_bytes:
  1304.             try:
  1305.                 dest_path = src.with_suffix(REENCODE_OUTPUT_EXT)
  1306.                 os.replace(out_tmp, dest_path)
  1307.                 if dest_path != src:
  1308.                     try:
  1309.                         os.remove(src)
  1310.                     except Exception:
  1311.                         pass
  1312.  
  1313.                 try:
  1314.                     eff_n_audio = (len(audio_map) if audio_map is not None else ctx.n_audio)
  1315.                     update_mkv_metadata_final(dest_path, src, eff_n_audio)
  1316.                     logger.info("[final] mkvpropedit: track statistics tags rigenerati + segment title/default audio impostati.")
  1317.                 except Exception:
  1318.                     pass
  1319.  
  1320.                 final_size = os.path.getsize(dest_path)
  1321.  
  1322.                 try:
  1323.                     eff_total_kbps_2 = int((final_size * 8) / max(1.0, ctx.duration_s) / 1000)
  1324.                     margin_pct_2 = (1.0 - (final_size / safe_limit_bytes)) * 100.0
  1325.                     logger.info(
  1326.                         "[final] try#2 OK: size=%s (%d B) ≤ limit=%s (%d B), margin=%.2f%%, "
  1327.                         "eff_total=%d kb/s vs plan_total=%s, v=%s, vf_h=%s, audio_streams=%d, dest=%s",
  1328.                         _fmt_bytes(final_size), final_size,
  1329.                         _fmt_bytes(safe_limit_bytes), safe_limit_bytes, margin_pct_2,
  1330.                         eff_total_kbps_2, plan2.safe_total_kbps, plan2.v_kbps, plan2.vf_height,
  1331.                         (len(audio_map) if filter_enabled else ctx.n_audio),
  1332.                         str(dest_path)
  1333.                     )
  1334.  
  1335.                     size_factor = (orig_size / final_size) if final_size > 0 else 0.0
  1336.                     size_redux_pct = 100.0 * (1.0 - (final_size / orig_size))
  1337.                     tot_redux_pct  = 100.0 * (1.0 - (eff_total_kbps_2 / max(1, ctx.source_total_kbps_est)))
  1338.                     v_redux_pct    = 100.0 * (1.0 - (plan2.v_kbps / max(1, ctx.source_video_kbps_est)))
  1339.  
  1340.                     logger.info(
  1341.                         "[final] delta#2: total≈%d→%d kb/s (Δ=%.2f%%), v≈%d→%d kb/s (plan, Δ=%.2f%%), "
  1342.                         "size %s→%s (x%.2f, Δ=%.2f%%), vf_h=%s, a_streams=%d",
  1343.                         ctx.source_total_kbps_est, eff_total_kbps_2, tot_redux_pct,
  1344.                         ctx.source_video_kbps_est, plan2.v_kbps, v_redux_pct,
  1345.                         _fmt_bytes(orig_size), _fmt_bytes(final_size), size_factor, size_redux_pct,
  1346.                         plan2.vf_height, (len(audio_map) if filter_enabled else ctx.n_audio)
  1347.                     )
  1348.  
  1349.                 except Exception:
  1350.                     pass
  1351.  
  1352.             except Exception:
  1353.                 try: os.remove(out_tmp)
  1354.                 except Exception: pass
  1355.                 final_size = out2
  1356.  
  1357.             try:
  1358.                 send_telegram_message(chat_id, f"✅ Compressione ok al 2° tentativo: {_fmt_bytes(final_size)} (limite {_fmt_bytes(safe_limit_bytes)})")
  1359.             except Exception:
  1360.                 pass
  1361.  
  1362.             # return {"status": "encoded_ok", "path": file_path, "size": final_size,
  1363.             return {"status": "encoded_ok", "path": dest_path, "size": final_size,
  1364.                     "plan": {"vf_height": plan2.vf_height, "v_kbps": plan2.v_kbps,
  1365.                              "audio_kbps": plan2.target_audio_kbps_total,
  1366.                              "safe_total_kbps": plan2.safe_total_kbps,
  1367.                              "src_video_kbps_est": getattr(plan2, "source_video_kbps_est", None)}}
  1368.  
  1369.         # Still too big → leave the split decision to the caller
  1370.         try:
  1371.             if os.path.exists(out_tmp):
  1372.                 os.remove(out_tmp)
  1373.         except Exception:
  1374.             pass
  1375.         try:
  1376.             send_telegram_message(chat_id, f"ℹ️ Output ancora > limite ({_fmt_bytes(safe_limit_bytes)}).")
  1377.         except Exception:
  1378.             pass
  1379.  
  1380.         logger.info(
  1381.             "[final] try#2 still too big: size=%s (%d B) > limit=%s (%d B) by %.2f%% — split demandato al chiamante.",
  1382.             _fmt_bytes(out2), out2, _fmt_bytes(safe_limit_bytes), safe_limit_bytes,
  1383.             ((out2 - safe_limit_bytes) / safe_limit_bytes) * 100.0
  1384.         )
  1385.  
  1386.         return {"status": "too_big", "path": str(src), "size": out2, "msg": "overshoot_after_retry"}
  1387.  
  1388.     except Exception as e:
  1389.         logger.exception("[check_file] eccezione: %s", e)
  1390.         try: send_telegram_message(chat_id, f"❌ Errore check_file: {e}")
  1391.         except Exception: pass
  1392.         return {"status": "error", "path": None, "msg": str(e)}
  1393.  
  1394. def parse_manual_args():
  1395.     p = argparse.ArgumentParser(prog="manual")
  1396.     p.add_argument("--file", required=True)
  1397.     p.add_argument("--chat-id", required=True, type=int)
  1398.     p.add_argument("--entity", required=False, default=None)
  1399.     p.add_argument('--split', metavar='MB', type=int, default=None, help='Force a manual split; maximum size of each part in MB')
  1400.     return p.parse_args(sys.argv[2:])
  1401.  
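        # Hypothetical invocation of the "manual" subcommand (paths and IDs are placeholders,
        # not values from a real deployment):
        #   python3 <this_script>.py manual --file "/Sharing/Movies/Example (2020)/example.mkv" \
        #       --chat-id 123456789 --entity "@example_user" --split 1900
        # --split is optional: without it the file is compressed (if needed) and sent whole.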
  1402. def run_manual_send(file_path, chat_id, entity, split_mb=None):
  1403.     file_path = str(Path(file_path).resolve())
  1404.     logger.info(f"[MANUAL] file={file_path} chat_id={chat_id} entity={entity} split={split_mb}")
  1405.  
  1406.     if not os.path.exists(file_path):
  1407.         send_telegram_message(chat_id, "❌ File non trovato")
  1408.         return 1
  1409.  
  1410.     # 1) Same sequence as the *arr flow: compress first (if needed)
  1411.     try:
  1412.         res = check_file(file_path, chat_id)
                # If the re-encode produced a new file (e.g. container switched to .mkv), follow it
                if isinstance(res, dict) and res.get("status") == "encoded_ok" and res.get("path"):
                    file_path = str(res["path"])
  1413.     except Exception as e:
  1414.         logger.warning(f"[MANUAL] check_file ha segnalato: {e}")
  1415.  
  1416.     # No split requested
  1417.     if split_mb is None:
  1418.         ok = send_file_to_telegram(file_path, chat_id, entity)
  1419.         return 0 if ok else 1
  1420.  
  1421.     # Split requested: use the value passed in MB as the hard limit for each part
  1422.     if split_mb <= 0:
  1423.         send_telegram_message(chat_id, "❌ Valore --split non valido (deve essere > 0)")
  1424.         return 1
  1425.  
  1426.     target_bytes = split_mb * 1024 * 1024
  1427.     # Avoid exceeding the Telegram per-part limit (safe_limit_bytes): clamp and warn
  1428.     if target_bytes > safe_limit_bytes:
  1429.         logger.warning(f"[MANUAL] --split={split_mb}MB supera il limite per parte; verrà usato {int(safe_limit_bytes/1024/1024)}MB")
  1430.         target_bytes = safe_limit_bytes
  1431.  
  1432.     logger.info(f"[MANUAL] Forzo split in parti da ~{int(target_bytes/1024/1024)} MB")
  1433.     parts = split_into_streamable_parts(file_path, target_bytes, chat_id)
  1434.  
  1435.     if not parts:
  1436.         send_telegram_message(chat_id, "❌ Split manuale fallito")
  1437.         return 1
  1438.  
  1439.     # Send the parts and clean up
  1440.     sent_all = True
  1441.     for p in parts:
  1442.         ok = send_file_to_telegram(p, chat_id, entity)
  1443.         if not ok:
  1444.             sent_all = False
  1445.             break
  1446.  
  1447.     # Delete only the created parts (not the original file)
  1448.     if sent_all:
  1449.         orig = os.path.abspath(file_path)
  1450.         for p in parts:
  1451.             try:
  1452.                 if os.path.abspath(p) != orig:
  1453.                     os.remove(p)
  1454.             except Exception:
  1455.                 pass
  1456.     return 0 if sent_all else 1
  1457.  
  1458. def send_telegram_message(chat_id, message, parse_mode=None):
  1460.     try:
  1461.         data = {
  1462.             "chat_id": chat_id,
  1463.             "text": message
  1464.         }
  1465.         if parse_mode:
  1466.             data["parse_mode"] = parse_mode
  1467.  
  1468.         response = requests.post(
  1469.             f"https://api.telegram.org/bot{telegram_bot_token}/sendMessage",
  1470.             data=data
  1471.         )
  1472.         response.raise_for_status()
  1473.     except requests.RequestException as e:
  1474.         logger.error("Failed to send message to Telegram: %s", str(e))
  1475.  
  1476. def extract_telegram_data(tags, keyword, extract_type='username'):
  1477.     if not tags:
  1478.         return 'Unknown user' if extract_type == 'username' else 'Unknown chatid'
  1479.    
  1480.     tag_list = tags.split('|')
  1481.     relevant_tags = [tag for tag in tag_list if keyword in tag]
  1482.    
  1483.     if not relevant_tags:
  1484.         return 'Unknown user' if extract_type == 'username' else 'Unknown chatid'
  1485.  
  1486.     last_tag = relevant_tags[-1]
  1487.     parts = last_tag.split('-')
  1488.  
  1489.     if len(parts) < 3:
  1490.         logger.error(f"Invalid '{last_tag}' tag: must contain 3 parts separated by '-'")
                # Fail soft, like the missing-tag cases above, instead of raising IndexError below
                return 'Unknown user' if extract_type == 'username' else 'Unknown chatid'
  1491.  
  1492.     if extract_type == 'username':
  1493.         return '@' + parts[1]
  1494.     elif extract_type == 'chatid':
  1495.         return parts[2]
  1496.     else:
  1497.         raise ValueError("extract_type must be 'username' or 'chatid'")
  1498.  
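        # Example of the expected tag format (made-up values): with tags
        # "4k|searcharr-mario-123456789" the last matching tag is "searcharr-mario-123456789",
        # which yields username "@mario" and chatid "123456789".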
  1499. def get_unique_tags(tags, keyword):
  1500.     if not tags:
  1501.         return []
  1502.  
  1503.     tag_list = tags.split('|')
  1504.     relevant_tags = [tag for tag in tag_list if keyword in tag]
  1505.     unique_tags = []
  1506.  
  1507.     for tag in relevant_tags:
  1508.         username = extract_telegram_data(tag, keyword, 'username')
  1509.         if username not in [extract_telegram_data(t, keyword, 'username') for t in unique_tags]:
  1510.             unique_tags.append(tag)
  1511.     return unique_tags
  1512.  
  1513. def humanize_size(size_in_bytes):
  1514.     size_in_mb = size_in_bytes / 1000000.0  # Convert to MB
  1515.     if size_in_mb > 1000:
  1516.         return f"{size_in_mb / 1000:.2f} GB"
  1517.     else:
  1518.         return f"{size_in_mb:.0f} MB"
  1519.  
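        # Examples (decimal units, 1 MB = 1,000,000 bytes):
        #   humanize_size(350_000_000)   -> "350 MB"
        #   humanize_size(1_500_000_000) -> "1.50 GB"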
  1520. def get_media_info(file_path):
  1521.     """
  1522.    Ritorna un dict con:
  1523.      duration_s, width, height,
  1524.      total_bitrate_kbps (se ffprobe lo fornisce),
  1525.      n_audio, audio_kbps_total (somma bit_rate stream audio se disponibili),
  1526.      v_codec, pix_fmt, color_primaries, transfer, matrix,
  1527.      is_10bit, is_hdr
  1528.    """
  1529.     base_cmd = [
  1530.         'docker','run','--rm',
  1531.         '-e','DOCKER_CONFIG=/dev/null',
  1532.         '--entrypoint','/usr/bin/ffprobe',
  1533.         '-v', docker_mount_point,
  1534.         'ghcr.io/aperim/nvidia-cuda-ffmpeg:latest',
  1535.         '-v','error',
  1536.         '-print_format','json',
  1537.         '-show_format','-show_streams',
  1538.         file_path
  1539.     ]
  1540.     try:
  1541.         out = subprocess.check_output(base_cmd).decode('utf-8', errors='ignore')
  1543.         data = json.loads(out)
  1544.         fmt = data.get('format', {}) or {}
  1545.         streams = data.get('streams', []) or []
  1546.  
  1547.         duration_s = float(fmt.get('duration') or 0) or 0.0
  1548.         total_bitrate_kbps = None
  1549.         if fmt.get('bit_rate'):
  1550.             try:
  1551.                 total_bitrate_kbps = int(int(fmt['bit_rate'])/1000)
  1552.             except Exception:
  1553.                 total_bitrate_kbps = None
  1554.  
  1555.         v = next((s for s in streams if s.get('codec_type')=='video'), None)
  1556.         width = (v or {}).get('width') or 0
  1557.         height = (v or {}).get('height') or 0
  1558.         v_codec = (v or {}).get('codec_name') or ''
  1559.         pix_fmt = (v or {}).get('pix_fmt') or ''
  1560.         color_primaries = (v or {}).get('color_primaries') or ''
  1561.         transfer = (v or {}).get('color_transfer') or ''
  1562.         matrix = (v or {}).get('color_space') or ''
  1563.  
  1564.         is_10bit = ('10' in (v or {}).get('bits_per_raw_sample','')) or pix_fmt in ('p010le','yuv420p10le')
  1565.         # Rough HDR heuristic: BT.2020 primaries or SMPTE2084/HLG transfer
  1566.         is_hdr = ('bt2020' in (color_primaries or '').lower()) or ('smpte2084' in (transfer or '').lower()) or ('arib-std-b67' in (transfer or '').lower())
  1567.  
  1568.         # audio
  1569.         a_streams = [s for s in streams if s.get('codec_type')=='audio']
  1570.         n_audio = len(a_streams)
  1571.         audio_kbps_total = 0
  1572.         for a in a_streams:
  1573.             try:
  1574.                 br = int(a.get('bit_rate') or 0)
  1575.                 if br > 0:
  1576.                     audio_kbps_total += int(br/1000)
  1577.                 else:
  1578.                     audio_kbps_total += AUDIO_KBPS_PER_TRACK
  1579.             except Exception:
  1580.                 audio_kbps_total += AUDIO_KBPS_PER_TRACK
  1581.         if n_audio == 0:
  1582.             audio_kbps_total = 0
  1583.  
  1584.         return {
  1585.             'duration_s': max(1.0, duration_s),
  1586.             'width': int(width or 0),
  1587.             'height': int(height or 0),
  1588.             'total_bitrate_kbps': total_bitrate_kbps,
  1589.             'n_audio': n_audio,
  1590.             'audio_kbps_total': int(audio_kbps_total),
  1591.             'v_codec': v_codec,
  1592.             'pix_fmt': pix_fmt,
  1593.             'color_primaries': color_primaries,
  1594.             'transfer': transfer,
  1595.             'matrix': matrix,
  1596.             'is_10bit': bool(is_10bit),
  1597.             'is_hdr': bool(is_hdr),
  1598.         }
  1599.     except Exception as e:
  1600.         logger.warning(f"⚠️ ffprobe media info fallito: {e}")
  1601.         return {
  1602.             'duration_s': 1.0, 'width': 1920, 'height': 1080,
  1603.             'total_bitrate_kbps': None,
  1604.             'n_audio': 1, 'audio_kbps_total': AUDIO_KBPS_PER_TRACK,
  1605.             'v_codec': '', 'pix_fmt':'', 'color_primaries':'', 'transfer':'', 'matrix':'',
  1606.             'is_10bit': False, 'is_hdr': False
  1607.         }
  1608.  
  1609. # Compatibility wrapper for the old (duration, width, height) signature
  1610. def get_video_info(file_path):
  1611.     mi = get_media_info(file_path)
  1612.     return int(mi['duration_s']), int(mi['width']), int(mi['height'])
  1613.  
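        # Sketch of typical use (the path is a placeholder; values depend on the probed file):
        #   mi = get_media_info("/Sharing/Movies/example.mkv")
        #   mi["duration_s"], mi["width"], mi["height"], mi["n_audio"], mi["is_hdr"], ...
        #   duration, width, height = get_video_info("/Sharing/Movies/example.mkv")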
  1614. async def generate_thumbnail(video_path):
  1615.     """Genera thumbnail per abilitare streaming MKV"""
  1616.     try:
  1617.         thumb_path = f"/tmp/thumb_{os.path.basename(video_path)}.jpg"
  1618.        
  1619.         ffmpeg_cmd = [
  1620.             'docker', 'run', '--rm', '--entrypoint', '/usr/bin/ffmpeg',
  1621.             '-v', docker_mount_point,
  1622.             'ghcr.io/aperim/nvidia-cuda-ffmpeg:latest',
  1623.             '-i', video_path,
  1624.             '-ss', '00:01:00',
  1625.             '-vframes', '1',
  1626.             '-q:v', '2',
  1627.             '-y', thumb_path
  1628.         ]
  1629.        
  1630.         subprocess.run(ffmpeg_cmd, check=True, capture_output=True)
  1631.        
  1632.         if os.path.exists(thumb_path):
  1633.             return thumb_path
  1634.         return None
  1635.            
  1636.     except Exception as e:
  1637.         logger.error(f"💥 Errore generazione thumbnail: {e}")
  1638.         return None
  1639.  
  1640. def load_telegram_config():
  1641.     """Carica configurazione Telegram da file JSON"""
  1642.     try:
  1643.         config_file = "/scripts/telegram_upload/telethon_upload.json"
  1644.        
  1645.         if not os.path.exists(config_file):
  1646.             logger.error(f"❌ File configurazione non trovato: {config_file}")
  1647.             return None, None
  1648.            
  1649.         with open(config_file, 'r') as f:
  1650.             config = json.load(f)
  1651.            
  1652.         api_id = config.get('api_id')
  1653.         api_hash = config.get('api_hash')
  1654.        
  1655.         if not api_id or not api_hash:
  1656.             logger.error("❌ api_id o api_hash mancanti nel file di configurazione")
  1657.             return None, None
  1658.            
  1659.         print("✅ Configurazione Telegram caricata")
  1660.         return api_id, api_hash
  1661.        
  1662.     except Exception as e:
  1663.         logger.error(f"💥 Errore caricamento configurazione: {e}")
  1664.         return None, None
  1665.  
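        # Expected shape of telethon_upload.json (placeholder values):
        #   {
        #       "api_id": 123456,
        #       "api_hash": "0123456789abcdef0123456789abcdef"
        #   }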
  1666. def _have(cmd): return which(cmd) is not None
  1667.  
  1668. def _remux_mkv_to_mp4(mkv_parts, chat_id):
  1669.     out = []
  1670.     for mkv in mkv_parts:
  1671.         mp4 = mkv.with_suffix(".mp4")
  1672.         cmd = [
  1673.             'docker','run','--rm',
                    '--entrypoint','/usr/bin/ffmpeg',  # be explicit, as in the other ffmpeg/ffprobe calls
  1674.             '-v', docker_mount_point,
  1675.             'ghcr.io/aperim/nvidia-cuda-ffmpeg:latest',
  1676.             '-hide_banner','-loglevel','error',
  1677.             '-i', str(mkv),
  1678.             '-c','copy','-movflags','+faststart',
                    '-y',  # overwrite any leftover .mp4 without prompting (no tty here)
  1679.             str(mp4)
  1680.         ]
  1681.         rc = subprocess.call(cmd)
  1682.         if rc != 0 or not mp4.exists():
  1683.             send_telegram_message(chat_id, f"❌ Remux MKV→MP4 fallito: {mkv.name}")
  1684.             return None
  1685.         out.append(mp4)
  1686.     return out
  1687.  
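        # For reference, each part is remuxed with a plain stream copy; outside Docker the
        # equivalent command would be roughly:
        #   ffmpeg -hide_banner -loglevel error -i part.mkv -c copy -movflags +faststart -y part.mp4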
  1688. def get_profile_language_iso639_2(app, api_key, host, port, entity_id):
  1689.     base = host.rstrip("/")
  1690.     if port and f":{port}" not in base:
  1691.         base = f"{base}:{port}"
  1692.     headers = {"X-Api-Key": api_key}
  1693.  
  1694.     if app == "radarr":
  1695.         movie = requests.get(f"{base}/api/v3/movie/{entity_id}", headers=headers).json()
  1696.         qpid = movie["qualityProfileId"]
  1697.         qp = requests.get(f"{base}/api/v3/qualityprofile/{qpid}", headers=headers).json()
  1698.         lang_name = qp["language"]["name"]
  1699.         return pycountry.languages.lookup(lang_name).alpha_3.lower()
  1700.  
  1701.     elif app == "sonarr":
  1702.         # 1) fetch the series' Quality Profile
  1703.         series = requests.get(f"{base}/api/v3/series/{entity_id}", headers=headers).json()
  1704.         qpid = series["qualityProfileId"]
  1705.         qp = requests.get(f"{base}/api/v3/qualityprofile/{qpid}", headers=headers).json()
  1706.  
  1707.         # 2) map Custom Formats and find the LanguageSpecification CF with the highest score
  1708.         all_cf = requests.get(f"{base}/api/v3/customformat", headers=headers).json()
  1709.         cf_by_id = {cf["id"]: cf for cf in all_cf}
  1710.  
  1711.         best = None  # (score, lang_id)
  1712.         for it in qp.get("formatItems", []):
  1713.             ref = it.get("format")
  1714.             if isinstance(ref, int):
  1715.                 cf_id = ref
  1716.             elif isinstance(ref, dict):
  1717.                 cf_id = ref.get("id") or ref.get("customFormatId")
  1718.             else:
  1719.                 continue
  1720.             cf = cf_by_id.get(cf_id)
  1721.             if not cf:
  1722.                 continue
  1723.             for spec in cf.get("specifications", []):
  1724.                 impl = (spec.get("implementation") or spec.get("implementationName") or "")
  1725.                 if "LanguageSpecification" not in impl:
  1726.                     continue
  1727.                 fields = spec.get("fields")
  1728.                 val = None
  1729.                 if isinstance(fields, dict):
  1730.                     val = fields.get("value")
  1731.                 elif isinstance(fields, list):
  1732.                     for f in fields:
  1733.                         if isinstance(f, dict) and f.get("name") == "value":
  1734.                             val = f.get("value")
  1735.                             break
  1736.                 if val is None:
  1737.                     continue
  1738.                 score = it.get("score", 0)
  1739.                 if best is None or score > best[0]:
  1740.                     best = (score, int(val))
  1741.  
  1742.         # 3) handle "Original Language" or no match at all
  1743.         if not best or best[1] < 0:
  1744.             return "und"
  1745.  
  1746.         lang_id = best[1]
  1747.  
  1748.         # 4) resolve the ID -> canonical name via the Sonarr API
  1749.         langs = requests.get(f"{base}/api/v3/language", headers=headers).json()
  1750.         lang_name = None
  1751.         for L in langs:
  1752.             if L.get("id") == lang_id:
  1753.                 lang_name = L.get("name")
  1754.                 break
  1755.  
  1756.         if not lang_name:
  1757.             return "und"
  1758.  
  1759.         # 5) normalize a few known cases and return the ISO 639-2 code
  1760.         norm = {
  1761.             "flemish": "dutch",
  1762.             "portuguese (brazil)": "portuguese",
  1763.         }
  1764.         key = lang_name.lower()
  1765.         lang_for_lookup = norm.get(key, lang_name)
  1766.  
  1767.         return pycountry.languages.lookup(lang_for_lookup).alpha_3.lower()
  1768.  
  1769.     else:
  1770.         raise ValueError("app must be 'radarr' o 'sonarr'")
  1771.  
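        # Example of the final lookup (assuming pycountry data is available):
        #   pycountry.languages.lookup("Italian").alpha_3 -> "ita"
        #   pycountry.languages.lookup("Dutch").alpha_3   -> "nld"
        # In the Sonarr branch, an unresolved or "Original" language falls back to "und".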
  1772. def send_file_to_telegram(file_path, chat_id, telegram_username):
  1773.     """Invia un file/parti streammabili rispettando il limite di 2 GB per file."""
  1774.  
  1775.     # Check that Telethon is available
  1776.     if not TELETHON_AVAILABLE:
  1777.         logger.error("❌ Telethon non disponibile - ABORT")
  1778.         send_telegram_message(owner_chatid, "❌ Errore: Telethon non installato")
  1779.         return False
  1780.  
  1781.     # Load the configuration
  1782.     api_id, api_hash = load_telegram_config()
  1783.     if not api_id or not api_hash:
  1784.         logger.error("❌ Configurazione mancante - ABORT")
  1785.         send_telegram_message(owner_chatid, "❌ Errore: Configurazione Telegram mancante")
  1786.         return False
  1787.  
  1788.     file_size = os.path.getsize(file_path)
  1789.  
  1790.     # Prepare the list of files to send (single file or parts)
  1791.     files_to_send = [file_path]
  1792.     if file_size > safe_limit_bytes:
  1793.         send_telegram_message(chat_id, f"📦 File ancora > 2GB ({file_size // 1024 // 1024} MB). Avvio split in parti…")
  1794.         parts = split_into_streamable_parts(file_path, safe_limit_bytes, chat_id)
  1795.         if not parts:
  1796.             send_telegram_message(chat_id, "❌ Split fallito")
  1797.             return False
  1798.         files_to_send = parts
  1799.         send_telegram_message(chat_id, f"✅ Create {len(parts)} parti (< {safe_limit_bytes//1024//1024} MB ciascuna)")
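                # Note on the threshold: safe_limit_bytes = 512 KiB × 4000 parts
                # = 2,097,152,000 bytes (≈ 1.95 GiB, ≈ 2.10 GB decimal), the per-part
                # ceiling used by the split above.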
  1800.  
  1801.     async def async_send():
  1802.         session_file = "/scripts/telegram_upload/telethon_upload"
  1803.         client = TelegramClient(session_file, api_id, api_hash)
  1804.         try:
  1805.             print("🔄 Connessione a Telegram...")
  1806.             await asyncio.wait_for(client.start(), timeout=30)
  1807.             print("✅ Connesso a Telegram!")
  1808.  
  1809.             send_telegram_message(chat_id, f"✅ Invio in corso...")
  1810.  
  1811.             total = len(files_to_send)
  1812.             for idx, path in enumerate(files_to_send, start=1):
  1813.                 # Streaming metadata + thumbnail
  1814.                 duration, width, height = get_video_info(path)
  1815.                 thumbnail_path = await generate_thumbnail(path)
  1816.  
  1817.                 caption = None
  1818.                 if total > 1:
  1819.                     caption = f"{os.path.basename(file_path)}\nParte {idx}/{total}"
  1820.  
  1821.                 def progress_callback(current, total_bytes):
  1822.                     # lightweight progress hook (intentionally a no-op)
  1823.                     pass
  1824.  
  1825.                 await client.send_file(
  1826.                     telegram_username,
  1827.                     path,
  1828.                     caption=caption,
  1829.                     thumb=thumbnail_path,
  1830.                     supports_streaming=True,
  1831.                     force_document=False,
  1832.                     attributes=[
  1833.                         DocumentAttributeVideo(
  1834.                             duration=duration,
  1835.                             w=width,
  1836.                             h=height,
  1837.                             supports_streaming=True
  1838.                         )
  1839.                     ],
  1840.                     progress_callback=progress_callback
  1841.                 )
  1842.  
  1843.                 if thumbnail_path and os.path.exists(thumbnail_path):
  1844.                     try: os.remove(thumbnail_path)
  1845.                     except Exception: pass
  1846.  
  1847.             send_telegram_message(chat_id, "✅ Invio completato")
  1848.             return True
  1849.  
  1850.         except asyncio.TimeoutError:
  1851.             logger.error("⏰ TIMEOUT durante avvio client Telethon")
  1852.             send_telegram_message(owner_chatid, "❌ Timeout connessione Telegram")
  1853.             return False
  1854.         except FloodWaitError as e:
  1855.             logger.warning(f"⏳ Rate limit: attendi {e.seconds}s")
  1856.             send_telegram_message(chat_id, f"⏳ Rate limit: attendi {e.seconds}s")
  1857.             await asyncio.sleep(e.seconds + 1)
  1858.             return False
  1859.         except Exception as e:
  1860.             logger.error(f"💥 Errore Telethon: {e}")
  1861.             send_telegram_message(owner_chatid, f"❌ Errore invio: {str(e)[:100]}")
  1862.             return False
  1863.         finally:
  1864.             await client.disconnect()
  1865.  
  1866.     try:
  1867.         loop = asyncio.new_event_loop()
  1868.         asyncio.set_event_loop(loop)
  1869.         result = loop.run_until_complete(async_send())
  1870.         loop.close()
  1871.         return result
  1872.     except Exception as e:
  1873.         logger.error(f"💥 Errore wrapper asincrono: {e}")
  1874.         send_telegram_message(owner_chatid, "❌ Errore sistema")
  1875.         return False
  1876.  
  1877. def update_series(sonarr_host, sonarr_port, sonarr_series_tvdbid, sonarr_api):
  1878.     # URL to get the series via tvdbId
  1879.     series_url = f"{sonarr_host}:{sonarr_port}/api/v3/series/lookup?term=tvdb:{sonarr_series_tvdbid}"
  1880.     headers = {
  1881.         "accept": "application/json",
  1882.         "X-Api-Key": sonarr_api
  1883.     }
  1884.  
  1885.     # Get the series
  1886.     response = requests.get(series_url, headers=headers)
  1887.     if response.status_code != 200:
  1888.         logger.error(f"Error in obtaining series data. Status code: {response.status_code}")
  1889.         return False
  1890.  
  1891.     series_data = response.json()
  1892.     if not series_data:
  1893.         logger.error(f"No series found for tvdbId {sonarr_series_tvdbid}")
  1894.         return False
  1895.  
  1896.     series_data = series_data[0]  # Let's take the first search result
  1897.  
  1898.     # URL to get the episodes
  1899.     episodes_url = f"{sonarr_host}:{sonarr_port}/api/v3/episode?seriesId={series_data['id']}"
  1900.     response = requests.get(episodes_url, headers=headers)
  1901.     if response.status_code != 200:
  1902.         logger.error(f"Error in obtaining episode data. Status code: {response.status_code}")
  1903.         return False
  1904.  
  1905.     episodes_data = response.json()
  1906.  
  1907.     # Check the status of episodes for each season: a season is unmonitored only
  1908.     # when none of its episodes is monitored (the old add/discard loop depended on
  1909.     # episode order and could unmonitor a season that still had monitored episodes)
  1910.     seasons_seen = {episode['seasonNumber'] for episode in episodes_data}
  1911.     seasons_with_monitored = {episode['seasonNumber'] for episode in episodes_data if episode['monitored']}
  1912.     seasons_to_unmonitor = seasons_seen - seasons_with_monitored
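            # Worked example of the sets above (hypothetical episode list):
            #   [S1E1 monitored, S1E2 unmonitored, S2E1 unmonitored]
            #   -> seasons_with_monitored = {1}, seasons_to_unmonitor = {2}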
  1915.  
  1916.     # If a season has no more monitored episodes, stop monitoring it
  1917.     for season_number in seasons_to_unmonitor:
  1918.         for index, season in enumerate(series_data['seasons']):
  1919.             if season['seasonNumber'] == season_number:
  1920.                 if series_data['seasons'][index]['monitored']:
  1921.                     series_data['seasons'][index]['monitored'] = False
  1922.                     print(f"Season {season_number} set to unmonitored.")
  1923.                 break  # We get out of the cycle once we find and modify the season
  1924.  
  1925.     # Check whether the series has monitored seasons
  1926.     monitored_seasons = [season for season in series_data['seasons'] if season['monitored']]
  1927.  
  1928.     # Update the series with changes
  1929.     update_url = f"{sonarr_host}:{sonarr_port}/api/v3/series/{series_data['id']}"
  1930.     response = requests.put(update_url, headers=headers, json=series_data)
  1931.     if response.status_code == 202:
  1932.         print(f"Series with ID {series_data['id']} successfully updated.")
  1933.     else:
  1934.         logger.error(f"Error updating series with ID {series_data['id']}. Status code: {response.status_code}")
  1935.         return False
  1936.  
  1937.     # If the series has no monitored seasons
  1938.     if not monitored_seasons:
  1939.         if series_data['status'] != 'continuing':
  1940.             # If the series does not continue, cancel the series
  1941.             delete_url = f"{sonarr_host}:{sonarr_port}/api/v3/series/{series_data['id']}"
  1942.             response = requests.delete(delete_url, headers=headers, params={"deleteFiles": "true"})
  1943.             if response.status_code == 200:
  1944.                 print(f"Series with ID {series_data['id']} successfully deleted.")
  1945.                 return True
  1946.             else:
  1947.                 logger.error(f"Error in deleting series with ID {series_data['id']}. Status code: {response.status_code}")
  1948.                 return False
  1949.         else:
  1950.             # If the series continues, do nothing
  1951.             print("Series continues, no action taken.")
  1952.     return True
  1953.  
  1954. def delete_movie(radarr_host, radarr_port, radarr_movie_id, radarr_api):
  1955.     url = f"{radarr_host}:{radarr_port}/api/v3/movie/{radarr_movie_id}"
  1956.     params = {
  1957.         "deleteFiles": "true",
  1958.         "addImportExclusion": "false"
  1959.     }
  1960.     headers = {
  1961.         "accept": "*/*",
  1962.         "X-Api-Key": radarr_api
  1963.     }
  1964.  
  1965.     response = requests.delete(url, params=params, headers=headers)
  1966.  
  1967.     if response.status_code == 200:
  1968.         print(f"Movie with ID {radarr_movie_id} successfully deleted.")
  1969.         return True
  1970.     else:
  1971.         logger.error(f"Error deleting movie with ID {radarr_movie_id}. Status code: {response.status_code}")
  1972.         return False
  1973.  
  1974. def delete_episode_file(sonarr_host, sonarr_port, sonarr_episode_file_id, sonarr_api):
  1975.     url = f"{sonarr_host}:{sonarr_port}/api/v3/episodefile/{sonarr_episode_file_id}"
  1976.     params = {
  1977.         "deleteFiles": "true"
  1978.     }
  1979.     headers = {
  1980.         "accept": "*/*",
  1981.         "X-Api-Key": sonarr_api
  1982.     }
  1983.  
  1984.     response = requests.delete(url, params=params, headers=headers)
  1985.  
  1986.     if response.status_code == 200:
  1987.         print(f"Episode file with ID {sonarr_episode_file_id} successfully deleted.")
  1988.         return True
  1989.     else:
  1990.         logger.error(f"Error deleting episode file with ID {sonarr_episode_file_id}. Status code: {response.status_code}")
  1991.         return False
  1992.  
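        # Both helpers above are thin wrappers over the *arr v3 REST API; the Sonarr call,
        # for example, is equivalent to (placeholder host/port/key):
        #   curl -X DELETE "http://localhost:8989/api/v3/episodefile/123?deleteFiles=true" \
        #        -H "X-Api-Key: <sonarr_api_key>"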
  1993. def handle_add_event(tags, title, keyword, event_type):
  1994.     item_type = "il film" if event_type == 'radarr' else "la serie"
  1995.     chat_id = owner_chatid
  1996.     message = f"{extract_telegram_data(tags, keyword, 'username')} ha appena aggiunto {item_type} {title}"
  1997.     send_telegram_message(chat_id, message)
  1998.    
  1999.     sys.exit(0)
  2000.  
  2001. def handle_grab_event(tags, title, release_quality, release_title, release_size, imdb_id, keyword, event_type):
  2002.     unique_tags = get_unique_tags(tags, keyword)
  2003.  
  2004.     if unique_tags:
  2005.         for tag in unique_tags:
  2006.             item_type = "Film aggiunto" if event_type == 'radarr' else "Serie aggiunta"
  2007.             chat_id = extract_telegram_data(tag, keyword, 'chatid')
  2008.             human_readable_size = humanize_size(int(release_size))
  2009.             message = f"<b>{item_type} al download:</b>\n{title} ({release_quality})\n[{release_title}] - {human_readable_size}\n<a href='https://imdb.com/title/{imdb_id}'>IMDB Link</a>"
  2010.             send_telegram_message(chat_id, message, 'HTML')
  2011.     else:
  2012.         print("No relevant tags found for Grab event")
  2013.    
  2014.     sys.exit(0)
  2015.  
  2016. def process_sonarr_download(sonarr_series_tags, sonarr_series_title, sonarr_series_year, sonarr_episodefile_seasonnumber,
  2017.                            sonarr_episodefile_episodenumbers, sonarr_episodefile_quality,
  2018.                            sonarr_episodefile_path, sonarr_episodefile_id, sonarr_series_tvdbid, sonarr_series_id):
  2019.     unique_tags = get_unique_tags(sonarr_series_tags, keyword)
  2020.     if unique_tags:
  2021.         for tag in unique_tags:
  2022.             chat_id = extract_telegram_data(tag, keyword, 'chatid')
  2023.             telegram_username = extract_telegram_data(tag, keyword, 'username')
  2024.  
  2025.             message = f"<b>Episodio importato correttamente:</b>\n{sonarr_series_title} ({sonarr_series_year}) - ({sonarr_episodefile_quality})\n[Stagione {sonarr_episodefile_seasonnumber} - Episodio {sonarr_episodefile_episodenumbers}]"
  2026.             send_telegram_message(chat_id, message, 'HTML')
  2027.  
  2028.             res = check_file(sonarr_episodefile_path, chat_id, sonarr_series_id, app="sonarr")
                    # If the re-encode produced a new file (e.g. container switched to .mkv), send that one
                    if isinstance(res, dict) and res.get("status") == "encoded_ok" and res.get("path"):
                        sonarr_episodefile_path = str(res["path"])
  2029.
  2030.             if send_file_to_telegram(sonarr_episodefile_path, chat_id, telegram_username):
  2031.                 delete_episode_file(sonarr_host, sonarr_port, sonarr_episodefile_id, sonarr_api)
  2032.                 update_series(sonarr_host, sonarr_port, sonarr_series_tvdbid, sonarr_api)
  2033.     else:
  2034.         print("No relevant tags found for Download event")
  2035.  
  2036. def process_radarr_download(radarr_movie_tags, radarr_movie_title, radarr_movie_year,
  2037.                            radarr_moviefile_quality, radarr_moviefile_scenename,
  2038.                            radarr_moviefile_path, radarr_movie_id):
  2039.     unique_tags = get_unique_tags(radarr_movie_tags, keyword)
  2040.     if unique_tags:
  2041.         for tag in unique_tags:
  2042.             chat_id = extract_telegram_data(tag, keyword, 'chatid')
  2043.             telegram_username = extract_telegram_data(tag, keyword, 'username')
  2044.  
  2045.             message = f"<b>Film importato correttamente:</b>\n{radarr_movie_title} ({radarr_movie_year}) - ({radarr_moviefile_quality})" + (f"\n[{radarr_moviefile_scenename}]" if radarr_moviefile_scenename else "")
  2046.             send_telegram_message(chat_id, message, 'HTML')
  2047.  
  2048.             res = check_file(radarr_moviefile_path, chat_id, radarr_movie_id, app="radarr")
                    # If the re-encode produced a new file (e.g. container switched to .mkv), send that one
                    if isinstance(res, dict) and res.get("status") == "encoded_ok" and res.get("path"):
                        radarr_moviefile_path = str(res["path"])
  2049.
  2050.             if send_file_to_telegram(radarr_moviefile_path, chat_id, telegram_username):
  2051.                 delete_movie(radarr_host, radarr_port, radarr_movie_id, radarr_api)
  2052.     else:
  2053.         print("No relevant tags found for Download event")
  2054.  
  2055. def run_radarr_sonarr_flow():
  2056.     ########## SONARR ENVIRONMENTS ##########
  2057.     sonarr_eventtype = os.environ.get('sonarr_eventtype', '')
  2058.     sonarr_series_tags = os.environ.get('sonarr_series_tags', '').lower()
  2059.     sonarr_series_title = os.environ.get('sonarr_series_title', 'Unknown Series')
  2060.     sonarr_release_size = os.environ.get('sonarr_release_size', '')
  2061.     sonarr_release_quality = os.environ.get('sonarr_release_quality', '')
  2062.     sonarr_release_title = os.environ.get('sonarr_release_title', '')
  2063.     sonarr_series_imdbid = os.environ.get('sonarr_series_imdbid', '')
  2064.     sonarr_series_year = os.environ.get('sonarr_series_year', '')
  2065.     sonarr_episodefile_quality = os.environ.get('sonarr_episodefile_quality', '')
  2066.     sonarr_episodefile_seasonnumber = os.environ.get('sonarr_episodefile_seasonnumber', '')
  2067.     sonarr_episodefile_episodenumbers = os.environ.get('sonarr_episodefile_episodenumbers', '')
  2068.     sonarr_episodefile_path = os.environ.get('sonarr_episodefile_path', '')
  2069.     sonarr_episodefile_id = os.environ.get('sonarr_episodefile_id', '')
  2070.     sonarr_series_tvdbid = os.environ.get('sonarr_series_tvdbid', '')
  2071.     sonarr_series_id = os.environ.get('sonarr_series_id', '')
  2072.     ########## SONARR ENVIRONMENTS ##########
  2073.  
  2074.     ########## RADARR ENVIRONMENTS ##########
  2075.     radarr_eventtype = os.environ.get('radarr_eventtype', '')
  2076.     radarr_movie_tags = os.environ.get('radarr_movie_tags', '').lower()
  2077.     radarr_movie_title = os.environ.get('radarr_movie_title', 'Unknown Movie')
  2078.     radarr_release_size = os.environ.get('radarr_release_size', '')
  2079.     radarr_release_quality = os.environ.get('radarr_release_quality', '')
  2080.     radarr_release_title = os.environ.get('radarr_release_title', '')
  2081.     radarr_movie_imdbid = os.environ.get('radarr_movie_imdbid', '')
  2082.     radarr_movie_year = os.environ.get('radarr_movie_year', '')
  2083.     radarr_moviefile_quality = os.environ.get('radarr_moviefile_quality', '')
  2084.     radarr_moviefile_scenename = os.environ.get('radarr_moviefile_scenename', '')
  2085.     radarr_moviefile_path = os.environ.get('radarr_moviefile_path', '')
  2086.     radarr_movie_id = os.environ.get('radarr_movie_id', '')
  2087.     ########## RADARR ENVIRONMENTS ##########
  2088.  
  2089.     # Check if the event type is provided
  2090.     if not radarr_eventtype and not sonarr_eventtype:
  2091.         logger.error("This script works only if called within Radarr or Sonarr")
  2092.         sys.exit(0)
  2093.  
  2094.     if radarr_eventtype == "Test" or sonarr_eventtype == "Test":
  2095.         print("Test in progress... Good-bye!")
  2096.         sys.exit(0)
  2097.  
  2098.     if keyword in radarr_movie_tags or keyword in sonarr_series_tags:
  2099.         # Check the event type
  2100.  
  2101.         if sonarr_eventtype == "SeriesAdd":
  2102.             handle_add_event(
  2103.                 sonarr_series_tags,
  2104.                 sonarr_series_title,
  2105.                 keyword,
  2106.                 'sonarr'
  2107.             )
  2108.  
  2109.         if radarr_eventtype == "MovieAdded":
  2110.             handle_add_event(
  2111.                 radarr_movie_tags,
  2112.                 radarr_movie_title,
  2113.                 keyword,
  2114.                 'radarr'
  2115.             )
  2116.  
  2117.         if sonarr_eventtype == "Grab":
  2118.             handle_grab_event(
  2119.                 sonarr_series_tags,
  2120.                 sonarr_series_title,
  2121.                 sonarr_release_quality,
  2122.                 sonarr_release_title,
  2123.                 sonarr_release_size,
  2124.                 sonarr_series_imdbid,
  2125.                 keyword,
  2126.                 'sonarr'
  2127.             )
  2128.  
  2129.         if radarr_eventtype == "Grab":
  2130.             handle_grab_event(
  2131.                 radarr_movie_tags,
  2132.                 radarr_movie_title,
  2133.                 radarr_release_quality,
  2134.                 radarr_release_title,
  2135.                 radarr_release_size,
  2136.                 radarr_movie_imdbid,
  2137.                 keyword,
  2138.                 'radarr'
  2139.             )
  2140.  
  2141.         if sonarr_eventtype == "Download":
  2142.             process_sonarr_download(
  2143.                 sonarr_series_tags,
  2144.                 sonarr_series_title,
  2145.                 sonarr_series_year,
  2146.                 sonarr_episodefile_seasonnumber,
  2147.                 sonarr_episodefile_episodenumbers,
  2148.                 sonarr_episodefile_quality,
  2149.                 sonarr_episodefile_path,
  2150.                 sonarr_episodefile_id,
  2151.                 sonarr_series_tvdbid,
  2152.                 sonarr_series_id
  2153.             )
  2154.  
  2155.         if radarr_eventtype == "Download":
  2156.             process_radarr_download(
  2157.                 radarr_movie_tags,
  2158.                 radarr_movie_title,
  2159.                 radarr_movie_year,
  2160.                 radarr_moviefile_quality,
  2161.                 radarr_moviefile_scenename,
  2162.                 radarr_moviefile_path,
  2163.                 radarr_movie_id
  2164.             )
  2165.  
  2166. if __name__ == '__main__':
  2167.     # Dump at startup (always runs, but writes nothing if ENV_DUMP is False)
  2168.     dump_env()
  2169.  
  2170.     # If invoked with the "manual" subcommand, run the manual send
  2171.     if len(sys.argv) > 1 and sys.argv[1] == 'manual':
  2172.         args = parse_manual_args()
  2173.         sys.exit(
  2174.             run_manual_send(
  2175.                 file_path=args.file,
  2176.                 chat_id=args.chat_id,
  2177.                 entity=args.entity,
  2178.                 split_mb=args.split
  2179.             )
  2180.         )
  2181.     else:
  2182.         # Radarr/Sonarr flow unchanged
  2183.         run_radarr_sonarr_flow()