Not a member of Pastebin yet? Sign up — it unlocks many cool features!
- #!/usr/bin/env python3
- import os
- import sys
- import math
- import logging
- import requests
- import subprocess
- import shlex
- import time
- import asyncio
- import json
- import platform
- import tempfile
- import glob
- import argparse
- import shutil
- import pycountry
- from uuid import uuid4
- from shutil import which
- from types import SimpleNamespace
- from pathlib import Path
- from datetime import datetime
- from dataclasses import dataclass
- from typing import Tuple, Dict, Any, Optional, List
- # Configure the logger
- logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
- logger = logging.getLogger(__name__)
# Attempt to import Telethon (optional dependency: needed only for Telegram uploads).
try:
    from telethon import TelegramClient
    from telethon.tl.types import DocumentAttributeVideo, DocumentAttributeFilename
    from telethon.errors import FloodWaitError, FilePartsInvalidError
    TELETHON_AVAILABLE = True
    print("✅ Telethon importato correttamente")
except ImportError as e:
    # Keep running without upload capability; callers must check TELETHON_AVAILABLE.
    TELETHON_AVAILABLE = False
    logger.error(f"❌ Telethon non disponibile: {e}")
########## CONFIGURATIONS ##########
# Telegram
telegram_bot_token = "***"  # bot token (redacted)
owner_chatid = "***"        # chat id that receives admin notifications
# Radarr
radarr_api = "***"
radarr_host = "***"
radarr_port = "***"
# Sonarr
sonarr_api = "***"
sonarr_host = "***"
sonarr_port = "***"
########## CONFIGURATIONS ##########
keyword = "searcharr"
chunk_bytes = 512 * 1024  # 512 KiB, Telethon/Telegram upload-part limit
max_parts_default = 4000  # typical server-side part limit (non-Premium)
safe_limit_factor = 1
safe_limit_bytes = int(chunk_bytes * max_parts_default * safe_limit_factor)
REENCODE_OUTPUT_EXT = ".mkv"
FORCE_MP4_PARTS = False  # if True and MP4Box is missing, remux .mkv parts to .mp4 via ffmpeg -c copy + faststart
docker_mount_point = "/srv/dev-disk-by-uuid-ecc70552-d999-49f3-b5e4-9a0250e1c559/Sharing:/Sharing"
whisperai_model_mount_point = "/srv/dev-disk-by-uuid-ecc70552-d999-49f3-b5e4-9a0250e1c559/Sharing/Telegram-bot/audiomedia-models:/models"
ENV_DUMP = False  # True/False
ENV_DUMP_FILE = "/Sharing/Telegram-bot/Sonarr-env.txt"  # output file path
# --- HEVC / quality policies ---
# Minimum H.265 floors for live-action movies/series (kbps). Tune as desired.
H265_FLOOR_KBPS = {
    2160: 12000,
    1440: 8100,
    1080: 1000,
    720: 750,
    576: 450,
    480: 420,
}
MAX_TARGET_HEIGHT = 1080
# "Safe" size estimate (target headroom) — already used throughout the code below
RETRY_ON_OVERSHOOT = True  # single retry lowering video_kbps if output exceeds safe_limit_bytes
RETRY_SAFETY_PCT = 0.005
FUDGE_VBR_FIRST_TRY = 0.0135
# Audio budget per track (used when the real bitrate cannot be read)
AUDIO_KBPS_PER_TRACK = 96
FORCE_STEREO = True  # always force 2 channels
# Technical minimum video bitrate (guard-rail)
MIN_VIDEO_KBPS = 100
# Preserve 10-bit/HDR output (uses p010le + main10)
NVENC_PRESERVE_10BIT = True
def dump_env():
    """Write every environment variable to ENV_DUMP_FILE (one KEY=value per line).

    Returns None without touching the filesystem when ENV_DUMP is disabled;
    otherwise returns the path of the file written.
    """
    if not ENV_DUMP:
        return None
    # Make sure the destination directory exists.
    parent = os.path.dirname(ENV_DUMP_FILE)
    if parent:
        os.makedirs(parent, exist_ok=True)
    # NOTE: mode "w" overwrites on every run; switch to "a" to accumulate runs.
    with open(ENV_DUMP_FILE, "w", encoding="utf-8") as handle:
        for key, value in sorted(os.environ.items()):
            handle.write(f"{key}={value}\n")
    return ENV_DUMP_FILE
- def _safe_run(cmd: list[str]) -> None:
- try:
- subprocess.run(cmd, check=False, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
- except Exception:
- pass
def update_mkv_metadata_final(out_path: Path, src_path: Path, eff_n_audio: int) -> None:
    """
    Runs ONLY once the final file is within the size limits.
    - Reset and regenerate the Track Statistics Tags (fixes MediaInfo: correct bitrate/bytes/duration)
    - Segment title = source file name (without extension)
    - First audio track default; the others non-default
    - Comments/encoded_by
    Requires mkvpropedit in PATH. If missing, returns silently.
    """
    if out_path.suffix.lower() != '.mkv':
        return
    if shutil.which('mkvpropedit') is None:
        return
    # 1) Track Statistics Tags: drop any leftovers and regenerate from the current content
    _safe_run(['mkvpropedit', str(out_path), '--delete-track-statistics-tags'])
    _safe_run(['mkvpropedit', str(out_path), '--add-track-statistics-tags'])
    # 2) Segment title
    title = src_path.stem
    _safe_run(['mkvpropedit', str(out_path), '--set', f'title={title}'])
    # 3) Default-audio flags: a1 default, the others not (mkvpropedit counts from 1)
    if eff_n_audio > 0:
        _safe_run(['mkvpropedit', str(out_path), '--edit', 'track:a1', '--set', 'flag-default=1'])
        for i in range(2, eff_n_audio+1):
            _safe_run(['mkvpropedit', str(out_path), '--edit', f'track:a{i}', '--set', 'flag-default=0'])
    # 4) Global comments (optional)
    comment = f'Encoded HEVC NVENC + AAC LC @ {AUDIO_KBPS_PER_TRACK} kbps per track'
    _safe_run(['mkvpropedit', str(out_path), '--edit', 'info', '--set', f'comment={comment}'])
    _safe_run(['mkvpropedit', str(out_path), '--edit', 'info', '--set', 'encoded_by=auto-compressor'])
def _analyze_audio_languages_with_docker(src: Path) -> list[dict]:
    """Detect per-track audio languages via the audiomedia-checker Docker image.

    Maps *src* from the host path to the container path using docker_mount_point,
    runs the checker and parses its JSON stdout.
    Raises subprocess.CalledProcessError when the container exits non-zero (check=True).
    """
    host_prefix, container_prefix = docker_mount_point.split(":", 1)
    p = src.resolve().as_posix()
    if p.startswith(host_prefix):
        container_file = container_prefix + p[len(host_prefix):]
    else:
        container_file = p  # not under the mount: use the path as-is (no assert)
    cmd = [
        "docker", "run", "--rm", "--gpus", "all",
        "-v", docker_mount_point,
        "-v", whisperai_model_mount_point,
        "chryses/audiomedia-checker:latest",
        "--gpu", "--check-all-tracks", "--dry-run", "--json",
        "--file", container_file,
    ]
    res = subprocess.run(cmd, capture_output=True, text=True, check=True)
    return json.loads(res.stdout.strip())  # e.g.: [{"track":1,"language":"ita"}, ...]
- def _decide_audio_selection(profile_language: str, langs: List[dict]) -> Tuple[bool, List[int], Optional[str], str]:
- """
- Ritorna:
- - filter_enabled: bool → True se dobbiamo filtrare (cioè rimuovere alcune tracce)
- - keep_relidx: lista 0-based degli indici relativi audio da tenere (validi per ffmpeg 0:a:{idx})
- - notify_msg: eventuale messaggio per owner_chatid
- - summary_str: riepilogo tracce tipo "#1=ita, #2=eng"
- Regole:
- - 1 traccia: tieni sempre; se lang != profilo oppure lang == "und" → notifica.
- - >1 traccia:
- - se esiste almeno una traccia == profilo → filtra tenendo tutte le == profilo e tutte le "und"
- - se nessuna == profilo → notifica e NON filtrare (tieni tutte)
- """
- langs_norm = []
- for t in langs or []:
- tr = int(t.get("track"))
- lg = (t.get("language") or "").lower()
- langs_norm.append({"track": tr, "language": lg})
- langs_norm.sort(key=lambda x: x["track"]) # #1, #2, ...
- summary_str = ", ".join(f"#{t['track']}={t['language'] or 'und'}" for t in langs_norm)
- notify_msg: Optional[str] = None
- if not langs_norm:
- # Caso limite: nessun audio rilevato → niente filtro, segnala.
- return (False, [], "Nessuna traccia audio rilevata dal checker.", summary_str)
- profile_language = (profile_language or "").lower()
- if len(langs_norm) == 1:
- t = langs_norm[0]
- lang = t["language"]
- # tieni sempre la singola traccia
- keep_relidx = [t["track"] - 1] # non verrà usato per filtrare (filter_enabled=False), ma è coerente
- if (lang != profile_language) or (lang == "und"):
- notify_msg = f"File con 1 sola traccia audio: #{t['track']}={lang} (profilo={profile_language})."
- return (False, keep_relidx, notify_msg, summary_str)
- # >1 traccia
- has_profile = any(t["language"] == profile_language for t in langs_norm)
- if not has_profile:
- # nessun match → tieni tutto, avvisa
- notify_msg = f"Nessuna traccia in lingua profilo ({profile_language}). Rilevate: {summary_str}"
- return (False, [], notify_msg, summary_str)
- # c'è almeno un match → filtriamo
- keep_relidx = [t["track"] - 1 for t in langs_norm if t["language"] == profile_language or t["language"] == "und"]
- return (True, keep_relidx, None, summary_str)
class EncodeNotFeasible(Exception):
    """Cannot honor the minimum quality floors with the given constraints (limit/duration/audio)."""
    pass
- # === helper sui bucket ===
- def _sorted_buckets(floors: Dict[int, int]) -> List[int]:
- return sorted(int(k) for k in floors.keys())
- def _floor_value_min(floors: Dict[int, int]) -> int:
- """Valore del floor del bucket più basso (quello 'minore')."""
- keys = _sorted_buckets(floors)
- return int(floors[keys[0]])
- def _bucket_floor_for_height(h: int, floors: Dict[int, int]) -> int:
- """
- Ritorna il bucket più alto <= h; se h < bucket_min, ritorna comunque il bucket_min.
- Nota: questo non implica upscale: l'uso va combinato con la logica MAX_TARGET_HEIGHT.
- """
- keys = _sorted_buckets(floors)
- cand = keys[0]
- for k in keys:
- if k <= h:
- cand = k
- else:
- break
- return cand
- def _cap_to_max_target(bucket_h: int, max_target_h: int, floors: Dict[int, int]) -> int:
- """Limita il bucket al massimo consentito (max_target_h), scendendo se necessario."""
- keys = _sorted_buckets(floors)
- allowed = [k for k in keys if k <= max_target_h]
- return allowed[-1] if allowed else keys[0]
- # def _next_lower_bucket(curr: int, floors: Dict[int, int]) -> Optional[int]:
- # keys = _sorted_buckets(floors)
- # if curr not in keys:
- # # normalizza al bucket più vicino >= curr, poi scendi
- # for k in keys:
- # if curr <= k:
- # curr = k
- # break
- # idx = keys.index(curr)
- # return keys[idx-1] if idx > 0 else None
- # def _floor_for_bucket(bucket_h: int, floors: Dict[int,int]) -> int:
- # return int(floors[int(bucket_h)])
def _select_bucket_for_v(v_kbps: int, start_bucket: int, floors: Dict[int, int]) -> Optional[int]:
    """
    Given a video budget and a starting bucket, step down until a bucket whose
    floor fits within v_kbps is found. Returns None when not even the smallest
    bucket can be satisfied.
    """
    bucket = start_bucket
    while v_kbps < _floor_for_bucket(bucket, floors):
        lower = _next_lower_bucket(bucket, floors)
        if not lower:
            return None
        bucket = lower
    return bucket
- # ================ Utilità bucket/floor =================
- def _height_bucket(h: int, floors: Dict[int, int]) -> int:
- keys = sorted(int(k) for k in floors.keys())
- for k in keys:
- if h <= k:
- return k
- return keys[-1]
- def _next_lower_bucket(curr: int, floors: Dict[int, int]) -> Optional[int]:
- keys = sorted(int(k) for k in floors.keys())
- if curr not in keys:
- curr = _height_bucket(curr, floors)
- idx = keys.index(curr)
- return keys[idx-1] if idx > 0 else None
- def _floor_for_bucket(bucket_h: int, floors: Dict[int,int]) -> int:
- return int(floors[int(bucket_h)])
# ================= ffprobe =================
def _ffprobe_json(path: str) -> Dict[str, Any]:
    """Probe *path* with ffprobe and return the parsed JSON (streams + format).

    Raises RuntimeError with ffprobe's stderr when the probe fails.
    """
    cmd = [
        "ffprobe", "-v", "error", "-print_format", "json",
        "-show_streams", "-show_format", path,
    ]
    proc = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    if proc.returncode != 0:
        raise RuntimeError(f"ffprobe failed: {proc.stderr.decode('utf-8', 'ignore')}")
    return json.loads(proc.stdout.decode('utf-8', 'ignore'))
- def _fmt_bytes(b: int) -> str:
- try:
- if b >= 1024**3: return f"{b/(1024**3):.2f} GiB"
- if b >= 1024**2: return f"{b/(1024**2):.0f} MiB"
- if b >= 1024: return f"{b/1024:.0f} KiB"
- return f"{b} B"
- except Exception:
- return f"{b} B"
- def _fmt_hms(sec: float) -> str:
- s = int(round(sec))
- h = s // 3600; m = (s % 3600) // 60; s = s % 60
- return f"{h:02d}:{m:02d}:{s:02d}"
- def _clone_plan_with_new_v(plan, new_v_kbps: int):
- # Clona plan1 mantenendo audio e vf_height; aggiorna v_kbps e safe_total_kbps
- d = dict(plan.__dict__) if hasattr(plan, "__dict__") else dict(
- safe_total_kbps=getattr(plan, "safe_total_kbps"),
- target_audio_kbps_total=getattr(plan, "target_audio_kbps_total"),
- v_kbps=getattr(plan, "v_kbps"),
- vf_height=getattr(plan, "vf_height"),
- source_video_kbps_est=getattr(plan, "source_video_kbps_est", None),
- )
- d["v_kbps"] = int(max(1, new_v_kbps))
- d["safe_total_kbps"] = int(d["target_audio_kbps_total"] + d["v_kbps"])
- return SimpleNamespace(**d)
- def _compute_retry_v_kbps(duration_s: float, out1_bytes: int, limit_bytes: int,
- v1_kbps: int, safety_pct: float, floor_kbps: int = 200) -> int:
- """
- Spalma l'eccesso di byte sulla durata:
- delta_tot_kbps = ceil( (bytes_over * 8) / duration_s / 1000 )
- v2 = v1 - ceil(delta_tot_kbps * (1 + safety_pct))
- Audio resta invariato, quindi tutta la riduzione grava sul video.
- """
- if duration_s <= 0:
- # fallback prudente: togli 10% + safety
- dec = int(math.ceil(v1_kbps * (0.10 * (1.0 + safety_pct))))
- return max(floor_kbps, v1_kbps - dec)
- bytes_over = max(0, out1_bytes - limit_bytes)
- if bytes_over <= 0:
- return v1_kbps # non dovrebbe capitare, ma lasciamo invariato
- delta_tot_kbps = int(math.ceil((bytes_over * 8) / duration_s / 1000.0))
- delta_with_margin = int(math.ceil(delta_tot_kbps * (1.0 + safety_pct)))
- v2 = max(floor_kbps, v1_kbps - delta_with_margin)
- return v2
# ================ Context extraction =================
@dataclass
class EncodeContext:
    # Probe results for one source file, as produced by extract_encode_context().
    file_path: str              # normalized source path
    ext_low: str                # lowercased file extension (e.g. ".mkv")
    duration_s: float           # duration in seconds (from ffprobe)
    width: int                  # video width in pixels
    height: int                 # video height in pixels
    height_bucket: int          # bucket from H265_FLOOR_KBPS matching `height`
    is_10bit: bool              # True when the source looks 10-bit (pix_fmt heuristic)
    n_audio: int                # number of audio streams
    source_total_kbps_est: int  # whole-file bitrate estimate (size*8/duration)
    source_video_kbps_est: int  # video-only bitrate estimate
def extract_encode_context(file_path: str) -> EncodeContext:
    """Probe *file_path* with ffprobe and distill the data needed for planning.

    Returns an EncodeContext with duration, geometry, a 10-bit heuristic,
    audio-track count and bitrate estimates.

    Raises:
        FileNotFoundError: the path does not exist.
        ValueError: duration/video stream/dimensions missing or invalid.
        RuntimeError: ffprobe itself failed (propagated from _ffprobe_json).
    """
    src = Path(file_path)
    if not src.exists():
        raise FileNotFoundError("File non trovato")
    info = _ffprobe_json(str(src))
    fmt = info.get("format", {})
    streams = info.get("streams", [])
    # Duration: prefer the container value, fall back to the video stream.
    # FIX: narrowed bare `except:` clauses to (TypeError, ValueError) so
    # SystemExit/KeyboardInterrupt are no longer swallowed.
    dur = None
    if fmt.get("duration"):
        try:
            dur = float(fmt["duration"])
        except (TypeError, ValueError):
            pass
    if not dur:
        for st in streams:
            if st.get("codec_type") == "video" and st.get("duration"):
                try:
                    dur = float(st["duration"])
                    break
                except (TypeError, ValueError):
                    pass
    if not dur or dur <= 0:
        raise ValueError("Durata non disponibile da ffprobe")
    # Main video stream
    v = next((s for s in streams if s.get("codec_type") == "video"), None)
    if not v:
        raise ValueError("Stream video non trovato")
    w = int(v.get("width") or 0)
    h = int(v.get("height") or 0)
    if w <= 0 or h <= 0:
        raise ValueError("Dimensioni video non valide")
    pix_fmt = (v.get("pix_fmt") or "").lower()
    bits_raw = v.get("bits_per_raw_sample")
    is_10 = False
    if isinstance(bits_raw, str):
        try:
            is_10 = int(bits_raw) >= 10
        except ValueError:
            pass
    if not is_10:
        # Heuristic on the pixel-format name.
        is_10 = any(tag in pix_fmt for tag in ("p10", "yuv420p10", "p010"))
    # Audio tracks
    a_streams = [s for s in streams if s.get("codec_type") == "audio"]
    n_a = len(a_streams)
    # Whole-file bitrate estimate
    file_bytes = src.stat().st_size
    total_kbps_est = max(1, int((file_bytes * 8) / dur / 1000))
    # Audio bitrate estimate: sum declared per-stream bitrates when present.
    audio_known = 0
    for a in a_streams:
        try:
            audio_known += int(a.get("bit_rate") or 0)
        except (TypeError, ValueError):
            pass
    audio_kbps_known = audio_known // 1000 if audio_known > 0 else 0
    audio_kbps_guess = n_a * AUDIO_KBPS_PER_TRACK if audio_kbps_known == 0 else audio_kbps_known
    # Video bitrate estimate: declared value, else total minus audio.
    v_bit_rate = v.get("bit_rate")
    if v_bit_rate:
        try:
            src_v_kbps = max(MIN_VIDEO_KBPS, int(int(v_bit_rate) / 1000))
        except (TypeError, ValueError):
            src_v_kbps = max(MIN_VIDEO_KBPS, total_kbps_est - audio_kbps_guess)
    else:
        src_v_kbps = max(MIN_VIDEO_KBPS, total_kbps_est - audio_kbps_guess)
    bucket = _height_bucket(h, H265_FLOOR_KBPS)
    return EncodeContext(
        file_path=str(src),
        ext_low=src.suffix.lower(),
        duration_s=dur,
        width=w,
        height=h,
        height_bucket=bucket,
        is_10bit=is_10,
        n_audio=n_a,
        source_total_kbps_est=total_kbps_est,
        source_video_kbps_est=src_v_kbps,
    )
# ================ Target computation (first/second pass) =================
@dataclass
class TargetPlan:
    # Encoding plan produced by compute_target_for_size()/adjust_plan_after_retry().
    vf_height: int                 # target output height (bucket)
    v_kbps: int                    # target video bitrate
    target_audio_kbps_total: int   # total audio budget across kept tracks
    safe_total_kbps: int           # audio + video budget fitting the size limit
    source_video_kbps_est: int     # source video bitrate estimate (for caps)
- # def compute_target_for_size(ctx: EncodeContext, safe_limit_bytes: int, n_audio_override: Optional[int] = None) -> TargetPlan:
- # safe_total_kbps = max(1, int((safe_limit_bytes * 8) / ctx.duration_s / 1000))
- # used_n_audio = n_audio_override if (n_audio_override is not None) else ctx.n_audio
- # audio_kbps = max(0, used_n_audio) * AUDIO_KBPS_PER_TRACK
- # v_kbps = max(MIN_VIDEO_KBPS, safe_total_kbps - audio_kbps)
- # # cap al bitrate video della sorgente
- # if v_kbps > ctx.source_video_kbps_est:
- # v_kbps = ctx.source_video_kbps_est
- # vf_h = ctx.height_bucket # come già facevi
- # return TargetPlan(
- # safe_total_kbps=safe_total_kbps,
- # target_audio_kbps_total=audio_kbps,
- # v_kbps=v_kbps,
- # vf_height=vf_h,
- # source_video_kbps_est=ctx.source_video_kbps_est
- # )
def compute_target_for_size(
    ctx: EncodeContext,
    safe_limit_bytes: int,
    n_audio_override: Optional[int] = None,
) -> TargetPlan:
    """Build the first-pass TargetPlan that fits within *safe_limit_bytes*.

    Enforces the H265_FLOOR_KBPS quality floors (stepping buckets down when
    needed), never upscales above the source height or MAX_TARGET_HEIGHT, and
    caps the video bitrate to the source estimate only when no real downscale
    occurs. Raises EncodeNotFeasible when no bucket can be satisfied (→ split).
    """
    # 1) total "safe" budget
    safe_total_kbps = max(1, int((safe_limit_bytes * 8) / ctx.duration_s / 1000))
    # 2) audio budget
    used_n_audio = n_audio_override if (n_audio_override is not None) else ctx.n_audio
    # NOTE(review): if a ctx.source_audio_kbps_known field existed, it should be preferred when > 0.
    audio_kbps = max(0, used_n_audio) * AUDIO_KBPS_PER_TRACK
    # 3) candidate video budget
    v_kbps_cand = max(MIN_VIDEO_KBPS, safe_total_kbps - audio_kbps)
    # 4) feasibility precheck against the smallest floor (not a hardcoded "480")
    min_floor_val = _floor_value_min(H265_FLOOR_KBPS)
    if v_kbps_cand < min_floor_val:
        raise EncodeNotFeasible(
            f"Video budget {v_kbps_cand} kb/s < min floor {min_floor_val} kb/s → split richiesto"
        )
    # 5) starting bucket: floor of the source height (no upscale), clamped to the max target
    src_h = int(ctx.height)
    bucket_src_floor = _bucket_floor_for_height(src_h, H265_FLOOR_KBPS)
    bucket_max = _cap_to_max_target(bucket_src_floor, MAX_TARGET_HEIGHT, H265_FLOOR_KBPS)
    bucket_start = min(bucket_src_floor, bucket_max)
    # 6) enforce the floors (step down until floor <= v)
    chosen_bucket = _select_bucket_for_v(v_kbps_cand, bucket_start, H265_FLOOR_KBPS)
    if not chosen_bucket:
        raise EncodeNotFeasible(
            f"Nessun bucket soddisfa {v_kbps_cand} kb/s (min floor={min_floor_val}) → split richiesto"
        )
    # 7) conditional cap to the source ONLY when there is no real downscale
    #    real downscale = target_height < min(src_h, effective MAX_TARGET_HEIGHT in pixels)
    #    compared on real heights, not on nominal buckets above src_h
    downscale_real = chosen_bucket < min(src_h, MAX_TARGET_HEIGHT)
    v_final = v_kbps_cand
    if not downscale_real and v_kbps_cand > ctx.source_video_kbps_est:
        v_cap = ctx.source_video_kbps_est
        # Apply the cap only if it does not break the floor of the chosen bucket.
        if v_cap >= _floor_for_bucket(chosen_bucket, H265_FLOOR_KBPS):
            v_final = v_cap
        else:
            # The cap would break the floor. Prefer stepping down a bucket and
            # NOT applying the cap (rule: after a downscale the cap no longer applies).
            lower_ok = _select_bucket_for_v(v_kbps_cand, _next_lower_bucket(chosen_bucket, H265_FLOOR_KBPS) or chosen_bucket, H265_FLOOR_KBPS)
            if lower_ok:
                chosen_bucket = lower_ok
                # after the downscale, the cap does not apply
                v_final = v_kbps_cand
            else:
                # Not feasible even stepping down → give up
                raise EncodeNotFeasible(
                    "Cap alla sorgente impedisce il rispetto del floor e non esistono bucket inferiori → split richiesto"
                )
    # 8) return the plan
    return TargetPlan(
        safe_total_kbps=safe_total_kbps,
        target_audio_kbps_total=audio_kbps,
        v_kbps=int(v_final),
        vf_height=int(chosen_bucket),
        source_video_kbps_est=ctx.source_video_kbps_est,
    )
# === adjustment for attempt 2 (retry) ===
def adjust_plan_after_retry(
    ctx: EncodeContext,
    prev_plan: TargetPlan,
    new_video_kbps: int,
) -> TargetPlan:
    """
    Given the v_kbps recomputed by the retry mechanism (based on the overshoot),
    re-apply the floors and, if needed, step down a bucket. ALWAYS uses the same
    policy as attempt 1.
    """
    v2 = max(MIN_VIDEO_KBPS, int(new_video_kbps))
    min_floor_val = _floor_value_min(H265_FLOOR_KBPS)
    if v2 < min_floor_val:
        raise EncodeNotFeasible(
            f"Retry: v={v2} kb/s < min floor {min_floor_val} kb/s → split richiesto"
        )
    # starting bucket = the one from the previous attempt (never climb back up),
    # and in any case not beyond MAX_TARGET_HEIGHT
    start_bucket = min(prev_plan.vf_height, MAX_TARGET_HEIGHT)
    chosen_bucket = _select_bucket_for_v(v2, start_bucket, H265_FLOOR_KBPS)
    if not chosen_bucket:
        raise EncodeNotFeasible(
            f"Retry: nessun bucket soddisfa {v2} kb/s (min floor={min_floor_val}) → split richiesto"
        )
    # Real DOWNscale = comparison against the source, NOT against the max target.
    downscale_real = chosen_bucket < ctx.height
    v_final = v2
    if not downscale_real and v2 > ctx.source_video_kbps_est:
        v_cap = ctx.source_video_kbps_est
        if v_cap >= _floor_for_bucket(chosen_bucket, H265_FLOOR_KBPS):
            v_final = v_cap
        else:
            # Step down another bucket (when possible) and ignore the cap
            lower_ok = _select_bucket_for_v(
                v2,
                _next_lower_bucket(chosen_bucket, H265_FLOOR_KBPS) or chosen_bucket,
                H265_FLOOR_KBPS
            )
            if lower_ok and lower_ok != chosen_bucket:
                chosen_bucket = lower_ok
                v_final = v2
            else:
                raise EncodeNotFeasible(
                    "Retry: cap romperebbe il floor e non esistono bucket inferiori → split richiesto"
                )
    return TargetPlan(
        safe_total_kbps=int(v_final + prev_plan.target_audio_kbps_total),  # recomputed
        target_audio_kbps_total=prev_plan.target_audio_kbps_total,
        v_kbps=int(v_final),
        vf_height=int(chosen_bucket),
        source_video_kbps_est=ctx.source_video_kbps_est,
    )
def adjust_safe_limit_for_overshoot(safe_limit_bytes: int, actual_bytes: int) -> int:
    """Shrink the size limit by the observed overshoot plus the RETRY_SAFETY_PCT margin.

    Returns the limit unchanged when there is no overshoot; never goes below
    16 MiB to avoid degenerate inputs.
    """
    overshoot = actual_bytes - safe_limit_bytes
    if overshoot <= 0:
        return safe_limit_bytes
    shrunk = int(safe_limit_bytes - overshoot * (1.0 + float(RETRY_SAFETY_PCT)))
    return max(shrunk, 16 * 1024 * 1024)
# ================= Docker/FFmpeg helpers =================
def _docker_ffmpeg_args(cpus=None, cpuset=None, use_gpu=False):
    """Build the `docker run` prefix for the CUDA ffmpeg image.

    Supports optional CPU limiting (--cpus), CPU pinning (--cpuset-cpus) and
    NVIDIA GPU passthrough.
    """
    args = ['docker', 'run', '--rm',
            '-e', 'DOCKER_CONFIG=/dev/null']
    if cpus:
        args += ['--cpus', str(cpus)]
    if cpuset:
        args += ['--cpuset-cpus', str(cpuset)]
    if use_gpu:
        args += ['--gpus=all',
                 '-e', 'NVIDIA_VISIBLE_DEVICES=all',
                 '-e', 'NVIDIA_DRIVER_CAPABILITIES=all']
    args += [
        '--security-opt', 'seccomp=unconfined',
        '--cap-add', 'SYS_NICE',
        '-v', docker_mount_point,
        'ghcr.io/aperim/nvidia-cuda-ffmpeg:latest',
    ]
    return args
def _nvenc_cmd(
    file_path: str,
    out_path: str,
    vkbps: int,
    vf_h: int,
    is_10bit: bool,  # not used here
    n_audio: int,
    ext_low: str,  # compat
    audio_map_relidx: list[int] | None = None,
    force_stereo: bool = FORCE_STEREO,
    use_dplii_matrix: bool = True,
    keep_subtitles: bool = True,  # new: when True, keep subtitle streams
) -> list[str]:
    """Build the dockerized ffmpeg command line for an HEVC NVENC encode.

    Video is scaled to *vf_h* and encoded at *vkbps*; audio is re-encoded to
    AAC LC (optionally forced stereo with a Dolby Pro Logic II downmix matrix),
    subtitles are stream-copied, and container metadata is stripped (it gets
    re-set later, after the size check).
    """
    cmd = _docker_ffmpeg_args(use_gpu=True) + [
        '-y', '-nostdin', '-hide_banner', '-loglevel', 'error',
        '-hwaccel', 'cuda',
        '-i', file_path,
        '-vf', f'scale=-2:{vf_h}:force_original_aspect_ratio=decrease',
    ]
    # --- Mapping ---
    # Main video stream
    cmd += ['-map', '0:v:0']
    # Audio: either an explicit filter or every track
    if audio_map_relidx is not None:
        cmd += ['-map', '-0:a']  # drop all audio first
        for idx in sorted(audio_map_relidx):
            cmd += ['-map', f'0:a:{idx}']
        eff_n_audio = len(audio_map_relidx)
    else:
        cmd += ['-map', '0:a?']
        eff_n_audio = n_audio
    # Subtitles (when requested)
    if keep_subtitles:
        cmd += ['-map', '0:s?']
        eff_n_subs = 1  # only needed for the copy flag; no per-index mapping here
    else:
        eff_n_subs = 0
    # --- Video (original NVENC settings) ---
    cmd += [
        '-c:v', 'hevc_nvenc',
        '-rc', 'vbr_hq', '-multipass', 'fullres',
        '-b:v', f'{int(vkbps)}k',
        '-maxrate', f'{int(vkbps*1.08)}k',
        '-bufsize', f'{int(vkbps*2.0)}k',
        '-rc-lookahead', '32',
        '-spatial_aq', '1', '-temporal_aq', '1', '-aq-strength', '8',
        '-bf', '3', '-b_ref_mode', 'middle',
        '-g', '240',
    ]
    # --- Audio ---
    if eff_n_audio > 0:
        cmd += ['-c:a', 'libfdk_aac']
        # Global default (covers any stream not addressed by index)
        cmd += ['-b:a', f'{AUDIO_KBPS_PER_TRACK}k']
        # Per-track settings
        for i in range(eff_n_audio):
            cmd += [f'-b:a:{i}', f'{AUDIO_KBPS_PER_TRACK}k']
            if force_stereo:
                cmd += [f'-ac:a:{i}', '2']
            if use_dplii_matrix:
                # tag the downmix with the Dolby Pro Logic II encoding matrix (useful metadata)
                cmd += [f'-filter:a:{i}', 'aresample=matrix_encoding=dplii']
            # Clean track title
            cmd += [f'-metadata:s:a:{i}', f'title=AAC LC 2.0 {AUDIO_KBPS_PER_TRACK} kbps']
        # Disposition: first audio default, the others non-default
        cmd += ['-disposition:a:0', 'default']
        for i in range(1, eff_n_audio):
            cmd += [f'-disposition:a:{i}', '0']  # clears default/forced if inherited
    else:
        cmd += ['-an']  # no audio track
    # --- Subtitles: copy "as-is" (MKV handles srt/ass/pgs/vobsub fine) ---
    if keep_subtitles and eff_n_subs:
        cmd += ['-c:s', 'copy']
        # optional: make sure no subtitle stream stays flagged "default"
        # (avoids surprises when ffmpeg inherits the flags)
        cmd += ['-disposition:s', '0']
    # --- Container metadata/chapters ---
    # Wipe everything: BPS/NUMBER_OF_BYTES/encoder etc. (re-set after the size check)
    cmd += ['-map_metadata', '-1']
    # Matroska container (we only work in mkv)
    cmd.append(out_path)
    return cmd
- def _run(cmd: List[str]) -> Tuple[int, float, str]:
- start = time.time()
- p = subprocess.run(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.PIPE)
- elapsed = time.time() - start
- return p.returncode, elapsed, (p.stderr.decode('utf-8', errors='ignore') if p.stderr else "")
def run_encode_once(out_path: str, ctx: EncodeContext, plan: TargetPlan, audio_map_relidx: Optional[List[int]] = None) -> Tuple[int, float, int]:
    """Run a single NVENC encode attempt according to *plan*.

    Removes any stale *out_path* (best-effort), builds the ffmpeg command via
    _nvenc_cmd, executes it and logs the outcome.
    Returns (returncode, elapsed_seconds, output_size_bytes); size is -1 on failure.
    """
    try:
        if os.path.exists(out_path):
            os.remove(out_path)
    except Exception:
        pass
    cmd = _nvenc_cmd(
        file_path=ctx.file_path,
        out_path=out_path,
        vkbps=int(plan.v_kbps),
        vf_h=int(plan.vf_height),
        is_10bit=ctx.is_10bit,
        n_audio=ctx.n_audio,
        ext_low=Path(ctx.file_path).suffix.lower(),
        audio_map_relidx=audio_map_relidx
    )
    eff_n_audio = (len(audio_map_relidx) if audio_map_relidx is not None else ctx.n_audio)
    logger.info(
        "[encode] start: out=%s, v=%s kb/s, a_total=%s kb/s, total=%s kb/s, vf_h=%s, audio_streams=%d, dur=%s (%.0fs)",
        out_path, plan.v_kbps, plan.target_audio_kbps_total, plan.safe_total_kbps,
        plan.vf_height, eff_n_audio, _fmt_hms(ctx.duration_s), ctx.duration_s
    )
    rc, elapsed, stderr = _run(cmd)
    out_size = os.path.getsize(out_path) if (rc == 0 and os.path.exists(out_path)) else -1
    if rc != 0:
        tail = (stderr[-400:] if stderr else "")
        logger.error(
            "[encode] failed: rc=%s, elapsed=%.1fs, out_exists=%s, stderr_tail=%s",
            rc, elapsed, os.path.exists(out_path), tail
        )
    else:
        # effective total bitrate and realtime factor
        eff_total_kbps = int((out_size * 8) / max(1.0, ctx.duration_s) / 1000)
        delta_pct = ( (eff_total_kbps / plan.safe_total_kbps - 1.0) * 100.0 ) if plan.safe_total_kbps else 0.0
        rt_factor = (ctx.duration_s / max(1e-6, elapsed))
        logger.info(
            "[encode] done: rc=0, elapsed=%.1fs (%.2fx realtime), size=%s (%d B), eff_total=%d kb/s "
            "(plan_total=%s, Δ=%.2f%%)",
            elapsed, rt_factor, _fmt_bytes(out_size), out_size,
            eff_total_kbps, plan.safe_total_kbps, delta_pct
        )
    return rc, elapsed, out_size
# ===================== Split (your function, made robust) =====================
def split_into_streamable_parts(file_path: str, safe_limit_bytes: int, chat_id: int):
    """
    Size-based split using container-aware tools.
    - MP4: MP4Box -splits XM (preferred). Without MP4Box: mkvmerge fallback (MKV), optional remux to MP4.
    - MKV: mkvmerge --split size:XMiB
    - Others: mkvmerge --split size:XM (optional remux to MP4 when FORCE_MP4_PARTS=True)
    - No refinement loop: safe_limit_bytes is the per-part target maximum. Small variations are possible.
    Returns: list of part paths (strings), or [original file] when under the limit, or None on error.
    Requires: logger, send_telegram_message, plus FORCE_MP4_PARTS and _remux_mkv_to_mp4 when used.
    """
    def _log_info(msg: str):
        # Fall back to print() when the module-level logger is not defined.
        try: logger.info(msg)
        except NameError: print(msg)
    def _log_warn(msg: str):
        try: logger.warning(msg)
        except NameError: print(f"WARNING: {msg}")
    def _have(tool: str) -> bool:
        return shutil.which(tool) is not None
    try:
        src = Path(file_path)
        if not src.exists():
            send_telegram_message(chat_id, "❌ File non trovato per split")
            return None
        if not safe_limit_bytes or safe_limit_bytes <= 0:
            send_telegram_message(chat_id, "❌ Limite per parte non valido (safe_limit_bytes)")
            return None
        total_size = src.stat().st_size
        if total_size <= safe_limit_bytes:
            return [str(src)]
        out_dir = src.parent
        ext = src.suffix.lower()
        # Target size (whole MB, minimum 1)
        size_mb = max(1, int(safe_limit_bytes // (1024 * 1024)))
        size_mib = size_mb  # alias for mkvmerge "MiB"
        created: List[Path] = []
        # Preventive cleanup of previous outputs matching known patterns
        # MKV pattern: {stem}.split-001.mkv
        for p in out_dir.glob(f"{src.stem}.split-*.mkv"):
            try: p.unlink()
            except Exception: pass
        # MP4Box pattern: {stem}_0001.mp4
        for p in out_dir.glob(f"{src.stem}_*.mp4"):
            try: p.unlink()
            except Exception: pass
        if ext == ".mkv":
            if not _have("mkvmerge"):
                send_telegram_message(chat_id, "❌ mkvmerge non trovato (pacchetto mkvtoolnix)")
                return None
            base = out_dir / f"{src.stem}.split.mkv"
            cmd = ["mkvmerge", "-o", str(base), "--split", f"size:{size_mib}MiB", str(src)]
            _log_info(f"[split] MKV → {' '.join(shlex.quote(c) for c in cmd)}")
            run = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            rc = run.returncode
            if rc != 0:
                err = (run.stderr.decode('utf-8', 'ignore') if run.stderr else "")
                logger.error("[split] mkvmerge rc=%s, stderr(last400)=%s", rc, err[-400:])
                send_telegram_message(chat_id, "❌ mkvmerge: split fallito")
                return None
            mkv_parts = sorted(out_dir.glob(f"{src.stem}.split-*.mkv"))
            if not mkv_parts and base.exists():
                mkv_parts = [base]
            if not mkv_parts:
                send_telegram_message(chat_id, "❌ Nessuna parte creata (mkvmerge)")
                return None
            try:
                if FORCE_MP4_PARTS:
                    mp4_parts = _remux_mkv_to_mp4(mkv_parts, chat_id)
                    if mp4_parts is None:
                        return None
                    created = [Path(p) for p in mp4_parts]
                else:
                    created = mkv_parts
            except NameError:
                # _remux_mkv_to_mp4 / FORCE_MP4_PARTS not defined → keep mkv parts
                created = mkv_parts
        elif ext == ".mp4":
            if _have("MP4Box"):
                cmd = ["MP4Box", "-splits", f"{size_mb}M", str(src)]
                _log_info(f"[split] MP4 → {' '.join(shlex.quote(c) for c in cmd)}")
                rc = subprocess.call(cmd, cwd=str(out_dir))
                if rc != 0:
                    send_telegram_message(chat_id, "❌ MP4Box: split fallito")
                    return None
                created = sorted([p for p in out_dir.glob("*.mp4") if p.stem.startswith(src.stem + "_")])
                if not created:
                    created = sorted(out_dir.glob(f"{src.stem}_*.mp4"))
            elif _have("mkvmerge"):
                base = out_dir / f"{src.stem}.split.mkv"
                cmd = ["mkvmerge", "-o", str(base), "--split", f"size:{size_mb}M", str(src)]
                _log_info(f"[split] MP4→MKV (fallback) → {' '.join(shlex.quote(c) for c in cmd)}")
                rc = subprocess.call(cmd)
                if rc != 0:
                    send_telegram_message(chat_id, "❌ mkvmerge: split fallito")
                    return None
                mkv_parts = sorted(out_dir.glob(f"{src.stem}.split-*.mkv"))
                if not mkv_parts:
                    send_telegram_message(chat_id, "❌ Nessuna parte creata (mkvmerge)")
                    return None
                try:
                    if FORCE_MP4_PARTS:
                        mp4_parts = _remux_mkv_to_mp4(mkv_parts, chat_id)
                        if mp4_parts is None:
                            return None
                        created = [Path(p) for p in mp4_parts]
                    else:
                        created = mkv_parts
                except NameError:
                    created = mkv_parts
            else:
                send_telegram_message(chat_id, "❌ Nessuno strumento per split MP4 (installa GPAC/MP4Box o mkvtoolnix)")
                return None
        elif ext in (".avi", ".mov", ".m4v", ".ts", ".m2ts", ".flv", ".webm", ".mpeg", ".mpg"):
            if not _have("mkvmerge"):
                send_telegram_message(chat_id, f"❌ mkvmerge non trovato per split di {ext} (installa mkvtoolnix)")
                return None
            base = out_dir / f"{src.stem}.split.mkv"
            cmd = ["mkvmerge", "-o", str(base), "--split", f"size:{size_mb}M", str(src)]
            _log_info(f"[split] {ext.upper()}→MKV → {' '.join(shlex.quote(c) for c in cmd)}")
            rc = subprocess.call(cmd)
            if rc != 0:
                send_telegram_message(chat_id, "❌ mkvmerge: split fallito")
                return None
            mkv_parts = sorted(out_dir.glob(f"{src.stem}.split-*.mkv"))
            if not mkv_parts:
                send_telegram_message(chat_id, "❌ Nessuna parte creata (mkvmerge)")
                return None
            try:
                if FORCE_MP4_PARTS:
                    mp4_parts = _remux_mkv_to_mp4(mkv_parts, chat_id)
                    if mp4_parts is None:
                        return None
                    created = [Path(p) for p in mp4_parts]
                else:
                    created = mkv_parts
            except NameError:
                created = mkv_parts
        else:
            send_telegram_message(chat_id, f"❌ Estensione non supportata per split: {ext}")
            return None
        if not created:
            send_telegram_message(chat_id, "❌ Split: nessun output rilevato")
            return None
        # Warn (don't fail) when a part still exceeds the requested limit.
        overs = [p for p in created if p.stat().st_size > safe_limit_bytes]
        if overs:
            worst = max(overs, key=lambda p: p.stat().st_size)
            _log_warn(f"[split] Una parte supera il target richiesto: {worst.name} = {worst.stat().st_size // (1024*1024)} MB")
        return [str(p) for p in created]
    except Exception as e:
        try: logger.exception(f"[split] errore inatteso: {e}")
        except NameError: print(f"[split] errore inatteso: {e}")
        try: send_telegram_message(chat_id, f"❌ Errore nello split: {e}")
        except NameError: pass
        return None
- # ===================== CHECK_FILE (completa) =====================
def check_file(file_path: str, chat_id: int, item_id: int, app: str) -> dict:
    """Re-encode *file_path* (GPU HEVC NVENC) so it fits the Telegram size limit.

    Flow:
    - If the original is already <= the limit: status "original_ok".
    - Otherwise:
        Attempt 1: compute the encode plan against a slightly "fudged" limit
        (the fudge is applied only here).
          If the result fits (<= the real limit): replace the source, "encoded_ok".
          If it overshoots and RETRY_ON_OVERSHOOT is True: Attempt 2:
            - keeps the audio plan and vf_height of attempt 1;
            - recomputes ONLY v_kbps, spreading the excess plus a safety margin.
          If attempt 2 fits: replace the source, "encoded_ok".
          Otherwise: "too_big" (splitting is left to the caller).

    Returns a dict with "status" in {"original_ok", "encoded_ok", "too_big",
    "error"} plus "path" and, where relevant, "size", "msg" and "plan".
    """
    try:
        src = Path(file_path)
        if not src.exists():
            msg = "❌ File non trovato"
            # Best-effort notification: a Telegram failure must not mask the result
            try: send_telegram_message(chat_id, msg)
            except Exception: pass
            return {"status": "error", "msg": msg, "path": None}
        orig_size = src.stat().st_size
        if orig_size <= safe_limit_bytes:
            logger.info(
                "[check] originale <= limite: size=%s (%d B) <= limit=%s (%d B) — skip encode.",
                _fmt_bytes(orig_size), orig_size, _fmt_bytes(safe_limit_bytes), safe_limit_bytes
            )
            return {"status": "original_ok", "path": str(src), "size": orig_size}
        # Pick the *arr instance whose quality profile drives the audio selection
        if app == "sonarr":
            api_key = sonarr_api
            host = sonarr_host
            port = sonarr_port
        elif app == "radarr":
            api_key = radarr_api
            host = radarr_host
            port = radarr_port
        else:
            raise ValueError("app must be 'radarr' or 'sonarr'")
        profile_language = get_profile_language_iso639_2(app, api_key, host, port, item_id)
        langs = _analyze_audio_languages_with_docker(src)
        filter_enabled, keep_relidx, notify_msg, summary_str = _decide_audio_selection(profile_language, langs)
        # Log the detected audio tracks
        logger.info("[audio] Tracce: %s", summary_str)
        # Owner notifications, when the selection logic requests them
        if notify_msg:
            try:
                send_telegram_message(owner_chatid, f"ℹ️ {src.name}: {notify_msg}")
            except Exception:
                pass
        ctx = extract_encode_context(str(src))
        # Report which audio tracks are kept/removed (1-based indices)
        if filter_enabled:
            keep_1based = [i+1 for i in sorted(keep_relidx)]
            all_1based = [int(t.get("track")) for t in langs]
            remove_1based = [t for t in all_1based if t not in keep_1based]
            logger.info(
                "[audio] selection: keep=%s (%d/%d), remove=%s",
                ", ".join(f"#{i}" for i in keep_1based) if keep_1based else "(none)",
                len(keep_1based), len(langs),
                ", ".join(f"#{i}" for i in remove_1based) if remove_1based else "(none)"
            )
        else:
            logger.info("[audio] selection: keep=all (%d/%d), remove=(none)", ctx.n_audio, ctx.n_audio)
        # Temporary output path; drop any stale leftover from a previous run
        out_tmp = str(src.with_name(f"{src.stem}.encode{REENCODE_OUTPUT_EXT}"))
        try:
            if os.path.exists(out_tmp):
                os.remove(out_tmp)
        except Exception:
            pass
        dur_txt = _fmt_hms(ctx.duration_s)
        try:
            send_telegram_message(
                chat_id,
                "🗜️ Provo compressione GPU (HEVC NVENC, multipass)…"
            )
            logger.info(
                "[check] source: name=%s, dur=%s (%.0fs), size=%s (%d B), limit=%s (%d B), app=%s, profile_lang=%s",
                src.name, _fmt_hms(ctx.duration_s), ctx.duration_s,
                _fmt_bytes(orig_size), orig_size,
                _fmt_bytes(safe_limit_bytes), safe_limit_bytes,
                app, profile_language or "n/a"
            )
            # Estimated audio bitrate = estimated container total minus estimated video
            a_src_est = max(0, ctx.source_total_kbps_est - ctx.source_video_kbps_est)
            logger.info(
                "[src] video: %dx%d, %s-bit, v_src≈%d kb/s",
                ctx.width, ctx.height, ("10" if ctx.is_10bit else "8"), ctx.source_video_kbps_est
            )
            logger.info(
                "[src] audio: tracks=%d, a_src_est≈%d kb/s",
                ctx.n_audio, a_src_est
            )
            logger.info(
                "[src] container: total_src≈%d kb/s, dur=%s (%.0fs), size=%s (%d B), ext=%s",
                ctx.source_total_kbps_est, dur_txt, ctx.duration_s,
                _fmt_bytes(orig_size), orig_size, ctx.ext_low
            )
        except Exception:
            pass
        # ---- Attempt 1 (limit fudged down by FUDGE_VBR_FIRST_TRY, only here) ----
        limit_try1 = int(safe_limit_bytes * (1.0 - FUDGE_VBR_FIRST_TRY))
        n_audio_eff = len(keep_relidx) if filter_enabled else ctx.n_audio
        plan1 = compute_target_for_size(ctx, limit_try1, n_audio_override=n_audio_eff)
        logger.info(
            "[plan] try#1: total=%s kb/s (v=%s, a=%s), vf_h=%s, limit_fudged=%s (%d B, fudge=%.2f%%), dur=%s (%.0fs)",
            plan1.safe_total_kbps, plan1.v_kbps, plan1.target_audio_kbps_total, plan1.vf_height,
            _fmt_bytes(limit_try1), limit_try1, (FUDGE_VBR_FIRST_TRY * 100.0),
            _fmt_hms(ctx.duration_s), ctx.duration_s
        )
        # When filtering, pass the explicit track map; otherwise None keeps current behavior
        audio_map = keep_relidx if filter_enabled else None
        rc1, el1, out1 = run_encode_once(out_tmp, ctx, plan1, audio_map_relidx=audio_map)
        logger.info("[encode] v_kbps=%s, vf_h=%s, audio_kbps_total=%s, safe_total_kbps=%s, src_v_kbps=%s",
                    plan1.v_kbps, plan1.vf_height, plan1.target_audio_kbps_total,
                    plan1.safe_total_kbps, getattr(plan1, "source_video_kbps_est", "n/a"))
        if rc1 != 0 or out1 <= 0 or not os.path.exists(out_tmp):
            try: send_telegram_message(chat_id, "❌ Compress. fallita (1° tentativo).")
            except Exception: pass
            try:
                if os.path.exists(out_tmp):
                    os.remove(out_tmp)
            except Exception:
                pass
            return {"status": "error", "path": str(src), "msg": "encode1_failed"}
        logger.info("[encode] OK rc=%s, size=%s bytes", rc1, out1)
        if out1 <= safe_limit_bytes:
            # SUCCESS → replace the original with the encoded file
            try:
                dest_path = src.with_suffix(REENCODE_OUTPUT_EXT)
                os.replace(out_tmp, dest_path)
                if dest_path != src:
                    try:
                        os.remove(src)
                    except Exception:
                        pass
                try:
                    eff_n_audio = (len(audio_map) if audio_map is not None else ctx.n_audio)
                    update_mkv_metadata_final(dest_path, src, eff_n_audio)
                    logger.info("[final] mkvpropedit: track statistics tags rigenerati + segment title/default audio impostati.")
                except Exception:
                    pass
                final_size = os.path.getsize(dest_path)
                try:
                    eff_total_kbps_1 = int((final_size * 8) / max(1.0, ctx.duration_s) / 1000)
                    margin_pct_1 = (1.0 - (final_size / safe_limit_bytes)) * 100.0
                    logger.info(
                        "[final] try#1 OK: size=%s (%d B) ≤ limit=%s (%d B), margin=%.2f%%, "
                        "eff_total=%d kb/s vs plan_total=%s, v=%s, vf_h=%s, audio_streams=%d, dest=%s",
                        _fmt_bytes(final_size), final_size,
                        _fmt_bytes(safe_limit_bytes), safe_limit_bytes, margin_pct_1,
                        eff_total_kbps_1, plan1.safe_total_kbps, plan1.v_kbps, plan1.vf_height,
                        (len(audio_map) if filter_enabled else ctx.n_audio),
                        str(dest_path)
                    )
                    size_factor = (orig_size / final_size) if final_size > 0 else 0.0
                    size_redux_pct = 100.0 * (1.0 - (final_size / orig_size))
                    tot_redux_pct = 100.0 * (1.0 - (eff_total_kbps_1 / max(1, ctx.source_total_kbps_est)))
                    v_redux_pct = 100.0 * (1.0 - (plan1.v_kbps / max(1, ctx.source_video_kbps_est)))
                    logger.info(
                        "[final] delta#1: total≈%d→%d kb/s (Δ=%.2f%%), v≈%d→%d kb/s (plan, Δ=%.2f%%), "
                        "size %s→%s (x%.2f, Δ=%.2f%%), vf_h=%s, a_streams=%d",
                        ctx.source_total_kbps_est, eff_total_kbps_1, tot_redux_pct,
                        ctx.source_video_kbps_est, plan1.v_kbps, v_redux_pct,
                        _fmt_bytes(orig_size), _fmt_bytes(final_size), size_factor, size_redux_pct,
                        plan1.vf_height, (len(audio_map) if filter_enabled else ctx.n_audio)
                    )
                except Exception:
                    pass
            except Exception:
                # Replace failed: drop the temp file; report the pre-replace size
                try: os.remove(out_tmp)
                except Exception: pass
                final_size = out1  # best effort
            try:
                send_telegram_message(chat_id, f"✅ Compressione ok: {_fmt_bytes(final_size)} (limite {_fmt_bytes(safe_limit_bytes)})")
            except Exception:
                pass
            # NOTE(review): "path" here is a Path object while other branches return
            # str — callers appear to str() it; confirm before relying on the type.
            return {"status": "encoded_ok", "path": dest_path, "size": final_size,
                    "plan": {"vf_height": plan1.vf_height, "v_kbps": plan1.v_kbps,
                             "audio_kbps": plan1.target_audio_kbps_total,
                             "safe_total_kbps": plan1.safe_total_kbps,
                             "src_video_kbps_est": getattr(plan1, "source_video_kbps_est", None)}}
        # ---- Overshoot → evaluate retry ----
        if not RETRY_ON_OVERSHOOT:
            try:
                send_telegram_message(chat_id, f"ℹ️ Output {_fmt_bytes(out1)} > limite ({_fmt_bytes(safe_limit_bytes)}). Retry disattivato.")
                logger.info(
                    "[overshoot] try#1: size=%s (%d B) > limit=%s (%d B) by %.2f%% — retry disattivato.",
                    _fmt_bytes(out1), out1, _fmt_bytes(safe_limit_bytes), safe_limit_bytes,
                    ((out1 - safe_limit_bytes) / safe_limit_bytes) * 100.0
                )
            except Exception:
                pass
            try:
                if os.path.exists(out_tmp):
                    os.remove(out_tmp)
            except Exception:
                pass
            return {"status": "too_big", "path": str(src), "size": out1, "msg": "overshoot_no_retry"}
        overs_bytes = out1 - safe_limit_bytes
        pct = (overs_bytes / safe_limit_bytes) * 100.0
        min_floor = _floor_value_min(H265_FLOOR_KBPS)
        # Recompute ONLY the video bitrate: spread the excess over the duration + safety
        v2 = _compute_retry_v_kbps(
            ctx.duration_s, out1, safe_limit_bytes, plan1.v_kbps, RETRY_SAFETY_PCT,
            floor_kbps=min_floor
        )
        try:
            plan2 = adjust_plan_after_retry(ctx, plan1, v2)  # uses internal/global H265_FLOOR_KBPS and MAX_TARGET_HEIGHT
        except EncodeNotFeasible as e:
            # Retry not feasible (v2 below the floor, or no compatible bucket) → suggest split
            try:
                send_telegram_message(chat_id,
                                      f"❌ Retry impossibile: {e}. Suggerito split.")
            except Exception:
                pass
            try:
                if os.path.exists(out_tmp):
                    os.remove(out_tmp)
            except Exception:
                pass
            return {"status": "too_big", "path": str(src), "size": out1, "msg": "overshoot_retry_unfeasible"}
        # The message also reports a resolution change, if any
        try:
            send_telegram_message(
                chat_id,
                "ℹ️ Overshoot: {} > limite ({}) di {:.2f}%.\n"
                "Retry: video {} → {} kb/s; risoluzione {} → {}."
                .format(_fmt_bytes(out1), _fmt_bytes(safe_limit_bytes), pct,
                        plan1.v_kbps, plan2.v_kbps, plan1.vf_height, plan2.vf_height)
            )
            logger.info(
                "[overshoot] try#1: size=%s (%d B) > limit=%s (%d B) by %.2f%% — "
                "retry plan: v=%s→%s kb/s, vf_h=%s→%s",
                _fmt_bytes(out1), out1, _fmt_bytes(safe_limit_bytes), safe_limit_bytes, pct,
                plan1.v_kbps, plan2.v_kbps, plan1.vf_height, plan2.vf_height
            )
        except Exception:
            pass
        logger.info(
            "[plan] try#2: total=%s kb/s (v=%s, a=%s), vf_h=%s",
            plan2.safe_total_kbps, plan2.v_kbps, plan2.target_audio_kbps_total, plan2.vf_height
        )
        rc2, el2, out2 = run_encode_once(out_tmp, ctx, plan2, audio_map_relidx=audio_map)
        logger.info("[encode] v_kbps=%s, vf_h=%s, audio_kbps_total=%s, safe_total_kbps=%s, src_v_kbps=%s",
                    plan2.v_kbps, plan2.vf_height, plan2.target_audio_kbps_total,
                    plan2.safe_total_kbps, getattr(plan2, "source_video_kbps_est", "n/a"))
        if rc2 != 0 or out2 <= 0 or not os.path.exists(out_tmp):
            try: send_telegram_message(chat_id, "❌ Compress. fallita (2° tentativo).")
            except Exception: pass
            try:
                if os.path.exists(out_tmp):
                    os.remove(out_tmp)
            except Exception:
                pass
            return {"status": "error", "path": str(src), "msg": "encode2_failed"}
        logger.info("[encode] OK rc=%s, size=%s bytes", rc2, out2)
        if out2 <= safe_limit_bytes:
            # SUCCESS on retry → replace the original (same dance as attempt 1)
            try:
                dest_path = src.with_suffix(REENCODE_OUTPUT_EXT)
                os.replace(out_tmp, dest_path)
                if dest_path != src:
                    try:
                        os.remove(src)
                    except Exception:
                        pass
                try:
                    eff_n_audio = (len(audio_map) if audio_map is not None else ctx.n_audio)
                    update_mkv_metadata_final(dest_path, src, eff_n_audio)
                    logger.info("[final] mkvpropedit: track statistics tags rigenerati + segment title/default audio impostati.")
                except Exception:
                    pass
                final_size = os.path.getsize(dest_path)
                try:
                    eff_total_kbps_2 = int((final_size * 8) / max(1.0, ctx.duration_s) / 1000)
                    margin_pct_2 = (1.0 - (final_size / safe_limit_bytes)) * 100.0
                    logger.info(
                        "[final] try#2 OK: size=%s (%d B) ≤ limit=%s (%d B), margin=%.2f%%, "
                        "eff_total=%d kb/s vs plan_total=%s, v=%s, vf_h=%s, audio_streams=%d, dest=%s",
                        _fmt_bytes(final_size), final_size,
                        _fmt_bytes(safe_limit_bytes), safe_limit_bytes, margin_pct_2,
                        eff_total_kbps_2, plan2.safe_total_kbps, plan2.v_kbps, plan2.vf_height,
                        (len(audio_map) if filter_enabled else ctx.n_audio),
                        str(dest_path)
                    )
                    size_factor = (orig_size / final_size) if final_size > 0 else 0.0
                    size_redux_pct = 100.0 * (1.0 - (final_size / orig_size))
                    tot_redux_pct = 100.0 * (1.0 - (eff_total_kbps_2 / max(1, ctx.source_total_kbps_est)))
                    v_redux_pct = 100.0 * (1.0 - (plan2.v_kbps / max(1, ctx.source_video_kbps_est)))
                    logger.info(
                        "[final] delta#2: total≈%d→%d kb/s (Δ=%.2f%%), v≈%d→%d kb/s (plan, Δ=%.2f%%), "
                        "size %s→%s (x%.2f, Δ=%.2f%%), vf_h=%s, a_streams=%d",
                        ctx.source_total_kbps_est, eff_total_kbps_2, tot_redux_pct,
                        ctx.source_video_kbps_est, plan2.v_kbps, v_redux_pct,
                        _fmt_bytes(orig_size), _fmt_bytes(final_size), size_factor, size_redux_pct,
                        plan2.vf_height, (len(audio_map) if filter_enabled else ctx.n_audio)
                    )
                except Exception:
                    pass
            except Exception:
                try: os.remove(out_tmp)
                except Exception: pass
                final_size = out2
            try:
                send_telegram_message(chat_id, f"✅ Compressione ok al 2° tentativo: {_fmt_bytes(final_size)} (limite {_fmt_bytes(safe_limit_bytes)})")
            except Exception:
                pass
            # NOTE(review): "path" is a Path here as well — see the try#1 note above.
            return {"status": "encoded_ok", "path": dest_path, "size": final_size,
                    "plan": {"vf_height": plan2.vf_height, "v_kbps": plan2.v_kbps,
                             "audio_kbps": plan2.target_audio_kbps_total,
                             "safe_total_kbps": plan2.safe_total_kbps,
                             "src_video_kbps_est": getattr(plan2, "source_video_kbps_est", None)}}
        # Still too big → leave the split decision to the caller
        try:
            if os.path.exists(out_tmp):
                os.remove(out_tmp)
        except Exception:
            pass
        try:
            send_telegram_message(chat_id, f"ℹ️ Output ancora > limite ({_fmt_bytes(safe_limit_bytes)}).")
        except Exception:
            pass
        logger.info(
            "[final] try#2 still too big: size=%s (%d B) > limit=%s (%d B) by %.2f%% — split demandato al chiamante.",
            _fmt_bytes(out2), out2, _fmt_bytes(safe_limit_bytes), safe_limit_bytes,
            ((out2 - safe_limit_bytes) / safe_limit_bytes) * 100.0
        )
        return {"status": "too_big", "path": str(src), "size": out2, "msg": "overshoot_after_retry"}
    except Exception as e:
        logger.exception("[check_file] eccezione: %s", e)
        try: send_telegram_message(chat_id, f"❌ Errore check_file: {e}")
        except Exception: pass
        return {"status": "error", "path": None, "msg": str(e)}
def parse_manual_args():
    """Parse the CLI options of the 'manual' sub-command (everything after argv[1])."""
    parser = argparse.ArgumentParser(prog="manual")
    parser.add_argument("--file", required=True)
    parser.add_argument("--chat-id", required=True, type=int)
    parser.add_argument("--entity", required=False, default=None)
    parser.add_argument(
        '--split',
        metavar='MB',
        type=int,
        default=None,
        help='Forza lo split manuale, dimensione massima di ogni parte in MB',
    )
    return parser.parse_args(sys.argv[2:])
def run_manual_send(file_path, chat_id, entity, split_mb=None, item_id=None, app=None):
    """Manually send a file to Telegram, optionally compressing and/or splitting it.

    Args:
        file_path: path of the video to send.
        chat_id:   Telegram chat to notify/send to.
        entity:    Telethon destination entity (username or None).
        split_mb:  when set, force a split into parts of at most this many MB.
        item_id:   optional *arr item id — required to run the pre-send compression.
        app:       optional "radarr"/"sonarr" — required to run the pre-send compression.

    Returns 0 on success, 1 on failure (shell-style exit code).
    """
    file_path = str(Path(file_path).resolve())
    logger.info(f"[MANUAL] file={file_path} chat_id={chat_id} entity={entity} split={split_mb}")
    if not os.path.exists(file_path):
        send_telegram_message(chat_id, "❌ File non trovato")
        return 1
    # 1) Same sequence as the *arr flow: compress first (if needed).
    # BUG FIX: the old code called check_file(file_path, chat_id) but check_file()
    # requires (file_path, chat_id, item_id, app), so it always raised a TypeError
    # that was silently swallowed — manual sends were never compressed. Run it only
    # when the caller supplies the *arr context, and use the re-encoded output.
    if item_id is not None and app is not None:
        try:
            res = check_file(file_path, chat_id, item_id, app)
            if isinstance(res, dict) and res.get("status") == "encoded_ok" and res.get("path"):
                file_path = str(res["path"])
        except Exception as e:
            logger.warning(f"[MANUAL] check_file ha segnalato: {e}")
    else:
        logger.info("[MANUAL] item_id/app non forniti: compressione preliminare saltata")
    # No split requested → single upload
    if split_mb is None:
        ok = send_file_to_telegram(file_path, chat_id, entity)
        return 0 if ok else 1
    # Split requested: the given MB value is the hard limit per part
    if split_mb <= 0:
        send_telegram_message(chat_id, "❌ Valore --split non valido (deve essere > 0)")
        return 1
    target_bytes = split_mb * 1024 * 1024
    # Never exceed the Telegram per-part limit (safe_limit_bytes): clamp and warn
    if target_bytes > safe_limit_bytes:
        logger.warning(f"[MANUAL] --split={split_mb}MB supera il limite per parte; verrà usato {int(safe_limit_bytes/1024/1024)}MB")
        target_bytes = safe_limit_bytes
    logger.info(f"[MANUAL] Forzo split in parti da ~{int(target_bytes/1024/1024)} MB")
    parts = split_into_streamable_parts(file_path, target_bytes, chat_id)
    if not parts:
        send_telegram_message(chat_id, "❌ Split manuale fallito")
        return 1
    # Send parts in order; stop at the first failure
    sent_all = True
    for p in parts:
        ok = send_file_to_telegram(p, chat_id, entity)
        if not ok:
            sent_all = False
            break
    # Delete only the parts that were created (never the original file)
    if sent_all:
        orig = os.path.abspath(file_path)
        for p in parts:
            try:
                if os.path.abspath(p) != orig:
                    os.remove(p)
            except Exception:
                pass
    return 0 if sent_all else 1
def send_telegram_message(chat_id, message, parse_mode=None):
    """Send a plain-text message through the Telegram Bot API.

    Args:
        chat_id:    destination chat id.
        message:    text to send.
        parse_mode: optional Telegram parse mode (e.g. "HTML", "MarkdownV2").

    Failures are logged, never raised: this helper is used as a best-effort
    notifier throughout the script.
    """
    try:
        data = {
            "chat_id": chat_id,
            "text": message
        }
        if parse_mode:
            data["parse_mode"] = parse_mode
        response = requests.post(
            f"https://api.telegram.org/bot{telegram_bot_token}/sendMessage",
            data=data,
            timeout=30,  # BUG FIX: without a timeout a stalled API call hangs the whole script
        )
        response.raise_for_status()
    except requests.RequestException as e:
        logger.error("Failed to send message to Telegram: %s", str(e))
def extract_telegram_data(tags, keyword, extract_type='username'):
    """Extract the Telegram username or chat id from a '|'-separated tag string.

    Tags are expected to look like '<keyword>-<username>-<chatid>'; the LAST tag
    containing *keyword* wins. Returns 'Unknown user' / 'Unknown chatid' when no
    usable tag is found.

    Raises ValueError when *extract_type* is neither 'username' nor 'chatid'.
    """
    fallback = 'Unknown user' if extract_type == 'username' else 'Unknown chatid'
    if not tags:
        return fallback
    tag_list = tags.split('|')
    relevant_tags = [tag for tag in tag_list if keyword in tag]
    if not relevant_tags:
        return fallback
    last_tag = relevant_tags[-1]
    parts = last_tag.split('-')
    if len(parts) < 3:
        logger.error(f"Invalid '{last_tag}' tag: must contain 3 parts separated by '-'")
        # BUG FIX: the old code logged the malformed tag and then indexed
        # parts[1]/parts[2] anyway, raising IndexError. Return the fallback.
        return fallback
    if extract_type == 'username':
        return '@' + parts[1]
    elif extract_type == 'chatid':
        return parts[2]
    else:
        raise ValueError("extract_type must be 'username' or 'chatid'")
def get_unique_tags(tags, keyword):
    """Return the tags containing *keyword*, de-duplicated by Telegram username.

    The first tag seen for each username wins; tag order is preserved.
    Returns [] for empty/None input.
    """
    if not tags:
        return []
    tag_list = tags.split('|')
    relevant_tags = [tag for tag in tag_list if keyword in tag]
    unique_tags = []
    seen_usernames = set()  # PERF FIX: the old code re-extracted every kept tag's username on each iteration (O(n²))
    for tag in relevant_tags:
        username = extract_telegram_data(tag, keyword, 'username')
        if username not in seen_usernames:
            seen_usernames.add(username)
            unique_tags.append(tag)
    return unique_tags
def humanize_size(size_in_bytes):
    """Format a byte count as a decimal 'NNN MB' or 'N.NN GB' string."""
    megabytes = size_in_bytes / 1000000.0  # decimal megabytes
    if megabytes > 1000:
        return f"{megabytes / 1000:.2f} GB"
    return f"{megabytes:.0f} MB"
def get_media_info(file_path):
    """Probe *file_path* with dockerized ffprobe and summarize the media info.

    Returns a dict with:
      duration_s, width, height,
      total_bitrate_kbps (None when ffprobe does not report it),
      n_audio, audio_kbps_total (sum of the audio streams' bit_rate when
      available, else AUDIO_KBPS_PER_TRACK per track),
      v_codec, pix_fmt, color_primaries, transfer, matrix,
      is_10bit, is_hdr.

    On any failure a conservative 1080p/1-audio default dict is returned and a
    warning is logged — this function never raises.
    """
    # Note: the first '-v' is docker's volume flag, the second is ffprobe's loglevel.
    base_cmd = [
        'docker', 'run', '--rm',
        '-e', 'DOCKER_CONFIG=/dev/null',
        '--entrypoint', '/usr/bin/ffprobe',
        '-v', docker_mount_point,
        'ghcr.io/aperim/nvidia-cuda-ffmpeg:latest',
        '-v', 'error',
        '-print_format', 'json',
        '-show_format', '-show_streams',
        file_path
    ]
    try:
        out = subprocess.check_output(base_cmd).decode('utf-8', errors='ignore')
        # FIX: removed the redundant local `import json` that shadowed the module-level import
        data = json.loads(out)
        fmt = data.get('format', {}) or {}
        streams = data.get('streams', []) or []
        duration_s = float(fmt.get('duration') or 0) or 0.0
        total_bitrate_kbps = None
        if fmt.get('bit_rate'):
            try:
                total_bitrate_kbps = int(int(fmt['bit_rate']) / 1000)
            except Exception:
                total_bitrate_kbps = None
        # First video stream (if any) drives resolution/codec/color metadata
        v = next((s for s in streams if s.get('codec_type') == 'video'), None)
        width = (v or {}).get('width') or 0
        height = (v or {}).get('height') or 0
        v_codec = (v or {}).get('codec_name') or ''
        pix_fmt = (v or {}).get('pix_fmt') or ''
        color_primaries = (v or {}).get('color_primaries') or ''
        transfer = (v or {}).get('color_transfer') or ''
        matrix = (v or {}).get('color_space') or ''
        is_10bit = ('10' in (v or {}).get('bits_per_raw_sample', '')) or pix_fmt in ('p010le', 'yuv420p10le')
        # Rough HDR heuristic: BT.2020 primaries or SMPTE2084/HLG transfer
        is_hdr = ('bt2020' in (color_primaries or '').lower()) or ('smpte2084' in (transfer or '').lower()) or ('arib-std-b67' in (transfer or '').lower())
        # Audio streams: sum the reported bitrates, falling back to a per-track estimate
        a_streams = [s for s in streams if s.get('codec_type') == 'audio']
        n_audio = len(a_streams)
        audio_kbps_total = 0
        for a in a_streams:
            try:
                br = int(a.get('bit_rate') or 0)
                if br > 0:
                    audio_kbps_total += int(br / 1000)
                else:
                    audio_kbps_total += AUDIO_KBPS_PER_TRACK
            except Exception:
                audio_kbps_total += AUDIO_KBPS_PER_TRACK
        if n_audio == 0:
            audio_kbps_total = 0
        return {
            'duration_s': max(1.0, duration_s),
            'width': int(width or 0),
            'height': int(height or 0),
            'total_bitrate_kbps': total_bitrate_kbps,
            'n_audio': n_audio,
            'audio_kbps_total': int(audio_kbps_total),
            'v_codec': v_codec,
            'pix_fmt': pix_fmt,
            'color_primaries': color_primaries,
            'transfer': transfer,
            'matrix': matrix,
            'is_10bit': bool(is_10bit),
            'is_hdr': bool(is_hdr),
        }
    except Exception as e:
        logger.warning(f"⚠️ ffprobe media info fallito: {e}")
        return {
            'duration_s': 1.0, 'width': 1920, 'height': 1080,
            'total_bitrate_kbps': None,
            'n_audio': 1, 'audio_kbps_total': AUDIO_KBPS_PER_TRACK,
            'v_codec': '', 'pix_fmt': '', 'color_primaries': '', 'transfer': '', 'matrix': '',
            'is_10bit': False, 'is_hdr': False
        }
- # Wrapper compat con la tua vecchia firma
- def get_video_info(file_path):
- mi = get_media_info(file_path)
- return int(mi['duration_s']), int(mi['width']), int(mi['height'])
async def generate_thumbnail(video_path):
    """Extract a single JPEG frame to use as a streaming thumbnail for MKV uploads.

    Returns the thumbnail path on success, None on any failure.
    """
    try:
        thumb_path = f"/tmp/thumb_{os.path.basename(video_path)}.jpg"
        ffmpeg_cmd = [
            'docker', 'run', '--rm', '--entrypoint', '/usr/bin/ffmpeg',
            '-v', docker_mount_point,
            'ghcr.io/aperim/nvidia-cuda-ffmpeg:latest',
            '-i', video_path,
            '-ss', '00:01:00',  # NOTE(review): yields no frame (→ None) for videos shorter than 1 minute
            '-vframes', '1',
            '-q:v', '2',
            '-y', thumb_path
        ]
        # BUG FIX: subprocess.run() blocked the event loop inside this coroutine;
        # run the docker/ffmpeg call in the default executor instead.
        loop = asyncio.get_running_loop()
        await loop.run_in_executor(
            None, lambda: subprocess.run(ffmpeg_cmd, check=True, capture_output=True)
        )
        if os.path.exists(thumb_path):
            return thumb_path
        return None
    except Exception as e:
        logger.error(f"💥 Errore generazione thumbnail: {e}")
        return None
def load_telegram_config():
    """Load the Telethon api_id/api_hash pair from the JSON config file.

    Returns (api_id, api_hash), or (None, None) when the file is missing,
    unreadable, or incomplete.
    """
    config_file = "/scripts/telegram_upload/telethon_upload.json"
    try:
        if not os.path.exists(config_file):
            logger.error(f"❌ File configurazione non trovato: {config_file}")
            return None, None
        with open(config_file, 'r') as fh:
            cfg = json.load(fh)
        api_id, api_hash = cfg.get('api_id'), cfg.get('api_hash')
        if api_id and api_hash:
            print("✅ Configurazione Telegram caricata")
            return api_id, api_hash
        logger.error("❌ api_id o api_hash mancanti nel file di configurazione")
        return None, None
    except Exception as e:
        logger.error(f"💥 Errore caricamento configurazione: {e}")
        return None, None
- def _have(cmd): return which(cmd) is not None
def _remux_mkv_to_mp4(mkv_parts, chat_id):
    """Remux each MKV part to MP4 (stream copy + faststart) via dockerized ffmpeg.

    Args:
        mkv_parts: iterable of Path objects for the MKV parts.
        chat_id:   chat notified on failure.

    Returns the list of MP4 Paths, or None at the first failed part.
    """
    out = []
    for mkv in mkv_parts:
        mp4 = mkv.with_suffix(".mp4")
        cmd = [
            'docker', 'run', '--rm',
            '-v', docker_mount_point,
            'ghcr.io/aperim/nvidia-cuda-ffmpeg:latest',
            '-hide_banner', '-loglevel', 'error',
            # BUG FIX: without -y ffmpeg refuses to overwrite an existing output
            # (stdin is closed inside `docker run`, so it cannot ask) and fails.
            '-y',
            '-i', str(mkv),
            '-c', 'copy', '-movflags', '+faststart',
            str(mp4)
        ]
        rc = subprocess.call(cmd)
        if rc != 0 or not mp4.exists():
            send_telegram_message(chat_id, f"❌ Remux MKV→MP4 fallito: {mkv.name}")
            return None
        out.append(mp4)
    return out
def get_profile_language_iso639_2(app, api_key, host, port, entity_id):
    """Resolve the preferred language of an item's quality profile to ISO 639-2.

    For Radarr the language comes straight from the quality profile; for Sonarr
    it is inferred from the highest-scoring LanguageSpecification custom format
    attached to the series' quality profile. Returns a lowercase 3-letter code
    ("und" when no language preference can be determined for Sonarr).

    Raises ValueError for an unknown *app*; pycountry.LookupError may propagate
    for unexpected language names.
    """
    base = host.rstrip("/")
    if port and f":{port}" not in base:
        base = f"{base}:{port}"
    headers = {"X-Api-Key": api_key}
    if app == "radarr":
        movie = requests.get(f"{base}/api/v3/movie/{entity_id}", headers=headers).json()
        qpid = movie["qualityProfileId"]
        qp = requests.get(f"{base}/api/v3/qualityprofile/{qpid}", headers=headers).json()
        lang_name = qp["language"]["name"]
        return pycountry.languages.lookup(lang_name).alpha_3.lower()
    elif app == "sonarr":
        # 1) fetch the series' quality profile
        series = requests.get(f"{base}/api/v3/series/{entity_id}", headers=headers).json()
        qpid = series["qualityProfileId"]
        qp = requests.get(f"{base}/api/v3/qualityprofile/{qpid}", headers=headers).json()
        # 2) map custom formats and find the highest-scoring LanguageSpecification CF
        all_cf = requests.get(f"{base}/api/v3/customformat", headers=headers).json()
        cf_by_id = {cf["id"]: cf for cf in all_cf}
        best = None  # (score, lang_id)
        for it in qp.get("formatItems", []):
            # "format" may be a raw id or an embedded object, depending on version
            ref = it.get("format")
            if isinstance(ref, int):
                cf_id = ref
            elif isinstance(ref, dict):
                cf_id = ref.get("id") or ref.get("customFormatId")
            else:
                continue
            cf = cf_by_id.get(cf_id)
            if not cf:
                continue
            for spec in cf.get("specifications", []):
                impl = (spec.get("implementation") or spec.get("implementationName") or "")
                if "LanguageSpecification" not in impl:
                    continue
                # "fields" is a dict in some payloads, a list of {name,value} in others
                fields = spec.get("fields")
                val = None
                if isinstance(fields, dict):
                    val = fields.get("value")
                elif isinstance(fields, list):
                    for f in fields:
                        if isinstance(f, dict) and f.get("name") == "value":
                            val = f.get("value")
                            break
                if val is None:
                    continue
                score = it.get("score", 0)
                if best is None or score > best[0]:
                    best = (score, int(val))
        # 3) handle "Original Language" (negative id) or no match at all
        if not best or best[1] < 0:
            return "und"
        lang_id = best[1]
        # 4) resolve id -> canonical name via the Sonarr language API
        langs = requests.get(f"{base}/api/v3/language", headers=headers).json()
        lang_name = None
        for L in langs:
            if L.get("id") == lang_id:
                lang_name = L.get("name")
                break
        if not lang_name:
            return "und"
        # 5) normalize a few known aliases and return ISO 639-2
        norm = {
            "flemish": "dutch",
            "portuguese (brazil)": "portuguese",
        }
        key = lang_name.lower()
        lang_for_lookup = norm.get(key, lang_name)
        return pycountry.languages.lookup(lang_for_lookup).alpha_3.lower()
    else:
        raise ValueError("app must be 'radarr' o 'sonarr'")
def send_file_to_telegram(file_path, chat_id, telegram_username):
    """Send a file (split into streamable parts when needed) via Telethon.

    Splits the file when it exceeds safe_limit_bytes, uploads each part with
    streaming metadata and a generated thumbnail, and notifies *chat_id* about
    progress. Returns True on complete success, False otherwise.
    """
    # Telethon must have imported successfully at module load
    if not TELETHON_AVAILABLE:
        logger.error("❌ Telethon non disponibile - ABORT")
        send_telegram_message(owner_chatid, "❌ Errore: Telethon non installato")
        return False
    # Load api_id/api_hash from the JSON config
    api_id, api_hash = load_telegram_config()
    if not api_id or not api_hash:
        logger.error("❌ Configurazione mancante - ABORT")
        send_telegram_message(owner_chatid, "❌ Errore: Configurazione Telegram mancante")
        return False
    file_size = os.path.getsize(file_path)
    # Build the upload list (single file, or split parts when over the limit)
    files_to_send = [file_path]
    if file_size > safe_limit_bytes:
        send_telegram_message(chat_id, f"📦 File ancora > 2GB ({file_size // 1024 // 1024} MB). Avvio split in parti…")
        parts = split_into_streamable_parts(file_path, safe_limit_bytes, chat_id)
        if not parts:
            send_telegram_message(chat_id, "❌ Split fallito")
            return False
        files_to_send = parts
        send_telegram_message(chat_id, f"✅ Create {len(parts)} parti (< {safe_limit_bytes//1024//1024} MB ciascuna)")
    async def async_send():
        # One Telethon session reused for the whole batch
        session_file = "/scripts/telegram_upload/telethon_upload"
        client = TelegramClient(session_file, api_id, api_hash)
        try:
            print("🔄 Connessione a Telegram...")
            await asyncio.wait_for(client.start(), timeout=30)
            print("✅ Connesso a Telegram!")
            send_telegram_message(chat_id, f"✅ Invio in corso...")
            total = len(files_to_send)
            for idx, path in enumerate(files_to_send, start=1):
                # Streaming metadata + per-part thumbnail
                duration, width, height = get_video_info(path)
                thumbnail_path = await generate_thumbnail(path)
                caption = None
                if total > 1:
                    caption = f"{os.path.basename(file_path)}\nParte {idx}/{total}"
                def progress_callback(current, total_bytes):
                    # lightweight progress hook (currently a no-op)
                    pass
                await client.send_file(
                    telegram_username,
                    path,
                    caption=caption,
                    thumb=thumbnail_path,
                    supports_streaming=True,
                    force_document=False,
                    attributes=[
                        DocumentAttributeVideo(
                            duration=duration,
                            w=width,
                            h=height,
                            supports_streaming=True
                        )
                    ],
                    progress_callback=progress_callback
                )
                # Clean up the temporary thumbnail for this part
                if thumbnail_path and os.path.exists(thumbnail_path):
                    try: os.remove(thumbnail_path)
                    except: pass
            send_telegram_message(chat_id, "✅ Invio completato")
            return True
        except asyncio.TimeoutError:
            logger.error("⏰ TIMEOUT durante avvio client Telethon")
            send_telegram_message(owner_chatid, "❌ Timeout connessione Telegram")
            return False
        except FloodWaitError as e:
            # Telegram rate limit: wait it out, then report failure to the caller
            logger.warning(f"⏳ Rate limit: attendi {e.seconds}s")
            send_telegram_message(chat_id, f"⏳ Rate limit: attendi {e.seconds}s")
            await asyncio.sleep(e.seconds + 1)
            return False
        except Exception as e:
            logger.error(f"💥 Errore Telethon: {e}")
            send_telegram_message(owner_chatid, f"❌ Errore invio: {str(e)[:100]}")
            return False
        finally:
            await client.disconnect()
    # Run the coroutine on a dedicated event loop (this function is sync)
    try:
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        result = loop.run_until_complete(async_send())
        loop.close()
        return result
    except Exception as e:
        logger.error(f"💥 Errore wrapper asincrono: {e}")
        send_telegram_message(owner_chatid, "❌ Errore sistema")
        return False
def update_series(sonarr_host, sonarr_port, sonarr_series_tvdbid, sonarr_api):
    """Sync Sonarr season monitoring for a series; delete it when fully done.

    Looks the series up by tvdbId, unmonitors every season that has no
    monitored episodes left, pushes the update, and — when no season remains
    monitored and the series is not 'continuing' — deletes the series together
    with its files.

    Returns True on success, False on any API error.
    """
    # Look the series up via tvdbId
    series_url = f"{sonarr_host}:{sonarr_port}/api/v3/series/lookup?term=tvdb:{sonarr_series_tvdbid}"
    headers = {
        "accept": "application/json",
        "X-Api-Key": sonarr_api
    }
    response = requests.get(series_url, headers=headers)
    if response.status_code != 200:
        logger.error(f"Error in obtaining series data. Status code: {response.status_code}")
        return False
    series_data = response.json()
    if not series_data:
        logger.error(f"No series found for tvdbId {sonarr_series_tvdbid}")
        return False
    series_data = series_data[0]  # first lookup result
    # Fetch all episodes of the series
    episodes_url = f"{sonarr_host}:{sonarr_port}/api/v3/episode?seriesId={series_data['id']}"
    response = requests.get(episodes_url, headers=headers)
    if response.status_code != 200:
        logger.error(f"Error in obtaining episode data. Status code: {response.status_code}")
        return False
    episodes_data = response.json()
    # BUG FIX: the old add/discard loop depended on episode order — a season whose
    # LAST episode was unmonitored got unmonitored even if earlier episodes were
    # still monitored. A season must stay monitored iff ANY of its episodes is.
    all_seasons = {ep['seasonNumber'] for ep in episodes_data}
    seasons_with_monitored = {ep['seasonNumber'] for ep in episodes_data if ep['monitored']}
    seasons_to_unmonitor = all_seasons - seasons_with_monitored
    # Flip the monitored flag on each fully-unmonitored season
    for season_number in seasons_to_unmonitor:
        for season in series_data['seasons']:
            if season['seasonNumber'] == season_number:
                if season['monitored']:
                    season['monitored'] = False
                    print(f"Season {season_number} set to unmonitored.")
                break  # found the season, stop scanning
    # Does the series still have monitored seasons after the update?
    monitored_seasons = [season for season in series_data['seasons'] if season['monitored']]
    # Push the modified series back to Sonarr
    update_url = f"{sonarr_host}:{sonarr_port}/api/v3/series/{series_data['id']}"
    response = requests.put(update_url, headers=headers, json=series_data)
    if response.status_code == 202:
        print(f"Series with ID {series_data['id']} successfully updated.")
    else:
        logger.error(f"Error updating series with ID {series_data['id']}. Status code: {response.status_code}")
        return False
    # No monitored seasons left → delete, unless the series is still running
    if not monitored_seasons:
        if series_data['status'] != 'continuing':
            delete_url = f"{sonarr_host}:{sonarr_port}/api/v3/series/{series_data['id']}"
            response = requests.delete(delete_url, headers=headers, params={"deleteFiles": "true"})
            if response.status_code == 200:
                print(f"Series with ID {series_data['id']} successfully deleted.")
                return True
            logger.error(f"Error in deleting series with ID {series_data['id']}. Status code: {response.status_code}")
            return False
        # The series continues: keep it
        print("Series continues, no action taken.")
        return True
    # BUG FIX: the old code fell off the end here, implicitly returning None
    # (falsy) even though the update succeeded.
    return True
def delete_movie(radarr_host, radarr_port, radarr_movie_id, radarr_api):
    """Delete a Radarr movie by id, including its files, without adding an import exclusion."""
    endpoint = f"{radarr_host}:{radarr_port}/api/v3/movie/{radarr_movie_id}"
    query = {
        "deleteFiles": "true",
        "addImportExclusion": "false"
    }
    auth_headers = {
        "accept": "*/*",
        "X-Api-Key": radarr_api
    }
    resp = requests.delete(endpoint, params=query, headers=auth_headers)
    if resp.status_code != 200:
        logger.error(f"Error deleting movie with ID {radarr_movie_id}. Status code: {resp.status_code}")
        return False
    print(f"Movie with ID {radarr_movie_id} successfully deleted.")
    return True
def delete_episode_file(sonarr_host, sonarr_port, sonarr_episode_file_id, sonarr_api):
    """Delete a Sonarr episode file by its file id (removes the file on disk)."""
    endpoint = f"{sonarr_host}:{sonarr_port}/api/v3/episodefile/{sonarr_episode_file_id}"
    query = {"deleteFiles": "true"}
    auth_headers = {
        "accept": "*/*",
        "X-Api-Key": sonarr_api
    }
    resp = requests.delete(endpoint, params=query, headers=auth_headers)
    if resp.status_code != 200:
        logger.error(f"Error deleting episode file with ID {sonarr_episode_file_id}. Status code: {resp.status_code}")
        return False
    print(f"Episode file with ID {sonarr_episode_file_id} successfully deleted.")
    return True
def handle_add_event(tags, title, keyword, event_type):
    """Notify the owner chat that a tagged user added a movie/series, then exit."""
    if event_type == 'radarr':
        item_type = "il film"
    else:
        item_type = "la serie"
    username = extract_telegram_data(tags, keyword, 'username')
    send_telegram_message(owner_chatid, f"{username} ha appena aggiunto {item_type} {title}")
    sys.exit(0)
def handle_grab_event(tags, title, release_quality, release_title, release_size, imdb_id, keyword, event_type):
    """Notify every tagged chat that a release was grabbed for download, then exit."""
    relevant_tags = get_unique_tags(tags, keyword)
    if not relevant_tags:
        print("No relevant tags found for Grab event")
        sys.exit(0)
    item_type = "Film aggiunto" if event_type == 'radarr' else "Serie aggiunta"
    human_readable_size = humanize_size(int(release_size))
    for tag in relevant_tags:
        chat_id = extract_telegram_data(tag, keyword, 'chatid')
        message = f"<b>{item_type} al download:</b>\n{title} ({release_quality})\n[{release_title}] - {human_readable_size}\n<a href='https://imdb.com/title/{imdb_id}'>IMDB Link</a>"
        send_telegram_message(chat_id, message, 'HTML')
    sys.exit(0)
def process_sonarr_download(sonarr_series_tags, sonarr_series_title, sonarr_series_year, sonarr_episodefile_seasonnumber,
                            sonarr_episodefile_episodenumbers, sonarr_episodefile_quality,
                            sonarr_episodefile_path, sonarr_episodefile_id, sonarr_series_tvdbid, sonarr_series_id):
    """Notify tagged chats about an imported episode, ship the file to Telegram,
    then clean it up in Sonarr and re-sync the series' season monitoring."""
    relevant_tags = get_unique_tags(sonarr_series_tags, keyword)
    if not relevant_tags:
        print("No relevant tags found for Download event")
        return
    for tag in relevant_tags:
        chat_id = extract_telegram_data(tag, keyword, 'chatid')
        telegram_username = extract_telegram_data(tag, keyword, 'username')
        notice = f"<b>Episodio importato correttamente:</b>\n{sonarr_series_title} ({sonarr_series_year}) - ({sonarr_episodefile_quality})\n[Stagione {sonarr_episodefile_seasonnumber} - Episodio {sonarr_episodefile_episodenumbers}]"
        send_telegram_message(chat_id, notice, 'HTML')
        check_file(sonarr_episodefile_path, chat_id, sonarr_series_id, app="sonarr")
        # Only remove the file (and refresh monitoring) once the upload succeeded.
        if send_file_to_telegram(sonarr_episodefile_path, chat_id, telegram_username):
            delete_episode_file(sonarr_host, sonarr_port, sonarr_episodefile_id, sonarr_api)
            update_series(sonarr_host, sonarr_port, sonarr_series_tvdbid, sonarr_api)
def process_radarr_download(radarr_movie_tags, radarr_movie_title, radarr_movie_year,
                            radarr_moviefile_quality, radarr_moviefile_scenename,
                            radarr_moviefile_path, radarr_movie_id):
    """Notify tagged chats about an imported movie, ship the file to Telegram,
    then remove the movie from Radarr once the upload succeeded."""
    relevant_tags = get_unique_tags(radarr_movie_tags, keyword)
    if not relevant_tags:
        print("No relevant tags found for Download event")
        return
    for tag in relevant_tags:
        chat_id = extract_telegram_data(tag, keyword, 'chatid')
        telegram_username = extract_telegram_data(tag, keyword, 'username')
        scene_suffix = f"\n[{radarr_moviefile_scenename}]" if radarr_moviefile_scenename else ""
        notice = f"<b>Film importato correttamente:</b>\n{radarr_movie_title} ({radarr_movie_year}) - ({radarr_moviefile_quality})" + scene_suffix
        send_telegram_message(chat_id, notice, 'HTML')
        check_file(radarr_moviefile_path, chat_id, radarr_movie_id, app="radarr")
        # Only delete from Radarr once the Telegram upload succeeded.
        if send_file_to_telegram(radarr_moviefile_path, chat_id, telegram_username):
            delete_movie(radarr_host, radarr_port, radarr_movie_id, radarr_api)
def run_radarr_sonarr_flow():
    """Dispatch a Radarr/Sonarr custom-script event read from the environment.

    Both applications invoke this script with event data in environment
    variables; the event type decides which handler runs.  Exits with 0 in
    every early-out case so the *Arr apps do not mark the script as failed.
    """
    env = os.environ.get

    # ---- Sonarr payload -------------------------------------------------
    sonarr_eventtype = env('sonarr_eventtype', '')
    sonarr_series_tags = env('sonarr_series_tags', '').lower()
    sonarr_series_title = env('sonarr_series_title', 'Unknown Series')
    sonarr_release_size = env('sonarr_release_size', '')
    sonarr_release_quality = env('sonarr_release_quality', '')
    sonarr_release_title = env('sonarr_release_title', '')
    sonarr_series_imdbid = env('sonarr_series_imdbid', '')
    sonarr_series_year = env('sonarr_series_year', '')
    sonarr_episodefile_quality = env('sonarr_episodefile_quality', '')
    sonarr_episodefile_seasonnumber = env('sonarr_episodefile_seasonnumber', '')
    sonarr_episodefile_episodenumbers = env('sonarr_episodefile_episodenumbers', '')
    sonarr_episodefile_path = env('sonarr_episodefile_path', '')
    sonarr_episodefile_id = env('sonarr_episodefile_id', '')
    sonarr_series_tvdbid = env('sonarr_series_tvdbid', '')
    sonarr_series_id = env('sonarr_series_id', '')

    # ---- Radarr payload -------------------------------------------------
    radarr_eventtype = env('radarr_eventtype', '')
    radarr_movie_tags = env('radarr_movie_tags', '').lower()
    radarr_movie_title = env('radarr_movie_title', 'Unknown Movie')
    radarr_release_size = env('radarr_release_size', '')
    radarr_release_quality = env('radarr_release_quality', '')
    radarr_release_title = env('radarr_release_title', '')
    radarr_movie_imdbid = env('radarr_movie_imdbid', '')
    radarr_movie_year = env('radarr_movie_year', '')
    radarr_moviefile_quality = env('radarr_moviefile_quality', '')
    radarr_moviefile_scenename = env('radarr_moviefile_scenename', '')
    radarr_moviefile_path = env('radarr_moviefile_path', '')
    radarr_movie_id = env('radarr_movie_id', '')

    # Not launched by Radarr/Sonarr at all.
    if not radarr_eventtype and not sonarr_eventtype:
        logger.error("This script works only if called within Radarr or Sonarr")
        sys.exit(0)

    # Connection test from either app: nothing to do.
    if "Test" in (radarr_eventtype, sonarr_eventtype):
        print("Test in progress... Good-bye!")
        sys.exit(0)

    # Only act on items carrying the bot's tag keyword.
    if keyword not in radarr_movie_tags and keyword not in sonarr_series_tags:
        return

    # "Add" handlers call sys.exit() themselves, so at most one runs.
    if sonarr_eventtype == "SeriesAdd":
        handle_add_event(sonarr_series_tags, sonarr_series_title, keyword, 'sonarr')
    if radarr_eventtype == "MovieAdded":
        handle_add_event(radarr_movie_tags, radarr_movie_title, keyword, 'radarr')

    if sonarr_eventtype == "Grab":
        handle_grab_event(
            sonarr_series_tags, sonarr_series_title, sonarr_release_quality,
            sonarr_release_title, sonarr_release_size, sonarr_series_imdbid,
            keyword, 'sonarr'
        )
    if radarr_eventtype == "Grab":
        handle_grab_event(
            radarr_movie_tags, radarr_movie_title, radarr_release_quality,
            radarr_release_title, radarr_release_size, radarr_movie_imdbid,
            keyword, 'radarr'
        )

    if sonarr_eventtype == "Download":
        process_sonarr_download(
            sonarr_series_tags, sonarr_series_title, sonarr_series_year,
            sonarr_episodefile_seasonnumber, sonarr_episodefile_episodenumbers,
            sonarr_episodefile_quality, sonarr_episodefile_path,
            sonarr_episodefile_id, sonarr_series_tvdbid, sonarr_series_id
        )
    if radarr_eventtype == "Download":
        process_radarr_download(
            radarr_movie_tags, radarr_movie_title, radarr_movie_year,
            radarr_moviefile_quality, radarr_moviefile_scenename,
            radarr_moviefile_path, radarr_movie_id
        )
if __name__ == '__main__':
    # Startup dump always runs (it is a no-op when ENV_DUMP is disabled).
    dump_env()
    # "manual" subcommand: one-off manual send for testing; otherwise the
    # normal Radarr/Sonarr event flow.
    if len(sys.argv) > 1 and sys.argv[1] == 'manual':
        manual_args = parse_manual_args()
        exit_code = run_manual_send(
            file_path=manual_args.file,
            chat_id=manual_args.chat_id,
            entity=manual_args.entity,
            split_mb=manual_args.split
        )
        sys.exit(exit_code)
    run_radarr_sonarr_flow()
Advertisement
Add Comment
Please, Sign In to add comment