Guest User

gpt_sovits_api_colab_ngrok.py

a guest
Dec 14th, 2024
38
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
Python 21.33 KB | Source Code | 0 0
  1. """
  2. # WebAPI文档
  3.  
  4. ` python api_v2.py -a 127.0.0.1 -p 9880 -c GPT_SoVITS/configs/tts_infer.yaml `
  5.  
  6. ## 执行参数:
  7.    `-a` - `绑定地址, 默认"127.0.0.1"`
  8.    `-p` - `绑定端口, 默认9880`
  9.    `-c` - `TTS配置文件路径, 默认"GPT_SoVITS/configs/tts_infer.yaml"`
  10.  
  11. ## 调用:
  12.  
  13. ### 推理
  14.  
  15. endpoint: `/tts`
  16. GET:
  17. ```
  18. http://127.0.0.1:9880/tts?text=先帝创业未半而中道崩殂,今天下三分,益州疲弊,此诚危急存亡之秋也。&text_lang=zh&ref_audio_path=archive_jingyuan_1.wav&prompt_lang=zh&prompt_text=我是「罗浮」云骑将军景元。不必拘谨,「将军」只是一时的身份,你称呼我景元便可&text_split_method=cut5&batch_size=1&media_type=wav&streaming_mode=true
  19. ```
  20.  
  21. POST:
  22. ```json
  23. {
  24.    "text": "",                   # str.(required) text to be synthesized
  25.    "text_lang: "",               # str.(required) language of the text to be synthesized
  26.    "ref_audio_path": "",         # str.(required) reference audio path
  27.    "aux_ref_audio_paths": [],    # list.(optional) auxiliary reference audio paths for multi-speaker tone fusion
  28.    "prompt_text": "",            # str.(optional) prompt text for the reference audio
  29.    "prompt_lang": "",            # str.(required) language of the prompt text for the reference audio
  30.    "top_k": 5,                   # int. top k sampling
  31.    "top_p": 1,                   # float. top p sampling
  32.    "temperature": 1,             # float. temperature for sampling
  33.    "text_split_method": "cut0",  # str. text split method, see text_segmentation_method.py for details.
  34.    "batch_size": 1,              # int. batch size for inference
  35.    "batch_threshold": 0.75,      # float. threshold for batch splitting.
  36.    "split_bucket: True,          # bool. whether to split the batch into multiple buckets.
  37.    "speed_factor":1.0,           # float. control the speed of the synthesized audio.
  38.    "streaming_mode": False,      # bool. whether to return a streaming response.
  39.    "seed": -1,                   # int. random seed for reproducibility.
  40.    "parallel_infer": True,       # bool. whether to use parallel inference.
  41.    "repetition_penalty": 1.35    # float. repetition penalty for T2S model.
  42. }
  43. ```
  44.  
  45. RESP:
  46. 成功: 直接返回 wav 音频流, http code 200
  47. 失败: 返回包含错误信息的 json, http code 400
  48.  
  49. ### 命令控制
  50.  
  51. endpoint: `/control`
  52.  
  53. command:
  54. "restart": 重新运行
  55. "exit": 结束运行
  56.  
  57. GET:
  58. ```
  59. http://127.0.0.1:9880/control?command=restart
  60. ```
  61. POST:
  62. ```json
  63. {
  64.    "command": "restart"
  65. }
  66. ```
  67.  
  68. RESP: 无
  69.  
  70.  
  71. ### 切换GPT模型
  72.  
  73. endpoint: `/set_gpt_weights`
  74.  
  75. GET:
  76. ```
  77. http://127.0.0.1:9880/set_gpt_weights?weights_path=GPT_SoVITS/pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt
  78. ```
  79. RESP:
  80. 成功: 返回"success", http code 200
  81. 失败: 返回包含错误信息的 json, http code 400
  82.  
  83.  
  84. ### 切换Sovits模型
  85.  
  86. endpoint: `/set_sovits_weights`
  87.  
  88. GET:
  89. ```
  90. http://127.0.0.1:9880/set_sovits_weights?weights_path=GPT_SoVITS/pretrained_models/s2G488k.pth
  91. ```
  92.  
  93. RESP:
  94. 成功: 返回"success", http code 200
  95. 失败: 返回包含错误信息的 json, http code 400
  96.    
  97. """
  98. import os
  99. import sys
  100. import traceback
  101. from typing import Generator
  102.  
  103. now_dir = os.getcwd()
  104. sys.path.append(now_dir)
  105. sys.path.append("%s/GPT_SoVITS" % (now_dir))
  106.  
  107. import argparse
  108. import subprocess
  109. import wave
  110. import signal
  111. import numpy as np
  112. import soundfile as sf
  113. from fastapi import FastAPI, Request, HTTPException, Response
  114. from fastapi.responses import StreamingResponse, JSONResponse
  115. from fastapi import FastAPI, UploadFile, File
  116. import uvicorn
  117. from io import BytesIO
  118. from tools.i18n.i18n import I18nAuto
  119. from GPT_SoVITS.TTS_infer_pack.TTS import TTS, TTS_Config
  120. from GPT_SoVITS.TTS_infer_pack.text_segmentation_method import get_method_names as get_cut_method_names
  121. from fastapi.responses import StreamingResponse
  122. import nest_asyncio
  123. from pyngrok import ngrok
  124. import multiprocessing
  125. import time
  126.  
  127. from pydantic import BaseModel
  128. # print(sys.path)
  129. i18n = I18nAuto()
  130. cut_method_names = get_cut_method_names()
  131.  
  132. parser = argparse.ArgumentParser(description="GPT-SoVITS api")
  133. parser.add_argument("-c", "--tts_config", type=str, default="GPT_SoVITS/configs/tts_infer.yaml", help="tts_infer路径")
  134. parser.add_argument("-a", "--bind_addr", type=str, default="127.0.0.1", help="default: 127.0.0.1")
  135. parser.add_argument("-p", "--port", type=int, default="9880", help="default: 9880")
  136. args = parser.parse_args()
  137. config_path = args.tts_config
  138. # device = args.device
  139. port = args.port
  140. host = args.bind_addr
  141. argv = sys.argv
  142.  
  143. if config_path in [None, ""]:
  144.     config_path = "GPT-SoVITS/configs/tts_infer.yaml"
  145.  
  146. tts_config = TTS_Config(config_path)
  147. print(tts_config)
  148. tts_pipeline = TTS(tts_config)
  149.  
  150. APP = FastAPI()
class TTS_Request(BaseModel):
    """Request body schema for POST /tts (mirrors the GET query parameters)."""
    text: str = None                      # (required) text to be synthesized
    text_lang: str = None                 # (required) language of `text`
    ref_audio_path: str = None            # (required) reference audio path
    aux_ref_audio_paths: list = None      # (optional) extra refs for multi-speaker tone fusion
    prompt_lang: str = None               # (required) language of `prompt_text`
    prompt_text: str = ""                 # (optional) prompt text for the reference audio
    top_k:int = 5                         # top-k sampling
    top_p:float = 1                       # top-p sampling
    temperature:float = 1                 # sampling temperature
    text_split_method:str = "cut5"        # see text_segmentation_method.py
    batch_size:int = 1                    # inference batch size
    batch_threshold:float = 0.75          # threshold for batch splitting
    split_bucket:bool = True              # split the batch into multiple buckets
    speed_factor:float = 1.0              # playback-speed control
    fragment_interval:float = 0.3         # gap between audio fragments (seconds)
    seed:int = -1                         # random seed; -1 = unseeded
    media_type:str = "wav"                # "wav" | "raw" | "ogg" | "aac"
    streaming_mode:bool = False           # stream chunks instead of one response
    parallel_infer:bool = True            # use parallel inference
    repetition_penalty:float = 1.35       # repetition penalty for the T2S model
  172.  
# List of supported audio file extensions.
# Kept as a tuple so it can be passed directly to str.endswith() below.
AUDIO_EXTENSIONS = ('.wav', '.aac', '.flac', '.ogg', '.mp3', '.m4a')
  175.  
  176. ### modify from https://github.com/RVC-Boss/GPT-SoVITS/pull/894/files
  177. def pack_ogg(io_buffer:BytesIO, data:np.ndarray, rate:int):
  178.     with sf.SoundFile(io_buffer, mode='w', samplerate=rate, channels=1, format='ogg') as audio_file:
  179.         audio_file.write(data)
  180.     return io_buffer
  181.  
  182.  
  183. def pack_raw(io_buffer:BytesIO, data:np.ndarray, rate:int):
  184.     io_buffer.write(data.tobytes())
  185.     return io_buffer
  186.  
  187.  
  188. def pack_wav(io_buffer:BytesIO, data:np.ndarray, rate:int):
  189.     io_buffer = BytesIO()
  190.     sf.write(io_buffer, data, rate, format='wav')
  191.     return io_buffer
  192.  
  193. def pack_aac(io_buffer:BytesIO, data:np.ndarray, rate:int):
  194.     process = subprocess.Popen([
  195.         'ffmpeg',
  196.         '-f', 's16le',  # 输入16位有符号小端整数PCM
  197.         '-ar', str(rate),  # 设置采样率
  198.         '-ac', '1',  # 单声道
  199.         '-i', 'pipe:0',  # 从管道读取输入
  200.         '-c:a', 'aac',  # 音频编码器为AAC
  201.         '-b:a', '192k',  # 比特率
  202.         '-vn',  # 不包含视频
  203.         '-f', 'adts',  # 输出AAC数据流格式
  204.         'pipe:1'  # 将输出写入管道
  205.     ], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
  206.     out, _ = process.communicate(input=data.tobytes())
  207.     io_buffer.write(out)
  208.     return io_buffer
  209.  
  210. def pack_audio(io_buffer:BytesIO, data:np.ndarray, rate:int, media_type:str):
  211.     if media_type == "ogg":
  212.         io_buffer = pack_ogg(io_buffer, data, rate)
  213.     elif media_type == "aac":
  214.         io_buffer = pack_aac(io_buffer, data, rate)
  215.     elif media_type == "wav":
  216.         io_buffer = pack_wav(io_buffer, data, rate)
  217.     else:
  218.         io_buffer = pack_raw(io_buffer, data, rate)
  219.     io_buffer.seek(0)
  220.     return io_buffer
  221.  
  222.  
  223.  
  224. # from https://huggingface.co/spaces/coqui/voice-chat-with-mistral/blob/main/app.py
  225. def wave_header_chunk(frame_input=b"", channels=1, sample_width=2, sample_rate=32000):
  226.     # This will create a wave header then append the frame input
  227.     # It should be first on a streaming wav file
  228.     # Other frames better should not have it (else you will hear some artifacts each chunk start)
  229.     wav_buf = BytesIO()
  230.     with wave.open(wav_buf, "wb") as vfout:
  231.         vfout.setnchannels(channels)
  232.         vfout.setsampwidth(sample_width)
  233.         vfout.setframerate(sample_rate)
  234.         vfout.writeframes(frame_input)
  235.  
  236.     wav_buf.seek(0)
  237.     return wav_buf.read()
  238.  
  239.  
  240. def handle_control(command:str):
  241.     if command == "restart":
  242.         os.execl(sys.executable, sys.executable, *argv)
  243.     elif command == "exit":
  244.         os.kill(os.getpid(), signal.SIGTERM)
  245.         exit(0)
  246.  
  247.  
  248. def check_params(req:dict):
  249.     text:str = req.get("text", "")
  250.     text_lang:str = req.get("text_lang", "")
  251.     ref_audio_path:str = req.get("ref_audio_path", "")
  252.     streaming_mode:bool = req.get("streaming_mode", False)
  253.     media_type:str = req.get("media_type", "wav")
  254.     prompt_lang:str = req.get("prompt_lang", "")
  255.     text_split_method:str = req.get("text_split_method", "cut5")
  256.  
  257.     if ref_audio_path in [None, ""]:
  258.         return JSONResponse(status_code=400, content={"message": "ref_audio_path is required"})
  259.     if text in [None, ""]:
  260.         return JSONResponse(status_code=400, content={"message": "text is required"})
  261.     if (text_lang in [None, ""]) :
  262.         return JSONResponse(status_code=400, content={"message": "text_lang is required"})
  263.     elif text_lang.lower() not in tts_config.languages:
  264.         return JSONResponse(status_code=400, content={"message": f"text_lang: {text_lang} is not supported in version {tts_config.version}"})
  265.     if (prompt_lang in [None, ""]) :
  266.         return JSONResponse(status_code=400, content={"message": "prompt_lang is required"})
  267.     elif prompt_lang.lower() not in tts_config.languages:
  268.         return JSONResponse(status_code=400, content={"message": f"prompt_lang: {prompt_lang} is not supported in version {tts_config.version}"})
  269.     if media_type not in ["wav", "raw", "ogg", "aac"]:
  270.         return JSONResponse(status_code=400, content={"message": f"media_type: {media_type} is not supported"})
  271.     elif media_type == "ogg" and  not streaming_mode:
  272.         return JSONResponse(status_code=400, content={"message": "ogg format is not supported in non-streaming mode"})
  273.    
  274.     if text_split_method not in cut_method_names:
  275.         return JSONResponse(status_code=400, content={"message": f"text_split_method:{text_split_method} is not supported"})
  276.  
  277.     return None
  278.  
async def tts_handle(req:dict):
    """
    Text-to-speech handler shared by the GET and POST /tts endpoints.

    Args:
        req (dict): request fields, see TTS_Request / the module docstring:
            text, text_lang, ref_audio_path (required); aux_ref_audio_paths,
            prompt_text, prompt_lang, top_k, top_p, temperature,
            text_split_method, batch_size, batch_threshold, split_bucket,
            speed_factor, fragment_interval, seed, media_type,
            streaming_mode, parallel_infer, repetition_penalty (optional).

    Returns:
        StreamingResponse (streaming mode) or Response with the complete
        encoded audio; a 400 JSONResponse on validation or inference failure.
    """

    streaming_mode = req.get("streaming_mode", False)
    return_fragment = req.get("return_fragment", False)
    media_type = req.get("media_type", "wav")

    check_res = check_params(req)
    if check_res is not None:
        # Validation failed; check_params already built the 400 response.
        return check_res

    if streaming_mode or return_fragment:
        # Ask the pipeline to yield audio fragment-by-fragment.
        req["return_fragment"] = True

    try:
        tts_generator=tts_pipeline.run(req)

        if streaming_mode:
            def streaming_generator(tts_generator:Generator, media_type:str):
                if media_type == "wav":
                    # Send the WAV header once, then switch to raw PCM chunks:
                    # repeating the header per chunk causes audible artifacts.
                    yield wave_header_chunk()
                    media_type = "raw"
                for sr, chunk in tts_generator:
                    yield pack_audio(BytesIO(), chunk, sr, media_type).getvalue()
            # _media_type = f"audio/{media_type}" if not (streaming_mode and media_type in ["wav", "raw"]) else f"audio/x-{media_type}"
            return StreamingResponse(streaming_generator(tts_generator, media_type, ), media_type=f"audio/{media_type}")

        else:
            # Non-streaming: take the single (sample_rate, samples) result
            # and return the fully encoded payload in one response.
            sr, audio_data = next(tts_generator)
            audio_data = pack_audio(BytesIO(), audio_data, sr, media_type).getvalue()
            return Response(audio_data, media_type=f"audio/{media_type}")
    except Exception as e:
        return JSONResponse(status_code=400, content={"message": f"tts failed", "Exception": str(e)})
  341.    
  342. def list_audio_files(ref_audio_path: str = None):
  343.     try:
  344.         if ref_audio_path:
  345.             directory = ref_audio_path
  346.             if not os.path.isdir(directory):
  347.                 return JSONResponse(status_code=404, content={"message": "Specified path is not a valid directory."})
  348.         else:
  349.             directory = os.getcwd()
  350.  
  351.         files = sorted(os.listdir(directory), key=str.casefold)
  352.         audio_files = [f for f in files if f.lower().endswith(AUDIO_EXTENSIONS)]
  353.  
  354.         if not audio_files:
  355.             return JSONResponse(status_code=404, content={"message": "No audio files found in the directory."})
  356.         return JSONResponse(content={"audio_files": audio_files})
  357.     except Exception as e:
  358.         return JSONResponse(status_code=500, content={"message": f"Failed to list audio files: {str(e)}"})
  359.  
  360. def list_gpt_files(directory_path: str = "GPT_weights_v2"):
  361.     try:
  362.         current_dir = os.path.join(os.getcwd(), directory_path)
  363.         files = sorted(os.listdir(current_dir), key=str.casefold)
  364.         gpt_files = [f for f in files if f.lower().endswith('.ckpt')]
  365.         if not gpt_files:
  366.             return JSONResponse(status_code=404, content={"message": "No gpt files found in the directory."})
  367.         return JSONResponse(content={"gpt_files": gpt_files})
  368.     except Exception as e:
  369.         return JSONResponse(status_code=500, content={"message": f"Failed to list gpt files: {str(e)}"})
  370.  
  371. def list_sovits_files(directory_path: str = "SoVITS_weights_v2"):
  372.     try:
  373.         current_dir = os.path.join(os.getcwd(), directory_path)
  374.         files = sorted(os.listdir(current_dir), key=str.casefold)
  375.         sovits_files = [f for f in files if f.lower().endswith('.pth')]
  376.         if not sovits_files:
  377.             return JSONResponse(status_code=404, content={"message": "No sovits files found in the directory."})
  378.         return JSONResponse(content={"sovits_files": sovits_files})
  379.     except Exception as e:
  380.         return JSONResponse(status_code=500, content={"message": f"Failed to list sovits files: {str(e)}"})
  381.  
  382. @APP.get("/list_audio_files")
  383. async def get_audio_files(ref_audio_path: str = None):
  384.     return list_audio_files(ref_audio_path)
  385.  
@APP.get("/list_gpt_files")
async def get_gpt_files(directory_path: str = "GPT_weights_v2"):
    """GET /list_gpt_files — thin wrapper; all logic lives in list_gpt_files()."""
    return list_gpt_files(directory_path)
  389.  
@APP.get("/list_sovits_files")
async def get_sovits_files(directory_path: str = "SoVITS_weights_v2"):
    """GET /list_sovits_files — thin wrapper; all logic lives in list_sovits_files()."""
    return list_sovits_files(directory_path)
  393.  
@APP.get("/control")
async def control(command: str = None):
    """GET /control?command=restart|exit — restart or stop the server.

    Successful commands never return a body: "restart" re-execs the process
    and "exit" kills it, so only the missing-command 400 ever reaches a client.
    """
    if command is None:
        return JSONResponse(status_code=400, content={"message": "command is required"})
    handle_control(command)
  399.  
  400.  
  401.  
  402. @APP.get("/tts")
  403. async def tts_get_endpoint(
  404.                         text: str = None,
  405.                         text_lang: str = None,
  406.                         ref_audio_path: str = None,
  407.                         aux_ref_audio_paths:list = None,
  408.                         prompt_lang: str = None,
  409.                         prompt_text: str = "",
  410.                         top_k:int = 5,
  411.                         top_p:float = 1,
  412.                         temperature:float = 1,
  413.                         text_split_method:str = "cut0",
  414.                         batch_size:int = 1,
  415.                         batch_threshold:float = 0.75,
  416.                         split_bucket:bool = True,
  417.                         speed_factor:float = 1.0,
  418.                         fragment_interval:float = 0.3,
  419.                         seed:int = -1,
  420.                         media_type:str = "wav",
  421.                         streaming_mode:bool = False,
  422.                         parallel_infer:bool = True,
  423.                         repetition_penalty:float = 1.35
  424.                         ):
  425.     req = {
  426.         "text": text,
  427.         "text_lang": text_lang.lower(),
  428.         "ref_audio_path": ref_audio_path,
  429.         "aux_ref_audio_paths": aux_ref_audio_paths,
  430.         "prompt_text": prompt_text,
  431.         "prompt_lang": prompt_lang.lower(),
  432.         "top_k": top_k,
  433.         "top_p": top_p,
  434.         "temperature": temperature,
  435.         "text_split_method": text_split_method,
  436.         "batch_size":int(batch_size),
  437.         "batch_threshold":float(batch_threshold),
  438.         "speed_factor":float(speed_factor),
  439.         "split_bucket":split_bucket,
  440.         "fragment_interval":fragment_interval,
  441.         "seed":seed,
  442.         "media_type":media_type,
  443.         "streaming_mode":streaming_mode,
  444.         "parallel_infer":parallel_infer,
  445.         "repetition_penalty":float(repetition_penalty)
  446.     }
  447.     return await tts_handle(req)
  448.                
  449.  
@APP.post("/tts")
async def tts_post_endpoint(request: TTS_Request):
    """POST /tts — JSON-body variant; same semantics as the GET endpoint."""
    req = request.dict()
    return await tts_handle(req)
  454.  
  455.  
  456. @APP.get("/set_refer_audio")
  457. async def set_refer_aduio(refer_audio_path: str = None):
  458.     try:
  459.         tts_pipeline.set_ref_audio(refer_audio_path)
  460.     except Exception as e:
  461.         return JSONResponse(status_code=400, content={"message": f"set refer audio failed", "Exception": str(e)})
  462.     return JSONResponse(status_code=200, content={"message": "success"})
  463.  
  464.  
  465. # @APP.post("/set_refer_audio")
  466. # async def set_refer_aduio_post(audio_file: UploadFile = File(...)):
  467. #     try:
  468. #         # 检查文件类型,确保是音频文件
  469. #         if not audio_file.content_type.startswith("audio/"):
  470. #             return JSONResponse(status_code=400, content={"message": "file type is not supported"})
  471.        
  472. #         os.makedirs("uploaded_audio", exist_ok=True)
  473. #         save_path = os.path.join("uploaded_audio", audio_file.filename)
  474. #         # 保存音频文件到服务器上的一个目录
  475. #         with open(save_path , "wb") as buffer:
  476. #             buffer.write(await audio_file.read())
  477.            
  478. #         tts_pipeline.set_ref_audio(save_path)
  479. #     except Exception as e:
  480. #         return JSONResponse(status_code=400, content={"message": f"set refer audio failed", "Exception": str(e)})
  481. #     return JSONResponse(status_code=200, content={"message": "success"})
  482.  
  483. @APP.get("/set_gpt_weights")
  484. async def set_gpt_weights(weights_path: str = None):
  485.     try:
  486.         if weights_path in ["", None]:
  487.             return JSONResponse(status_code=400, content={"message": "gpt weight path is required"})
  488.         tts_pipeline.init_t2s_weights(weights_path)
  489.     except Exception as e:
  490.         return JSONResponse(status_code=400, content={"message": f"change gpt weight failed", "Exception": str(e)})
  491.  
  492.     return JSONResponse(status_code=200, content={"message": "success"})
  493.  
  494.  
  495. @APP.get("/set_sovits_weights")
  496. async def set_sovits_weights(weights_path: str = None):
  497.     try:
  498.         if weights_path in ["", None]:
  499.             return JSONResponse(status_code=400, content={"message": "sovits weight path is required"})
  500.         tts_pipeline.init_vits_weights(weights_path)
  501.     except Exception as e:
  502.         return JSONResponse(status_code=400, content={"message": f"change sovits weight failed", "Exception": str(e)})
  503.     return JSONResponse(status_code=200, content={"message": "success"})
  504.  
  505.  
  506.  
# Define the function to start Uvicorn
def start_uvicorn(host, port):
    """Run the FastAPI app under uvicorn (blocking).

    nest_asyncio.apply() patches the event loop so this also works when an
    event loop is already running (e.g. inside Colab/Jupyter).
    """
    nest_asyncio.apply()
    uvicorn.run(app=APP, host=host, port=port, workers=1)
  511.  
  512. if __name__ == "__main__":
  513.     host = None  # or specify the host as needed
  514.     port = 9880  # or the desired port
  515.  
  516.     # Start Uvicorn in a separate process
  517.     uvicorn_process = multiprocessing.Process(target=start_uvicorn, args=(host, port))
  518.     multiprocessing.set_start_method('spawn', force=True)
  519.     uvicorn_process.start()
  520.  
  521.     # Wait for Uvicorn to start
  522.     time.sleep(5)  # Adjust the sleep time as necessary
  523.  
  524.     try:
  525.         # Set the Ngrok authentication token
  526.         ngrok.set_auth_token("TOKEN_HERE")
  527.         # Connect Ngrok to the port Uvicorn is listening on
  528.         ngrok_tunnel = ngrok.connect(port)
  529.         # Print the public URL
  530.         print('Public URL:', ngrok_tunnel.public_url)
  531.         while True:
  532.           time.sleep(1)
  533.     except Exception as e:
  534.         traceback.print_exc()
  535.         os.kill(os.getpid(), signal.SIGTERM)
  536.         exit(0)
Advertisement
Add Comment
Please, Sign In to add comment