Advertisement
Guest User

Untitled

a guest
Aug 20th, 2019
100
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
text 47.32 KB | None | 0 0
  1. {
  2. "nbformat": 4,
  3. "nbformat_minor": 0,
  4. "metadata": {
  5. "colab": {
  6. "name": "DFL_orig.ipynb",
  7. "version": "0.3.2",
  8. "provenance": [],
  9. "collapsed_sections": [
  10. "JuVn21kt40Gw",
  11. "6jHv35sm-Qiy",
  12. "tUNVcbujhm00",
  13. "WTuyUxgdLA13",
  14. "avAcSL_uvtq_",
  15. "f7GNQ7kZx7Ha"
  16. ],
  17. "toc_visible": true
  18. },
  19. "kernelspec": {
  20. "name": "python3",
  21. "display_name": "Python 3"
  22. },
  23. "accelerator": "GPU"
  24. },
  25. "cells": [
  26. {
  27. "cell_type": "markdown",
  28. "metadata": {
  29. "id": "0cKdTCuv4tXh",
  30. "colab_type": "text"
  31. },
  32. "source": [
  33. "# Welcome to DFL-Colab!\n",
  34. "\n",
  35. "This is an adapted version of the DFL for Google Colab.\n",
  36. "\n",
  37. "Version 2.5\n",
  38. "\n",
  39. "# Overview\n",
  40. "* Extractor works in full functionality.\n",
  41. "* Training can work without preview.\n",
  42. "* Converter works in full functionality.\n",
  43. "* You can import/export workspace with your Google Drive.\n",
  44. "* Import/export and other workspace manipulations can be done in the \"Manage workspace\" block\n",
  45. "* Google Colab machine is active for 12 hours. DFL-Colab makes a backup of your workspace in training mode, after 11 hours from the start of the session.\n",
  46. "* Google does not like long-term heavy calculations. Therefore, for training more than two sessions in a row, use two Google accounts. It is recommended to split your training over 2 accounts, but you can use one Google Drive account to store your workspace.\n",
  47. "\n"
  48. ]
  49. },
  50. {
  51. "cell_type": "markdown",
  52. "metadata": {
  53. "id": "JuVn21kt40Gw",
  54. "colab_type": "text"
  55. },
  56. "source": [
  57. "# Clone Github repository and install requirements\n",
  58. "\n",
  59. "* Clone Github repository or pull updates\n",
  60. "* Requirements are installed automatically"
  61. ]
  62. },
  63. {
  64. "cell_type": "code",
  65. "metadata": {
  66. "id": "JG-f2WqT4fLK",
  67. "colab_type": "code",
  68. "cellView": "form",
  69. "colab": {
  70. "base_uri": "https://localhost:8080/",
  71. "height": 1000
  72. },
  73. "outputId": "dbc2cdd6-e6a0-40f0-f0d6-fa49bba7017e"
  74. },
  75. "source": [
  76. "#@title Clone or pull DeepFaceLab from Github\n",
  77. "\n",
  78. "Mode = \"clone\" #@param [\"clone\", \"pull\"]\n",
  79. "\n",
  80. "from pathlib import Path\n",
  81. "if (Mode == \"clone\"):\n",
  82. " !git clone https://github.com/iperov/DeepFaceLab.git\n",
  83. "else:\n",
  84. " %cd /content/DeepFaceLab\n",
  85. " !git pull\n",
  86. "\n",
  87. "!pip install -r /content/DeepFaceLab/requirements-colab.txt\n",
  88. "!pip install --upgrade scikit-image\n",
  89. "\n",
  90. "if not Path(\"/content/workspace\").exists():\n",
  91. " !wget -q --no-check-certificate -r 'https://docs.google.com/uc?export=download&id=1hTH2h6l_4kKrczA8EkN6GyuXx4lzmCnK' -O pretrain_CelebA.zip\n",
  92. " !mkdir /content/pretrain\n",
  93. " !unzip -q /content/pretrain_CelebA.zip -d /content/pretrain/\n",
  94. " !rm /content/pretrain_CelebA.zip\n",
  95. "\n",
  96. "print(\"Done!\")"
  97. ],
  98. "execution_count": 1,
  99. "outputs": [
  100. {
  101. "output_type": "stream",
  102. "text": [
  103. "Cloning into 'DeepFaceLab'...\n",
  104. "remote: Enumerating objects: 26, done.\u001b[K\n",
  105. "remote: Counting objects: 100% (26/26), done.\u001b[K\n",
  106. "remote: Compressing objects: 100% (22/22), done.\u001b[K\n",
  107. "remote: Total 3342 (delta 8), reused 12 (delta 4), pack-reused 3316\u001b[K\n",
  108. "Receiving objects: 100% (3342/3342), 301.86 MiB | 11.44 MiB/s, done.\n",
  109. "Resolving deltas: 100% (2149/2149), done.\n",
  110. "Checking out files: 100% (122/122), done.\n",
  111. "Collecting git+https://www.github.com/keras-team/keras-contrib.git (from -r /content/DeepFaceLab/requirements-colab.txt (line 10))\n",
  112. " Cloning https://www.github.com/keras-team/keras-contrib.git to /tmp/pip-req-build-5gvr326i\n",
  113. " Running command git clone -q https://www.github.com/keras-team/keras-contrib.git /tmp/pip-req-build-5gvr326i\n",
  114. "Collecting numpy==1.16.3 (from -r /content/DeepFaceLab/requirements-colab.txt (line 1))\n",
  115. "\u001b[?25l Downloading https://files.pythonhosted.org/packages/c1/e2/4db8df8f6cddc98e7d7c537245ef2f4e41a1ed17bf0c3177ab3cc6beac7f/numpy-1.16.3-cp36-cp36m-manylinux1_x86_64.whl (17.3MB)\n",
  116. "\u001b[K |████████████████████████████████| 17.3MB 2.8MB/s \n",
  117. "\u001b[?25hCollecting h5py==2.9.0 (from -r /content/DeepFaceLab/requirements-colab.txt (line 2))\n",
  118. "\u001b[?25l Downloading https://files.pythonhosted.org/packages/30/99/d7d4fbf2d02bb30fb76179911a250074b55b852d34e98dd452a9f394ac06/h5py-2.9.0-cp36-cp36m-manylinux1_x86_64.whl (2.8MB)\n",
  119. "\u001b[K |████████████████████████████████| 2.8MB 27.6MB/s \n",
  120. "\u001b[?25hRequirement already satisfied: Keras==2.2.4 in /usr/local/lib/python3.6/dist-packages (from -r /content/DeepFaceLab/requirements-colab.txt (line 3)) (2.2.4)\n",
  121. "Collecting opencv-python==4.0.0.21 (from -r /content/DeepFaceLab/requirements-colab.txt (line 4))\n",
  122. "\u001b[?25l Downloading https://files.pythonhosted.org/packages/37/49/874d119948a5a084a7ebe98308214098ef3471d76ab74200f9800efeef15/opencv_python-4.0.0.21-cp36-cp36m-manylinux1_x86_64.whl (25.4MB)\n",
  123. "\u001b[K |████████████████████████████████| 25.4MB 1.9MB/s \n",
  124. "\u001b[?25hCollecting tensorflow-gpu==1.13.1 (from -r /content/DeepFaceLab/requirements-colab.txt (line 5))\n",
  125. "\u001b[?25l Downloading https://files.pythonhosted.org/packages/7b/b1/0ad4ae02e17ddd62109cd54c291e311c4b5fd09b4d0678d3d6ce4159b0f0/tensorflow_gpu-1.13.1-cp36-cp36m-manylinux1_x86_64.whl (345.2MB)\n",
  126. "\u001b[K |████████████████████████████████| 345.2MB 65kB/s \n",
  127. "\u001b[?25hCollecting plaidml-keras==0.5.0 (from -r /content/DeepFaceLab/requirements-colab.txt (line 6))\n",
  128. " Downloading https://files.pythonhosted.org/packages/17/34/4102261e3d8867c31bae9f4def5d7e700fc25fff232fa1780040e8ed79b0/plaidml_keras-0.5.0-py2.py3-none-any.whl\n",
  129. "Requirement already satisfied: scikit-image in /usr/local/lib/python3.6/dist-packages (from -r /content/DeepFaceLab/requirements-colab.txt (line 7)) (0.15.0)\n",
  130. "Requirement already satisfied: tqdm in /usr/local/lib/python3.6/dist-packages (from -r /content/DeepFaceLab/requirements-colab.txt (line 8)) (4.28.1)\n",
  131. "Collecting ffmpeg-python==0.1.17 (from -r /content/DeepFaceLab/requirements-colab.txt (line 9))\n",
  132. " Downloading https://files.pythonhosted.org/packages/3d/10/330cbc8e63d072d40413f4d470444a6a1e8c8c6a80b2a4ac302d1252ca1b/ffmpeg_python-0.1.17-py3-none-any.whl\n",
  133. "Requirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from h5py==2.9.0->-r /content/DeepFaceLab/requirements-colab.txt (line 2)) (1.12.0)\n",
  134. "Requirement already satisfied: keras-preprocessing>=1.0.5 in /usr/local/lib/python3.6/dist-packages (from Keras==2.2.4->-r /content/DeepFaceLab/requirements-colab.txt (line 3)) (1.1.0)\n",
  135. "Requirement already satisfied: scipy>=0.14 in /usr/local/lib/python3.6/dist-packages (from Keras==2.2.4->-r /content/DeepFaceLab/requirements-colab.txt (line 3)) (1.3.1)\n",
  136. "Requirement already satisfied: pyyaml in /usr/local/lib/python3.6/dist-packages (from Keras==2.2.4->-r /content/DeepFaceLab/requirements-colab.txt (line 3)) (3.13)\n",
  137. "Requirement already satisfied: keras-applications>=1.0.6 in /usr/local/lib/python3.6/dist-packages (from Keras==2.2.4->-r /content/DeepFaceLab/requirements-colab.txt (line 3)) (1.0.8)\n",
  138. "Requirement already satisfied: wheel>=0.26 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==1.13.1->-r /content/DeepFaceLab/requirements-colab.txt (line 5)) (0.33.4)\n",
  139. "Requirement already satisfied: astor>=0.6.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==1.13.1->-r /content/DeepFaceLab/requirements-colab.txt (line 5)) (0.8.0)\n",
  140. "Requirement already satisfied: grpcio>=1.8.6 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==1.13.1->-r /content/DeepFaceLab/requirements-colab.txt (line 5)) (1.15.0)\n",
  141. "Requirement already satisfied: protobuf>=3.6.1 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==1.13.1->-r /content/DeepFaceLab/requirements-colab.txt (line 5)) (3.7.1)\n",
  142. "Collecting tensorflow-estimator<1.14.0rc0,>=1.13.0 (from tensorflow-gpu==1.13.1->-r /content/DeepFaceLab/requirements-colab.txt (line 5))\n",
  143. "\u001b[?25l Downloading https://files.pythonhosted.org/packages/bb/48/13f49fc3fa0fdf916aa1419013bb8f2ad09674c275b4046d5ee669a46873/tensorflow_estimator-1.13.0-py2.py3-none-any.whl (367kB)\n",
  144. "\u001b[K |████████████████████████████████| 368kB 39.6MB/s \n",
  145. "\u001b[?25hRequirement already satisfied: termcolor>=1.1.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==1.13.1->-r /content/DeepFaceLab/requirements-colab.txt (line 5)) (1.1.0)\n",
  146. "Collecting tensorboard<1.14.0,>=1.13.0 (from tensorflow-gpu==1.13.1->-r /content/DeepFaceLab/requirements-colab.txt (line 5))\n",
  147. "\u001b[?25l Downloading https://files.pythonhosted.org/packages/0f/39/bdd75b08a6fba41f098b6cb091b9e8c7a80e1b4d679a581a0ccd17b10373/tensorboard-1.13.1-py3-none-any.whl (3.2MB)\n",
  148. "\u001b[K |████████████████████████████████| 3.2MB 25.5MB/s \n",
  149. "\u001b[?25hRequirement already satisfied: absl-py>=0.1.6 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==1.13.1->-r /content/DeepFaceLab/requirements-colab.txt (line 5)) (0.7.1)\n",
  150. "Requirement already satisfied: gast>=0.2.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow-gpu==1.13.1->-r /content/DeepFaceLab/requirements-colab.txt (line 5)) (0.2.2)\n",
  151. "Collecting plaidml (from plaidml-keras==0.5.0->-r /content/DeepFaceLab/requirements-colab.txt (line 6))\n",
  152. "\u001b[?25l Downloading https://files.pythonhosted.org/packages/05/48/76071904028f16b8fcf86e021eaa297e69fb7f816f1d95162292e85da989/plaidml-0.6.4-py2.py3-none-manylinux1_x86_64.whl (32.1MB)\n",
  153. "\u001b[K |████████████████████████████████| 32.1MB 1.5MB/s \n",
  154. "\u001b[?25hRequirement already satisfied: imageio>=2.0.1 in /usr/local/lib/python3.6/dist-packages (from scikit-image->-r /content/DeepFaceLab/requirements-colab.txt (line 7)) (2.4.1)\n",
  155. "Requirement already satisfied: networkx>=2.0 in /usr/local/lib/python3.6/dist-packages (from scikit-image->-r /content/DeepFaceLab/requirements-colab.txt (line 7)) (2.3)\n",
  156. "Requirement already satisfied: pillow>=4.3.0 in /usr/local/lib/python3.6/dist-packages (from scikit-image->-r /content/DeepFaceLab/requirements-colab.txt (line 7)) (4.3.0)\n",
  157. "Requirement already satisfied: PyWavelets>=0.4.0 in /usr/local/lib/python3.6/dist-packages (from scikit-image->-r /content/DeepFaceLab/requirements-colab.txt (line 7)) (1.0.3)\n",
  158. "Requirement already satisfied: matplotlib!=3.0.0,>=2.0.0 in /usr/local/lib/python3.6/dist-packages (from scikit-image->-r /content/DeepFaceLab/requirements-colab.txt (line 7)) (3.0.3)\n",
  159. "Requirement already satisfied: future in /usr/local/lib/python3.6/dist-packages (from ffmpeg-python==0.1.17->-r /content/DeepFaceLab/requirements-colab.txt (line 9)) (0.16.0)\n",
  160. "Requirement already satisfied: setuptools in /usr/local/lib/python3.6/dist-packages (from protobuf>=3.6.1->tensorflow-gpu==1.13.1->-r /content/DeepFaceLab/requirements-colab.txt (line 5)) (41.0.1)\n",
  161. "Collecting mock>=2.0.0 (from tensorflow-estimator<1.14.0rc0,>=1.13.0->tensorflow-gpu==1.13.1->-r /content/DeepFaceLab/requirements-colab.txt (line 5))\n",
  162. " Downloading https://files.pythonhosted.org/packages/05/d2/f94e68be6b17f46d2c353564da56e6fb89ef09faeeff3313a046cb810ca9/mock-3.0.5-py2.py3-none-any.whl\n",
  163. "Requirement already satisfied: werkzeug>=0.11.15 in /usr/local/lib/python3.6/dist-packages (from tensorboard<1.14.0,>=1.13.0->tensorflow-gpu==1.13.1->-r /content/DeepFaceLab/requirements-colab.txt (line 5)) (0.15.5)\n",
  164. "Requirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.6/dist-packages (from tensorboard<1.14.0,>=1.13.0->tensorflow-gpu==1.13.1->-r /content/DeepFaceLab/requirements-colab.txt (line 5)) (3.1.1)\n",
  165. "Requirement already satisfied: cffi in /usr/local/lib/python3.6/dist-packages (from plaidml->plaidml-keras==0.5.0->-r /content/DeepFaceLab/requirements-colab.txt (line 6)) (1.12.3)\n",
  166. "Collecting enum34>=1.1.6 (from plaidml->plaidml-keras==0.5.0->-r /content/DeepFaceLab/requirements-colab.txt (line 6))\n",
  167. " Downloading https://files.pythonhosted.org/packages/af/42/cb9355df32c69b553e72a2e28daee25d1611d2c0d9c272aa1d34204205b2/enum34-1.1.6-py3-none-any.whl\n",
  168. "Requirement already satisfied: decorator>=4.3.0 in /usr/local/lib/python3.6/dist-packages (from networkx>=2.0->scikit-image->-r /content/DeepFaceLab/requirements-colab.txt (line 7)) (4.4.0)\n",
  169. "Requirement already satisfied: olefile in /usr/local/lib/python3.6/dist-packages (from pillow>=4.3.0->scikit-image->-r /content/DeepFaceLab/requirements-colab.txt (line 7)) (0.46)\n",
  170. "Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.6/dist-packages (from matplotlib!=3.0.0,>=2.0.0->scikit-image->-r /content/DeepFaceLab/requirements-colab.txt (line 7)) (0.10.0)\n",
  171. "Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib!=3.0.0,>=2.0.0->scikit-image->-r /content/DeepFaceLab/requirements-colab.txt (line 7)) (1.1.0)\n",
  172. "Requirement already satisfied: python-dateutil>=2.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib!=3.0.0,>=2.0.0->scikit-image->-r /content/DeepFaceLab/requirements-colab.txt (line 7)) (2.5.3)\n",
  173. "Requirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib!=3.0.0,>=2.0.0->scikit-image->-r /content/DeepFaceLab/requirements-colab.txt (line 7)) (2.4.2)\n",
  174. "Requirement already satisfied: pycparser in /usr/local/lib/python3.6/dist-packages (from cffi->plaidml->plaidml-keras==0.5.0->-r /content/DeepFaceLab/requirements-colab.txt (line 6)) (2.19)\n",
  175. "Building wheels for collected packages: keras-contrib\n",
  176. " Building wheel for keras-contrib (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
  177. " Created wheel for keras-contrib: filename=keras_contrib-2.0.8-cp36-none-any.whl size=101066 sha256=afa2fe4faf1b8ceca6cd05a73e72662ca051101d9fbf76dd37c446c84bb0e1d6\n",
  178. " Stored in directory: /tmp/pip-ephem-wheel-cache-j5bxmosh/wheels/11/27/c8/4ed56de7b55f4f61244e2dc6ef3cdbaff2692527a2ce6502ba\n",
  179. "Successfully built keras-contrib\n",
  180. "\u001b[31mERROR: tensorflow 1.14.0 has requirement tensorboard<1.15.0,>=1.14.0, but you'll have tensorboard 1.13.1 which is incompatible.\u001b[0m\n",
  181. "\u001b[31mERROR: tensorflow 1.14.0 has requirement tensorflow-estimator<1.15.0rc0,>=1.14.0rc0, but you'll have tensorflow-estimator 1.13.0 which is incompatible.\u001b[0m\n",
  182. "\u001b[31mERROR: datascience 0.10.6 has requirement folium==0.2.1, but you'll have folium 0.8.3 which is incompatible.\u001b[0m\n",
  183. "\u001b[31mERROR: albumentations 0.1.12 has requirement imgaug<0.2.7,>=0.2.5, but you'll have imgaug 0.2.9 which is incompatible.\u001b[0m\n",
  184. "Installing collected packages: numpy, h5py, opencv-python, mock, tensorflow-estimator, tensorboard, tensorflow-gpu, enum34, plaidml, plaidml-keras, ffmpeg-python, keras-contrib\n",
  185. " Found existing installation: numpy 1.16.4\n",
  186. " Uninstalling numpy-1.16.4:\n",
  187. " Successfully uninstalled numpy-1.16.4\n",
  188. " Found existing installation: h5py 2.8.0\n",
  189. " Uninstalling h5py-2.8.0:\n",
  190. " Successfully uninstalled h5py-2.8.0\n",
  191. " Found existing installation: opencv-python 3.4.5.20\n",
  192. " Uninstalling opencv-python-3.4.5.20:\n",
  193. " Successfully uninstalled opencv-python-3.4.5.20\n",
  194. " Found existing installation: tensorflow-estimator 1.14.0\n",
  195. " Uninstalling tensorflow-estimator-1.14.0:\n",
  196. " Successfully uninstalled tensorflow-estimator-1.14.0\n",
  197. " Found existing installation: tensorboard 1.14.0\n",
  198. " Uninstalling tensorboard-1.14.0:\n",
  199. " Successfully uninstalled tensorboard-1.14.0\n",
  200. "Successfully installed enum34-1.1.6 ffmpeg-python-0.1.17 h5py-2.9.0 keras-contrib-2.0.8 mock-3.0.5 numpy-1.16.3 opencv-python-4.0.0.21 plaidml-0.6.4 plaidml-keras-0.5.0 tensorboard-1.13.1 tensorflow-estimator-1.13.0 tensorflow-gpu-1.13.1\n"
  201. ],
  202. "name": "stdout"
  203. },
  204. {
  205. "output_type": "display_data",
  206. "data": {
  207. "application/vnd.colab-display-data+json": {
  208. "pip_warning": {
  209. "packages": [
  210. "enum",
  211. "numpy"
  212. ]
  213. }
  214. }
  215. },
  216. "metadata": {
  217. "tags": []
  218. }
  219. },
  220. {
  221. "output_type": "stream",
  222. "text": [
  223. "Requirement already up-to-date: scikit-image in /usr/local/lib/python3.6/dist-packages (0.15.0)\n",
  224. "Requirement already satisfied, skipping upgrade: pillow>=4.3.0 in /usr/local/lib/python3.6/dist-packages (from scikit-image) (4.3.0)\n",
  225. "Requirement already satisfied, skipping upgrade: matplotlib!=3.0.0,>=2.0.0 in /usr/local/lib/python3.6/dist-packages (from scikit-image) (3.0.3)\n",
  226. "Requirement already satisfied, skipping upgrade: PyWavelets>=0.4.0 in /usr/local/lib/python3.6/dist-packages (from scikit-image) (1.0.3)\n",
  227. "Requirement already satisfied, skipping upgrade: scipy>=0.17.0 in /usr/local/lib/python3.6/dist-packages (from scikit-image) (1.3.1)\n",
  228. "Requirement already satisfied, skipping upgrade: networkx>=2.0 in /usr/local/lib/python3.6/dist-packages (from scikit-image) (2.3)\n",
  229. "Requirement already satisfied, skipping upgrade: imageio>=2.0.1 in /usr/local/lib/python3.6/dist-packages (from scikit-image) (2.4.1)\n",
  230. "Requirement already satisfied, skipping upgrade: olefile in /usr/local/lib/python3.6/dist-packages (from pillow>=4.3.0->scikit-image) (0.46)\n",
  231. "Requirement already satisfied, skipping upgrade: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib!=3.0.0,>=2.0.0->scikit-image) (2.4.2)\n",
  232. "Requirement already satisfied, skipping upgrade: kiwisolver>=1.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib!=3.0.0,>=2.0.0->scikit-image) (1.1.0)\n",
  233. "Requirement already satisfied, skipping upgrade: numpy>=1.10.0 in /usr/local/lib/python3.6/dist-packages (from matplotlib!=3.0.0,>=2.0.0->scikit-image) (1.16.3)\n",
  234. "Requirement already satisfied, skipping upgrade: python-dateutil>=2.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib!=3.0.0,>=2.0.0->scikit-image) (2.5.3)\n",
  235. "Requirement already satisfied, skipping upgrade: cycler>=0.10 in /usr/local/lib/python3.6/dist-packages (from matplotlib!=3.0.0,>=2.0.0->scikit-image) (0.10.0)\n",
  236. "Requirement already satisfied, skipping upgrade: decorator>=4.3.0 in /usr/local/lib/python3.6/dist-packages (from networkx>=2.0->scikit-image) (4.4.0)\n",
  237. "Requirement already satisfied, skipping upgrade: setuptools in /usr/local/lib/python3.6/dist-packages (from kiwisolver>=1.0.1->matplotlib!=3.0.0,>=2.0.0->scikit-image) (41.0.1)\n",
  238. "Requirement already satisfied, skipping upgrade: six>=1.5 in /usr/local/lib/python3.6/dist-packages (from python-dateutil>=2.1->matplotlib!=3.0.0,>=2.0.0->scikit-image) (1.12.0)\n",
  239. "warning [/content/pretrain_CelebA.zip]: 457793 extra bytes at beginning or within zipfile\n",
  240. " (attempting to process anyway)\n",
  241. "Done!\n"
  242. ],
  243. "name": "stdout"
  244. }
  245. ]
  246. },
  247. {
  248. "cell_type": "markdown",
  249. "metadata": {
  250. "id": "hqwOlJG4MdLC",
  251. "colab_type": "text"
  252. },
  253. "source": [
  254. "#Manage workspace\n",
  255. "\n",
  256. "\n",
  257. "\n",
  258. "* You can import/export workspace or individual data, like model files with Google Drive\n",
  259. "* Also, you can use HFS (HTTP Fileserver) to directly import/export your workspace from your computer\n",
  260. "* You can clear all workspace or delete part of it\n",
  261. "\n"
  262. ]
  263. },
  264. {
  265. "cell_type": "code",
  266. "metadata": {
  267. "id": "z4w_sUzgOQmL",
  268. "colab_type": "code",
  269. "cellView": "both",
  270. "colab": {
  271. "base_uri": "https://localhost:8080/",
  272. "height": 127
  273. },
  274. "outputId": "2161ac32-b0a2-4121-a6d4-8ef831475a24"
  275. },
  276. "source": [
  277. "#@title Import from Drive\n",
  278. "\n",
  279. "Mode = \"models\" #@param [\"workspace\", \"data_src\", \"data_dst\", \"data_src aligned\", \"data_dst aligned\", \"models\"]\n",
  280. "Archive_name = \"msdf.zip\" #@param {type:\"string\"}\n",
  281. "\n",
  282. "#Mount Google Drive as folder\n",
  283. "from google.colab import drive\n",
  284. "drive.mount('/content/drive', force_remount=True)\n",
  285. "\n",
  286. "def zip_and_copy(path, mode):\n",
  287. " unzip_cmd=\" -q \"+Archive_name\n",
  288. " \n",
  289. " %cd $path\n",
  290. " copy_cmd = \"/content/drive/My\\ Drive/\"+Archive_name+\" \"+path\n",
  291. " !cp $copy_cmd\n",
  292. " !unzip $unzip_cmd \n",
  293. " !rm $Archive_name\n",
  294. "\n",
  295. "if Mode == \"workspace\":\n",
  296. " zip_and_copy(\"/content\", \"workspace\")\n",
  297. "elif Mode == \"data_src\":\n",
  298. " zip_and_copy(\"/content/workspace\", \"data_src\")\n",
  299. "elif Mode == \"data_dst\":\n",
  300. " zip_and_copy(\"/content/workspace\", \"data_dst\")\n",
  301. "elif Mode == \"data_src aligned\":\n",
  302. " zip_and_copy(\"/content/workspace/data_src\", \"aligned\")\n",
  303. "elif Mode == \"data_dst aligned\":\n",
  304. " zip_and_copy(\"/content/workspace/data_dst\", \"aligned\")\n",
  305. "elif Mode == \"models\":\n",
  306. " zip_and_copy(\"/content/workspace\", \"model\")\n",
  307. " \n",
  308. "print(\"Done!\")\n",
  309. "\n"
  310. ],
  311. "execution_count": 4,
  312. "outputs": [
  313. {
  314. "output_type": "stream",
  315. "text": [
  316. "Mounted at /content/drive\n",
  317. "/content/workspace\n",
  318. "cp: cannot stat '/content/drive/My Drive/msdf.zip': No such file or directory\n",
  319. "unzip: cannot find or open msdf.zip, msdf.zip.zip or msdf.zip.ZIP.\n",
  320. "rm: cannot remove 'msdf.zip': No such file or directory\n",
  321. "Done!\n"
  322. ],
  323. "name": "stdout"
  324. }
  325. ]
  326. },
  327. {
  328. "cell_type": "code",
  329. "metadata": {
  330. "id": "0Y3WfuwoNXqC",
  331. "colab_type": "code",
  332. "cellView": "form",
  333. "colab": {
  334. "base_uri": "https://localhost:8080/",
  335. "height": 72
  336. },
  337. "outputId": "1b873a84-b899-4fa5-d107-348159c0134c"
  338. },
  339. "source": [
  340. "#@title Export to Drive { form-width: \"30%\" }\n",
  341. "Mode = \"models\" #@param [\"workspace\", \"data_src\", \"data_dst\", \"data_src aligned\", \"data_dst aligned\", \"merged\", \"models\"]\n",
  342. "Archive_name = \"msdf.zip\" #@param {type:\"string\"}\n",
  343. "\n",
  344. "#Mount Google Drive as folder\n",
  345. "from google.colab import drive\n",
  346. "drive.mount('/content/drive', force_remount=True)\n",
  347. "\n",
  348. "def zip_and_copy(path, mode):\n",
  349. " zip_cmd=\"-r -q \"+Archive_name+\" \"\n",
  350. " \n",
  351. " %cd $path\n",
  352. " zip_cmd+=mode\n",
  353. " !zip $zip_cmd\n",
  354. " copy_cmd = \" \"+Archive_name+\" /content/drive/My\\ Drive/\"\n",
  355. " !cp $copy_cmd\n",
  356. " !rm $Archive_name\n",
  357. "\n",
  358. "if Mode == \"workspace\":\n",
  359. " zip_and_copy(\"/content\", \"workspace\")\n",
  360. "elif Mode == \"data_src\":\n",
  361. " zip_and_copy(\"/content/workspace\", \"data_src\")\n",
  362. "elif Mode == \"data_dst\":\n",
  363. " zip_and_copy(\"/content/workspace\", \"data_dst\")\n",
  364. "elif Mode == \"data_src aligned\":\n",
  365. " zip_and_copy(\"/content/workspace/data_src\", \"aligned\")\n",
  366. "elif Mode == \"data_dst aligned\":\n",
  367. " zip_and_copy(\"/content/workspace/data_dst\", \"aligned\")\n",
  368. "elif Mode == \"merged\":\n",
  369. " zip_and_copy(\"/content/workspace/data_dst\", \"merged\")\n",
  370. "elif Mode == \"models\":\n",
  371. " zip_and_copy(\"/content/workspace\", \"model\")\n",
  372. " \n",
  373. "print(\"Done!\")\n"
  374. ],
  375. "execution_count": 6,
  376. "outputs": [
  377. {
  378. "output_type": "stream",
  379. "text": [
  380. "Mounted at /content/drive\n",
  381. "/content/workspace\n",
  382. "Done!\n"
  383. ],
  384. "name": "stdout"
  385. }
  386. ]
  387. },
  388. {
  389. "cell_type": "code",
  390. "metadata": {
  391. "id": "0hIvJtxwTGcb",
  392. "colab_type": "code",
  393. "colab": {
  394. "base_uri": "https://localhost:8080/",
  395. "height": 54
  396. },
  397. "outputId": "a085ba81-7137-402c-a284-7281d1a81d9e"
  398. },
  399. "source": [
  400. "#@title Import from URL{ form-width: \"30%\", display-mode: \"form\" }\n",
  401. "URL = \"http://195.201.97.169:60090/msdf.zip\" #@param {type:\"string\"}\n",
  402. "Mode = \"unzip to content\" #@param [\"unzip to content\", \"unzip to content/workspace\", \"unzip to content/workspace/data_src\", \"unzip to content/workspace/data_src/aligned\", \"unzip to content/workspace/data_dst\", \"unzip to content/workspace/data_dst/aligned\", \"unzip to content/workspace/model\", \"download to content/workspace\"]\n",
  403. "\n",
  404. "import urllib\n",
  405. "from pathlib import Path\n",
  406. "\n",
  407. "def unzip(zip_path, dest_path):\n",
  408. "\n",
  409. " \n",
  410. " unzip_cmd = \" unzip -q \" + zip_path + \" -d \"+dest_path\n",
  411. " !$unzip_cmd \n",
  412. " rm_cmd = \"rm \"+dest_path + url_path.name\n",
  413. " !$rm_cmd\n",
  414. " print(\"Unziped!\")\n",
  415. " \n",
  416. "\n",
  417. "if Mode == \"unzip to content\":\n",
  418. " dest_path = \"/content/\"\n",
  419. "elif Mode == \"unzip to content/workspace\":\n",
  420. " dest_path = \"/content/workspace/\"\n",
  421. "elif Mode == \"unzip to content/workspace/data_src\":\n",
  422. " dest_path = \"/content/workspace/data_src/\"\n",
  423. "elif Mode == \"unzip to content/workspace/data_src/aligned\":\n",
  424. " dest_path = \"/content/workspace/data_src/aligned/\"\n",
  425. "elif Mode == \"unzip to content/workspace/data_dst\":\n",
  426. " dest_path = \"/content/workspace/data_dst/\"\n",
  427. "elif Mode == \"unzip to content/workspace/data_dst/aligned\":\n",
  428. " dest_path = \"/content/workspace/data_dst/aligned/\"\n",
  429. "elif Mode == \"unzip to content/workspace/model\":\n",
  430. " dest_path = \"/content/workspace/model/\"\n",
  431. "elif Mode == \"download to content/workspace\":\n",
  432. " dest_path = \"/content/workspace/\"\n",
  433. "\n",
  434. "if not Path(\"/content/workspace\").exists():\n",
  435. " cmd = \"mkdir /content/workspace; mkdir /content/workspace/data_src; mkdir /content/workspace/data_src/aligned; mkdir /content/workspace/data_dst; mkdir /content/workspace/data_dst/aligned; mkdir /content/workspace/model\"\n",
  436. " !$cmd\n",
  437. "\n",
  438. "url_path = Path(URL)\n",
  439. "urllib.request.urlretrieve ( URL, dest_path + url_path.name )\n",
  440. "\n",
  441. "if (url_path.suffix == \".zip\") and (Mode!=\"download to content/workspace\"):\n",
  442. " unzip(dest_path + url_path.name, dest_path)\n",
  443. "\n",
  444. " \n",
  445. "print(\"Done!\")"
  446. ],
  447. "execution_count": 2,
  448. "outputs": [
  449. {
  450. "output_type": "stream",
  451. "text": [
  452. "Unziped!\n",
  453. "Done!\n"
  454. ],
  455. "name": "stdout"
  456. }
  457. ]
  458. },
  459. {
  460. "cell_type": "code",
  461. "metadata": {
  462. "id": "7V1sc7rxNKLO",
  463. "colab_type": "code",
  464. "cellView": "both",
  465. "colab": {
  466. "base_uri": "https://localhost:8080/",
  467. "height": 127
  468. },
  469. "outputId": "da761fe6-f5d1-4ddf-95b1-f5714b9c0087"
  470. },
  471. "source": [
  472. "#@title Export to URL\n",
  473. "URL = \"http://195.201.97.169:60090/up.php\" #@param {type:\"string\"}\n",
  474. "Mode = \"upload model\" #@param [\"upload workspace\", \"upload data_src\", \"upload data_dst\", \"upload data_src aligned\", \"upload data_dst aligned\", \"upload merged\", \"upload model\"]\n",
  475. "\n",
  476. "cmd_zip = \"zip -r -q \"\n",
  477. "\n",
  478. "def run_cmd(zip_path, curl_url):\n",
  479. " cmd_zip = \"zip -r -q \"+zip_path\n",
  480. " cmd_curl = \"curl -F \"+curl_url+\" -D out.txt \"\n",
  481. " !$cmd_zip\n",
  482. " print(cmd_curl)\n",
  483. " !$cmd_curl\n",
  484. "\n",
  485. "\n",
  486. "if Mode == \"upload workspace\":\n",
  487. " %cd \"/content\"\n",
  488. " run_cmd(\"workspace.zip workspace/\",\"'data=@/content/workspace.zip' \"+URL)\n",
  489. "elif Mode == \"upload data_src\":\n",
  490. " %cd \"/content/workspace\"\n",
  491. " print(\"data_src.zip data_src/\", \"'data=@/content/workspace/data_src.zip' \"+URL)\n",
  492. " run_cmd(\"data_src.zip data_src/\", \"'data=@/content/workspace/data_src.zip' \"+URL)\n",
  493. "elif Mode == \"upload data_dst\":\n",
  494. " %cd \"/content/workspace\"\n",
  495. " run_cmd(\"data_dst.zip data_dst/\", \"'data=@/content/workspace/data_dst.zip' \"+URL)\n",
  496. "elif Mode == \"upload data_src aligned\":\n",
  497. " %cd \"/content/workspace\"\n",
  498. " run_cmd(\"data_src_aligned.zip data_src/aligned\", \"'data=@/content/workspace/data_src_aligned.zip' \"+URL )\n",
  499. "elif Mode == \"upload data_dst aligned\":\n",
  500. " %cd \"/content/workspace\"\n",
  501. " run_cmd(\"data_dst_aligned.zip data_dst/aligned/\", \"'data=@/content/workspace/data_dst_aligned.zip' \"+URL)\n",
  502. "elif Mode == \"upload merged\":\n",
  503. " %cd \"/content/workspace/data_dst\"\n",
  504. " run_cmd(\"merged.zip merged/\",\"'data=@/content/workspace/data_dst/merged.zip' \"+URL )\n",
  505. "elif Mode == \"upload model\":\n",
  506. " %cd \"/content/workspace\"\n",
  507. " run_cmd(\"\"+Archive_name+\" model/\", \"'data=@/content/workspace/\"+Archive_name+\"' \"+URL)\n",
  508. " \n",
  509. " \n",
  510. "!rm *.zip\n",
  511. "\n",
  512. "%cd \"/content\"\n",
  513. "print(\"Done!\")"
  514. ],
  515. "execution_count": 18,
  516. "outputs": [
  517. {
  518. "output_type": "stream",
  519. "text": [
  520. "/content/workspace\n",
  521. "curl -F 'data=@/content/workspace/msdf.zip' http://195.201.97.169:60090/up.php -D out.txt \n",
  522. "File is valid, and was successfully uploaded.\n",
  523. "\n",
  524. "OK/content\n",
  525. "Done!\n"
  526. ],
  527. "name": "stdout"
  528. }
  529. ]
  530. },
  531. {
  532. "cell_type": "code",
  533. "metadata": {
  534. "id": "Ta6ue_UGMkki",
  535. "colab_type": "code",
  536. "cellView": "form",
  537. "colab": {}
  538. },
  539. "source": [
  540. "#@title Delete and recreate\n",
  541. "Mode = \"Delete and recreate workspace\" #@param [\"Delete and recreate workspace\", \"Delete models\", \"Delete data_src\", \"Delete data_src aligned\", \"Delete data_src video\", \"Delete data_dst\", \"Delete data_dst aligned\", \"Delete merged frames\"]\n",
  542. "\n",
  543. "%cd \"/content\" \n",
  544. "\n",
  545. "if Mode == \"Delete and recreate workspace\":\n",
  546. " cmd = \"rm -r /content/workspace ; mkdir /content/workspace; mkdir /content/workspace/data_src; mkdir /content/workspace/data_src/aligned; mkdir /content/workspace/data_dst; mkdir /content/workspace/data_dst/aligned; mkdir /content/workspace/model\" \n",
  547. "elif Mode == \"Delete models\":\n",
  548. " cmd = \"rm -r /content/workspace/model/*\"\n",
  549. "elif Mode == \"Delete data_src\":\n",
  550. " cmd = \"rm /content/workspace/data_src/*.png || rm -r /content/workspace/data_src/*.jpg\"\n",
  551. "elif Mode == \"Delete data_src aligned\":\n",
  552. " cmd = \"rm -r /content/workspace/data_src/aligned/*\"\n",
  553. "elif Mode == \"Delete data_src video\":\n",
  554. " cmd = \"rm -r /content/workspace/data_src.*\"\n",
  555. "elif Mode == \"Delete data_dst\":\n",
  556. " cmd = \"rm /content/workspace/data_dst/*.png || rm /content/workspace/data_dst/*.jpg\"\n",
  557. "elif Mode == \"Delete data_dst aligned\":\n",
  558. " cmd = \"rm -r /content/workspace/data_dst/aligned/*\"\n",
  559. "elif Mode == \"Delete merged frames\":\n",
  560. " cmd = \"rm -r /content/workspace/data_dst/merged\"\n",
  561. " \n",
  562. "!$cmd\n",
  563. "print(\"Done!\")"
  564. ],
  565. "execution_count": 0,
  566. "outputs": []
  567. },
  568. {
  569. "cell_type": "markdown",
  570. "metadata": {
  571. "id": "tUNVcbujhm00",
  572. "colab_type": "text"
  573. },
  574. "source": [
  575. "# Extract and sorting\n",
  576. " Extract frames from the SRC or DST video.\n",
  577. " Denoise the SRC or DST video. The \"Factor\" param sets the intensity of denoising.\n",
  578. " Detect and align faces with one of the detectors (S3FD recommended). If needed, you can also get frames with debug landmarks.\n",
  579. " Export the workspace to Google Drive after extraction and sort it manually (last block of the notebook)\n"
  580. ]
  581. },
  582. {
  583. "cell_type": "code",
  584. "metadata": {
  585. "id": "qwJEbz5Nhot0",
  586. "colab_type": "code",
  587. "cellView": "form",
  588. "colab": {}
  589. },
  590. "source": [
  591. "#@title Extract frames\n",
  592. "Video = \"data_src\" #@param [\"data_src\", \"data_dst\"]\n",
  593. "\n",
  594. "%cd \"/content\"\n",
  595. "\n",
  596. "cmd = \"DeepFaceLab/main.py videoed extract-video\"\n",
  597. "\n",
  598. "if Video == \"data_dst\":\n",
  599. " cmd+= \" --input-file workspace/data_dst.* --output-dir workspace/data_dst/\"\n",
  600. "else:\n",
  601. " cmd+= \" --input-file workspace/data_src.* --output-dir workspace/data_src/\"\n",
  602. " \n",
  603. "!python $cmd"
  604. ],
  605. "execution_count": 0,
  606. "outputs": []
  607. },
  608. {
  609. "cell_type": "code",
  610. "metadata": {
  611. "id": "bFmPo0s2lTil",
  612. "colab_type": "code",
  613. "cellView": "form",
  614. "colab": {}
  615. },
  616. "source": [
  617. "#@title Denoise frames\n",
  618. "Data = \"data_src\" #@param [\"data_src\", \"data_dst\"]\n",
  619. "Factor = 1 #@param {type:\"slider\", min:1, max:20, step:1}\n",
  620. "\n",
  621. "cmd = \"DeepFaceLab/main.py videoed denoise-image-sequence --input-dir workspace/\"+Data+\" --factor \"+str(Factor)\n",
  622. "\n",
  623. "%cd \"/content\"\n",
  624. "!python $cmd"
  625. ],
  626. "execution_count": 0,
  627. "outputs": []
  628. },
  629. {
  630. "cell_type": "code",
  631. "metadata": {
  632. "id": "nmq0Sj2bmq7d",
  633. "colab_type": "code",
  634. "cellView": "form",
  635. "colab": {}
  636. },
  637. "source": [
  638. "#@title Detect faces\n",
  639. "Data = \"data_src\" #@param [\"data_src\", \"data_dst\"]\n",
  640. "Detector = \"S3FD\" #@param [\"S3FD\", \"MT\"]\n",
  641. "Debug = False #@param {type:\"boolean\"}\n",
  642. "\n",
  643. "detect_type = \"s3fd\"\n",
  644. "if Detector == \"S3FD\":\n",
  645. " detect_type = \"s3fd\"\n",
  646. "elif Detector == \"MT\":\n",
  647. " detect_type = \"mt\"\n",
  648. "\n",
  649. "folder = \"workspace/\"+Data\n",
  650. "folder_align = folder+\"/aligned\"\n",
  651. "debug_folder = folder_align+\"/debug\"\n",
  652. "\n",
  653. "cmd = \"DeepFaceLab/main.py extract --input-dir \"+folder+\" --output-dir \"+folder_align\n",
  654. "\n",
  655. "if Debug:\n",
  656. " cmd+= \" --debug-dir \"+debug_folder\n",
  657. "\n",
  658. "cmd+=\" --detector \"+detect_type\n",
  659. " \n",
  660. "%cd \"/content\"\n",
  661. "!python $cmd"
  662. ],
  663. "execution_count": 0,
  664. "outputs": []
  665. },
  666. {
  667. "cell_type": "code",
  668. "metadata": {
  669. "id": "TRNxUFE6p6Eu",
  670. "colab_type": "code",
  671. "cellView": "form",
  672. "colab": {}
  673. },
  674. "source": [
  675. "#@title Sort aligned\n",
  676. "Data = \"data_src\" #@param [\"data_src\", \"data_dst\"]\n",
  677. "sort_type = \"hist\" #@param [\"hist\", \"hist-dissim\", \"face-yaw\", \"face-pitch\", \"blur\", \"final\"]\n",
  678. "\n",
  679. "cmd = \"DeepFaceLab/main.py sort --input-dir workspace/\"+Data+\"/aligned --by \"+sort_type\n",
  680. "\n",
  681. "%cd \"/content\"\n",
  682. "!python $cmd"
  683. ],
  684. "execution_count": 0,
  685. "outputs": []
  686. },
  687. {
  688. "cell_type": "markdown",
  689. "metadata": {
  690. "id": "WTuyUxgdLA13",
  691. "colab_type": "text"
  692. },
  693. "source": [
  694. "# Train model\n",
  695. "\n",
  696. " Choose your model type, but SAE is recommended for everyone\n",
  697. "* Set model options on output field\n",
  698. " You can view the preview manually by going to the model folder in the file manager and double-clicking the preview.jpg file\n",
  699. " Your workspace will be archived and uploaded to your mounted Drive 11 hours after the session starts\n",
  700. "* If you select \"Backup_every_hour\" option, your workspace will be backed up every hour.\n",
  701. "* Also, you can export your workspace manually in \"Manage workspace\" block"
  702. ]
  703. },
  704. {
  705. "cell_type": "code",
  706. "metadata": {
  707. "id": "Z0Kya-PJLDhv",
  708. "colab_type": "code",
  709. "cellView": "form",
  710. "colab": {
  711. "base_uri": "https://localhost:8080/",
  712. "height": 1000
  713. },
  714. "outputId": "54275d17-2ac9-43d7-ffc9-413fffdeb379"
  715. },
  716. "source": [
  717. "#@title Training\n",
  718. "Model = \"SAE\" #@param [\"SAE\", \"H128\", \"LIAEF128\", \"DF\", \"DEV_FANSEG\", \"RecycleGAN\"]\n",
  719. "Backup_every_hour = False #@param {type:\"boolean\"}\n",
  720. "\n",
  721. "%cd \"/content\"\n",
  722. "\n",
  723. "#Mount Google Drive as folder\n",
  724. "from google.colab import drive\n",
  725. "drive.mount('/content/drive')\n",
  726. "\n",
  727. "import psutil, os, time\n",
  728. "\n",
  729. "p = psutil.Process(os.getpid())\n",
  730. "uptime = time.time() - p.create_time()\n",
  731. "\n",
  732. "if (Backup_every_hour):\n",
  733. " if not os.path.exists('workspace.zip'):\n",
  734. " print(\"Creating workspace archive ...\")\n",
  735. " !zip -r -q workspace.zip workspace\n",
  736. " print(\"Archive created!\")\n",
  737. " else:\n",
  738. " print(\"Archive exist!\")\n",
  739. "\n",
  740. "if (Backup_every_hour):\n",
  741. " print(\"Time to end session: \"+str(round((43200-uptime)/3600))+\" hours\")\n",
  742. " backup_time = str(3600)\n",
  743. " backup_cmd = \" --execute-program -\"+backup_time+\" \\\"import os; os.system('zip -r -q workspace.zip workspace/model'); os.system('cp /content/workspace.zip /content/drive/My\\ Drive/'); print(' Backuped!') \\\"\" \n",
  744. "elif (round(39600-uptime) > 0):\n",
  745. " print(\"Time to backup: \"+str(round((39600-uptime)/3600))+\" hours\")\n",
  746. " backup_time = str(round(39600-uptime))\n",
  747. " backup_cmd = \" --execute-program \"+backup_time+\" \\\"import os; os.system('zip -r -q workspace.zip workspace'); os.system('cp /content/workspace.zip /content/drive/My\\ Drive/'); print(' Backuped!') \\\"\" \n",
  748. "else:\n",
  749. " print(\"Session expires in less than an hour.\")\n",
  750. " backup_cmd = \"\"\n",
  751. " \n",
  752. "cmd = \"DeepFaceLab/main.py train --training-data-src-dir workspace/data_src/aligned --training-data-dst-dir workspace/data_dst/aligned --pretraining-data-dir pretrain/aligned --model-dir workspace/model --model \"+Model\n",
  753. " \n",
  754. "if (backup_cmd != \"\"):\n",
  755. " train_cmd = (cmd+backup_cmd)\n",
  756. "else:\n",
  757. " train_cmd = (cmd)\n",
  758. "\n",
  759. "!python $train_cmd"
  760. ],
  761. "execution_count": 17,
  762. "outputs": [
  763. {
  764. "output_type": "stream",
  765. "text": [
  766. "/content\n",
  767. "Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount(\"/content/drive\", force_remount=True).\n",
  768. "Time to backup: 3 hours\n",
  769. "Running trainer.\n",
  770. "\n",
  771. "Loading model...\n",
  772. "Press enter in 2 seconds to override model settings./usr/lib/python3.6/multiprocessing/semaphore_tracker.py:143: UserWarning: semaphore_tracker: There appear to be 1 leaked semaphores to clean up at shutdown\n",
  773. " len(cache))\n",
  774. "Using TensorFlow backend.\n",
  775. "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\n",
  776. "Instructions for updating:\n",
  777. "Colocations handled automatically by placer.\n",
  778. "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/math_ops.py:3066: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n",
  779. "Instructions for updating:\n",
  780. "Use tf.cast instead.\n",
  781. "Loading: 100% 2801/2801 [00:04<00:00, 601.76it/s]\n",
  782. "Loading: 100% 2140/2140 [00:02<00:00, 725.26it/s]\n",
  783. "========== Model Summary ==========\n",
  784. "== ==\n",
  785. "== Model name: SAE ==\n",
  786. "== ==\n",
  787. "== Current iteration: 4231 ==\n",
  788. "== ==\n",
  789. "==-------- Model Options --------==\n",
  790. "== ==\n",
  791. "== batch_size: 3 ==\n",
  792. "== sort_by_yaw: False ==\n",
  793. "== random_flip: True ==\n",
  794. "== resolution: 224 ==\n",
  795. "== face_type: f ==\n",
  796. "== learn_mask: True ==\n",
  797. "== optimizer_mode: 1 ==\n",
  798. "== archi: df ==\n",
  799. "== ae_dims: 512 ==\n",
  800. "== e_ch_dims: 42 ==\n",
  801. "== d_ch_dims: 21 ==\n",
  802. "== multiscale_decoder: False ==\n",
  803. "== ca_weights: False ==\n",
  804. "== pixel_loss: False ==\n",
  805. "== face_style_power: 0.0 ==\n",
  806. "== bg_style_power: 0.0 ==\n",
  807. "== apply_random_ct: False ==\n",
  808. "== clipgrad: True ==\n",
  809. "== ==\n",
  810. "==--------- Running On ----------==\n",
  811. "== ==\n",
  812. "== Device index: 0 ==\n",
  813. "== Name: Tesla K80 ==\n",
  814. "== VRAM: 11.00GB ==\n",
  815. "== ==\n",
  816. "===================================\n",
  817. "Starting. Press \"Enter\" to stop training and save model.\n",
  818. "[16:47:50][#004447][3855ms][0.5671][0.3423]\n",
  819. "[17:03:03][#004669][3827ms][0.5603][0.3440]\n",
  820. "[17:18:17][#004891][3909ms][0.5506][0.3421]\n",
  821. "Done.\n",
  822. "/usr/lib/python3.6/multiprocessing/semaphore_tracker.py:143: UserWarning: semaphore_tracker: There appear to be 1 leaked semaphores to clean up at shutdown\n",
  823. " len(cache))\n",
  824. "/usr/lib/python3.6/multiprocessing/semaphore_tracker.py:143: UserWarning: semaphore_tracker: There appear to be 1 leaked semaphores to clean up at shutdown\n",
  825. " len(cache))\n",
  826. "/usr/lib/python3.6/multiprocessing/semaphore_tracker.py:143: UserWarning: semaphore_tracker: There appear to be 1 leaked semaphores to clean up at shutdown\n",
  827. " len(cache))\n",
  828. "/usr/lib/python3.6/multiprocessing/semaphore_tracker.py:143: UserWarning: semaphore_tracker: There appear to be 1 leaked semaphores to clean up at shutdown\n",
  829. " len(cache))\n"
  830. ],
  831. "name": "stdout"
  832. }
  833. ]
  834. },
  835. {
  836. "cell_type": "markdown",
  837. "metadata": {
  838. "id": "avAcSL_uvtq_",
  839. "colab_type": "text"
  840. },
  841. "source": [
  842. "# Convert frames"
  843. ]
  844. },
  845. {
  846. "cell_type": "code",
  847. "metadata": {
  848. "id": "A3Y8K22Sv9Gn",
  849. "colab_type": "code",
  850. "cellView": "form",
  851. "colab": {}
  852. },
  853. "source": [
  854. "#@title Convert\n",
  855. "Model = \"SAE\" #@param [\"SAE\", \"H128\", \"LIAEF128\", \"DF\", \"RecycleGAN\"]\n",
  856. "\n",
  857. "cmd = \"DeepFaceLab/main.py convert --input-dir workspace/data_dst --output-dir workspace/data_dst/merged --aligned-dir workspace/data_dst/aligned --model-dir workspace/model --model \"+Model\n",
  858. "\n",
  859. "%cd \"/content\"\n",
  860. "!python $cmd"
  861. ],
  862. "execution_count": 0,
  863. "outputs": []
  864. },
  865. {
  866. "cell_type": "code",
  867. "metadata": {
  868. "id": "JNeGfiZpxlnz",
  869. "colab_type": "code",
  870. "cellView": "form",
  871. "colab": {}
  872. },
  873. "source": [
  874. "#@title Get result video and copy to Drive \n",
  875. "\n",
  876. "!python DeepFaceLab/main.py videoed video-from-sequence --input-dir workspace/data_dst/merged --output-file workspace/result.mp4 --reference-file workspace/data_dst.mp4\n",
  877. "!cp /content/workspace/result.mp4 /content/drive/My\\ Drive/"
  878. ],
  879. "execution_count": 0,
  880. "outputs": []
  881. }
  882. ]
  883. }
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement