FROM pytorch/pytorch:2.4.1-cuda12.4-cudnn9-devel
RUN apt-get update
RUN apt-get install -y git
RUN apt-get install -y wget
RUN apt-get install -y ninja-build
RUN apt-get install -y vim
RUN apt-get install -y sudo
RUN wget https://bootstrap.pypa.io/pip/get-pip.py
RUN python3 get-pip.py --break-system-packages --root-user-action=ignore
RUN rm get-pip.py
RUN pip install --upgrade pip --root-user-action=ignore
RUN pip install Flask --root-user-action=ignore
RUN pip install pybind11 --root-user-action=ignore
RUN pip install ninja --root-user-action=ignore
RUN pip install tensorboard --root-user-action=ignore
RUN pip install opencv-python-headless --root-user-action=ignore
RUN pip install 'git+https://github.com/facebookresearch/fvcore' --root-user-action=ignore
# Create a non-root user
RUN useradd -m --no-log-init --system --uid 991 tron2 -g sudo
RUN echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers
USER tron2
WORKDIR /home/tron2
ENV PATH="/home/tron2/.local/bin:/usr/local/cuda/bin:/opt/conda/bin:/usr/local/nvidia/bin:${PATH}"
RUN python -m pip install --upgrade setuptools wheel --root-user-action=ignore
# Install detectron2
RUN git clone https://github.com/facebookresearch/detectron2 detectron2_repo
# Set FORCE_CUDA because during `docker build` CUDA is not accessible
ENV FORCE_CUDA="1"
# By default this builds detectron2 for all common CUDA architectures and takes much more time,
# because inside `docker build` there is no way to tell which architecture will be used.
ARG TORCH_CUDA_ARCH_LIST="Maxwell;Maxwell+Tegra;Pascal;Volta;Turing"
ENV TORCH_CUDA_ARCH_LIST="${TORCH_CUDA_ARCH_LIST}"
# Set the CUDA_HOME environment variable
ENV CUDA_HOME=/usr/local/cuda
ENV PATH=$CUDA_HOME/bin:$PATH
ENV LD_LIBRARY_PATH=$CUDA_HOME/lib64:$LD_LIBRARY_PATH
# Install detectron2 to match the current PyTorch installation
RUN pip install -e detectron2_repo --root-user-action=ignore
# Set a fixed model cache directory
ENV FVCORE_CACHE="/tmp"
# Set the working directory
WORKDIR /home/tron2/detectron2_repo
# Create the detectron_server.py file
RUN echo "\
import os\n\
import io\n\
from flask import Flask, request, send_file\n\
import cv2\n\
import numpy as np\n\
from detectron2 import model_zoo\n\
from detectron2.config import get_cfg\n\
from detectron2.engine import DefaultPredictor\n\
from detectron2.utils.visualizer import Visualizer, ColorMode\n\
from detectron2.data import MetadataCatalog\n\
\n\
# Initialize the Flask app\n\
app = Flask(__name__)\n\
\n\
# Load the Detectron2 model\n\
def setup_predictor():\n\
    cfg = get_cfg()\n\
    cfg.merge_from_file(model_zoo.get_config_file(\"COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml\"))\n\
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(\"COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml\")\n\
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5  # Set the threshold for this model\n\
    return DefaultPredictor(cfg), MetadataCatalog.get(cfg.DATASETS.TRAIN[0])\n\
\n\
predictor, metadata = setup_predictor()\n\
\n\
@app.route('/detect', methods=['POST'])\n\
def detect_objects():\n\
    # Check if an image is provided\n\
    if 'image' not in request.files:\n\
        return {\"error\": \"No image uploaded\"}, 400\n\
\n\
    file = request.files['image']\n\
    if not file:\n\
        return {\"error\": \"No image uploaded\"}, 400\n\
\n\
    # Read the image in BGR format\n\
    image = np.asarray(bytearray(file.read()), dtype=np.uint8)\n\
    image = cv2.imdecode(image, cv2.IMREAD_COLOR)\n\
\n\
    if image is None:\n\
        return {\"error\": \"Invalid image\"}, 400\n\
\n\
    # Perform inference\n\
    outputs = predictor(image)\n\
\n\
    # Visualize the predictions with color masks\n\
    v = Visualizer(image[:, :, ::-1], metadata=metadata, instance_mode=ColorMode.SEGMENTATION)\n\
    result = v.draw_instance_predictions(outputs[\"instances\"].to(\"cpu\"))\n\
\n\
    # Convert the result back to BGR format\n\
    result_image = result.get_image()[:, :, ::-1]\n\
\n\
    # Save the result temporarily\n\
    output_path = 'output.jpg'\n\
    cv2.imwrite(output_path, result_image)\n\
\n\
    # Return the modified image\n\
    return send_file(output_path, mimetype='image/jpeg')\n\
\n\
if __name__ == '__main__':\n\
    app.run(host='0.0.0.0', port=5001)\n\
" > detectron_server.py
# Expose the port for Flask
EXPOSE 5001
# Run the server
CMD ["python3", "detectron_server.py"]
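# Note (not part of the Dockerfile): the server above returns an annotated JPEG, but the
# predictor's raw output is a detectron2 `Instances` object. A minimal sketch of reading the
# same predictions as plain Python data, using the standard fields pred_boxes / scores /
# pred_classes ("input.jpg" is a placeholder path):
import cv2
from detectron2 import model_zoo
from detectron2.config import get_cfg
from detectron2.engine import DefaultPredictor
from detectron2.data import MetadataCatalog

cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
predictor = DefaultPredictor(cfg)
metadata = MetadataCatalog.get(cfg.DATASETS.TRAIN[0])

image = cv2.imread("input.jpg")  # BGR, which DefaultPredictor expects by default
instances = predictor(image)["instances"].to("cpu")
for box, score, cls in zip(instances.pred_boxes.tensor.tolist(),
                           instances.scores.tolist(),
                           instances.pred_classes.tolist()):
    # Class name, confidence, and [x1, y1, x2, y2] box in pixel coordinates.
    print(metadata.thing_classes[cls], round(score, 3), [round(v, 1) for v in box])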
# 1. Save this file into a folder named `docker` under the name `Dockerfile`
# 2. Install Docker Desktop (on Windows)
# 3. From `cmd`, run:
cd docker
docker build -t detectron2:v0 .
docker run --gpus all -d --shm-size=12gb -p 5001:5001 --env="DISPLAY" --volume="/tmp/.X11-unix:/tmp/.X11-unix:rw" --name=detectron2 detectron2:v0
# To test it from your PC, execute the following command (a Python equivalent follows below):
curl -X POST -F "image=@path/to/your/input.jpg" http://localhost:5001/detect -o output.jpg
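# Equivalent Python client (a sketch using the `requests` package; the image paths are placeholders):
import requests

# Post an image to the /detect endpoint and save the annotated JPEG the server returns.
with open("path/to/your/input.jpg", "rb") as f:
    resp = requests.post("http://localhost:5001/detect", files={"image": f})
resp.raise_for_status()
with open("output.jpg", "wb") as out:
    out.write(resp.content)
print("saved output.jpg,", len(resp.content), "bytes")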
------------------------- Cheat-sheet: ---------------------------
# Build:
docker build -t detectron2:v0 .
# Launch a new container from the built Docker image (requires a GPU):
docker run --gpus all -it --shm-size=12gb -p 5001:5001 --env="DISPLAY" --volume="/tmp/.X11-unix:/tmp/.X11-unix:rw" --name=detectron2 detectron2:v0
# Start the existing container:
docker start detectron2
# Connect to the container in interactive mode (e.g. to run the smoke test below):
docker exec -it detectron2 /bin/bash
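# Quick environment check (a sketch, not part of the original paste): paste into `python3`
# inside the container to confirm the GPU is visible and detectron2 imports cleanly.
import torch
import detectron2

# True means the NVIDIA runtime was passed through correctly (--gpus all).
print("torch:", torch.__version__, "| CUDA available:", torch.cuda.is_available())
print("detectron2:", detectron2.__version__)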
# Create a Linux wheel file for detectron2:
cd /home/tron2/detectron2_repo/
python3 setup.py bdist_wheel
# The created file will be at:
/home/tron2/detectron2_repo/dist/detectron2-0.6-cp311-cp311-linux_x86_64.whl
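# The wheel can then be installed with pip into another environment that has a matching Python
# minor version, PyTorch build, and CUDA toolkit. A minimal post-install check (a sketch; the
# collect_env helper ships with detectron2 and reports whether the compiled ops were built):
from detectron2.utils.collect_env import collect_env_info

# Prints Python/PyTorch/CUDA versions and whether the native detectron2 extension loaded.
print(collect_env_info())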