Accessing an uploaded file from a third-party Python script in Django

(Karan) #1

I have created a view that can upload a file; upon clicking the upload button, a third-party script is executed. I linked the script by adding it as a custom management command, and then invoked it from views.py with call_command. The script requires an image argument to be executed. I need to know how I can pass the uploaded file as an argument to the call_command function so that it forwards that file as an argument to the third-party script.

my code here
views.py

from django.shortcuts import render
from django.core.files.storage import FileSystemStorage
from django.core.management import call_command

def upload(request):
    """Handle an image upload and run the 'API' management command on it.

    On POST, saves the uploaded file via Django's default file storage and
    passes the saved file's absolute filesystem path to the command's
    --image argument. Always re-renders the upload page.
    """
    if request.method == 'POST':
        uploaded_file = request.FILES['document']
        fs = FileSystemStorage()
        # save() returns the name actually stored (it may be suffixed to
        # avoid collisions), so use its return value, not uploaded_file.name.
        saved_name = fs.save(uploaded_file.name, uploaded_file)
        # Bug fix: the original passed request.FILES (a MultiValueDict repr),
        # which is not a file path. fs.path() yields the on-disk path the
        # third-party script can actually open.
        call_command('API', '--image={}'.format(fs.path(saved_name)))
    return render(request, 'upload.html')

urls.py
from django.contrib import admin
from django.urls import path
from core import views
from django.conf import settings
from django.conf.urls.static import static

# URL routes for the project.
urlpatterns = [
    path('upload', views.upload, name='upload'),  # file-upload page/handler
    path('admin/', admin.site.urls),  # Django admin site
]

# Serve user-uploaded media through Django in development only; in
# production the web server should serve MEDIA_ROOT directly.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)

API.py (this is the third party script)

import numpy as np
import argparse
import time
import cv2
import os
import face_recognition
from django.core.management.base import BaseCommand

class Command(BaseCommand):
    """Run YOLOv3 object detection on a single image file.

    Invoked as:
        manage.py API --image=/path/to/img [--confidence C] [--threshold T]

    Loads the YOLO model from the local ``yolo-coco`` directory, draws
    labelled bounding boxes on the image, and displays it in a window.
    """
    help = 'A description of your command'

    def add_arguments(self, parser):
        """Register the command-line options for this command."""
        parser.add_argument('--image', dest='image', required=True, help='the url to process')
        parser.add_argument('--confidence', dest='confidence', default=0.5, required=False, type=float, help='confidence')
        parser.add_argument('--threshold', dest='threshold', default=0.3, required=False, type=float, help='threshold')

    def handle(self, *args, **options):
        """Load YOLO, detect objects in the image, and display the result.

        Raises:
            FileNotFoundError: if the --image path cannot be read.
        """
        image_path = options['image']
        confidence_arg = options['confidence']
        threshold_arg = options['threshold']

        # Class labels, one per line; empty if the model files are absent.
        labels = []
        if os.path.exists("yolo-coco/coco.names"):
            with open("yolo-coco/coco.names") as names_file:
                labels = names_file.read().strip().split("\n")

        # Fixed seed so each class keeps the same box color across runs.
        np.random.seed(42)
        colors = np.random.randint(0, 255, size=(len(labels), 3),
                                   dtype="uint8")

        weights_path = "yolo-coco/yolov3.weights"
        config_path = "yolo-coco/yolov3.cfg"

        print("[INFO] loading YOLO from disk...")
        net = cv2.dnn.readNetFromDarknet(config_path, weights_path)

        image = cv2.imread(image_path)
        if image is None:
            # cv2.imread returns None (no exception) on an unreadable path;
            # fail loudly instead of crashing later on .shape.
            raise FileNotFoundError("Could not read image: {}".format(image_path))
        (H, W) = image.shape[:2]

        # Names of the YOLO output layers. getUnconnectedOutLayers() returns
        # 1-element arrays on older OpenCV and plain scalars on newer
        # versions; flattening handles both (the original `i[0] - 1` breaks
        # on newer OpenCV).
        layer_names = net.getLayerNames()
        output_layers = [layer_names[i - 1]
                         for i in np.asarray(net.getUnconnectedOutLayers()).flatten()]

        blob = cv2.dnn.blobFromImage(image, 1 / 255.0, (416, 416),
                                     swapRB=True, crop=False)
        net.setInput(blob)
        start = time.time()
        layer_outputs = net.forward(output_layers)
        end = time.time()

        print("[INFO] YOLO took {:.6f} seconds".format(end - start))

        # First pass: collect every detection above the confidence cutoff.
        boxes = []
        confidences = []
        class_ids = []
        for output in layer_outputs:
            for detection in output:
                scores = detection[5:]
                class_id = np.argmax(scores)
                confidence = scores[class_id]

                if confidence > confidence_arg:
                    # YOLO emits center-x/center-y/width/height as fractions
                    # of the image; scale back to pixel coordinates.
                    box = detection[0:4] * np.array([W, H, W, H])
                    (center_x, center_y, width, height) = box.astype("int")

                    x = int(center_x - (width / 2))
                    y = int(center_y - (height / 2))

                    boxes.append([x, y, int(width), int(height)])
                    confidences.append(float(confidence))
                    class_ids.append(class_id)

        # Bug fix: in the original, NMS and drawing ran INSIDE the detection
        # loop, re-running suppression and redrawing boxes for every single
        # detection. Run non-maxima suppression once, after all detections
        # have been collected.
        idxs = cv2.dnn.NMSBoxes(boxes, confidences, confidence_arg,
                                threshold_arg)
        if len(idxs) > 0:
            for i in np.asarray(idxs).flatten():
                (x, y) = (boxes[i][0], boxes[i][1])
                (w, h) = (boxes[i][2], boxes[i][3])

                color = [int(c) for c in colors[class_ids[i]]]
                cv2.rectangle(image, (x, y), (x + w, y + h), color, 2)
                text = "{}: {:.4f}".format(labels[class_ids[i]], confidences[i])
                cv2.putText(image, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX,
                            0.5, color, 2)

        cv2.imshow("Image", image)
        cv2.waitKey(0)

settings.py

(screenshot of settings.py attached)

0 Likes