view file #39
Open
sumanth2002 wants to merge 16 commits into abhijithjadhav:python3.6 from sumanth2002:master
Conversation
…s-update Vthonte requirements update
Added contributers
@all-contributors please add @abhijitjadhav1998 for founder of the project
…dhav1998-patch-1 Update README.md
…ibutors/add-abhijitjadhav1998 docs: add abhijitjadhav1998 as a contributor for projectManagement
…ibutors/add-vthonte docs: add vthonte as a contributor for maintenance
Looks good to me
The views file has an if condition that selects the device when it is "gpu" and otherwise falls back to "cpu"; the string should be "cuda", not "gpu", since that is the device type PyTorch recognises.
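For reference, a minimal sketch of the intended device selection (the same check appears in the updated view code below); PyTorch only recognises device strings such as "cuda" and "cpu", so a comparison against "gpu" can never pick up the GPU:

```python
import torch

# Use the GPU when one is available; PyTorch expects the device string "cuda", not "gpu".
device = 'cuda' if torch.cuda.is_available() else 'cpu'
```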
```python
from django.shortcuts import render, redirect
import torch
import torchvision
from torchvision import transforms, models
from torch.utils.data import DataLoader
from torch.utils.data.dataset import Dataset
import os
import numpy as np
import cv2
import matplotlib.pyplot as plt
import face_recognition
from torch.autograd import Variable
import time
import sys
from torch import nn
import json
import glob
import copy
from torchvision import models
import shutil
from PIL import Image as pImage
import time
from django.conf import settings
from .forms import VideoUploadForm
index_template_name = 'index.html'
predict_template_name = 'predict.html'
about_template_name = "about.html"
im_size = 112
mean=[0.485, 0.456, 0.406]
std=[0.229, 0.224, 0.225]
sm = nn.Softmax()
inv_normalize = transforms.Normalize(mean=-1*np.divide(mean,std),std=np.divide([1,1,1],std))
if torch.cuda.is_available():
    device = 'cuda'
else:
    device = 'cpu'

train_transforms = transforms.Compose([
    transforms.ToPILImage(),
    transforms.Resize((im_size,im_size)),
    transforms.ToTensor(),
    transforms.Normalize(mean,std)])
class Model(nn.Module):
    ...  # body omitted in this excerpt

class validation_dataset(Dataset):
    def __init__(self, video_names, sequence_length=60, transform=None):
        self.video_names = video_names
        self.transform = transform
        self.count = sequence_length
def im_convert(tensor, video_file_name):
    """ Display a tensor as an image. """
    image = tensor.to("cpu").clone().detach()
    image = image.squeeze()
    image = inv_normalize(image)
    image = image.numpy()
    image = image.transpose(1,2,0)
    image = image.clip(0, 1)
    # This image is not used
    # cv2.imwrite(os.path.join(settings.PROJECT_DIR, 'uploaded_images', video_file_name+'_convert_2.png'),image*255)
    return image
def im_plot(tensor):
    image = tensor.cpu().numpy().transpose(1,2,0)
    b,g,r = cv2.split(image)
    image = cv2.merge((r,g,b))
    image = image*[0.22803, 0.22145, 0.216989] + [0.43216, 0.394666, 0.37645]
    image = image*255.0
    plt.imshow(image.astype('uint8'))
    plt.show()
def predict(model,img,path = './', video_file_name=""):
    fmap,logits = model(img.to(device))
    img = im_convert(img[:,-1,:,:,:], video_file_name)
    params = list(model.parameters())
    weight_softmax = model.linear1.weight.detach().cpu().numpy()
    logits = sm(logits)
    _,prediction = torch.max(logits,1)
    confidence = logits[:,int(prediction.item())].item()*100
    print('confidence of prediction:',logits[:,int(prediction.item())].item()*100)
    return [int(prediction.item()),confidence]
def plot_heat_map(i, model, img, path = './', video_file_name=''):
    fmap,logits = model(img.to(device))
    params = list(model.parameters())
    weight_softmax = model.linear1.weight.detach().cpu().numpy()
    logits = sm(logits)
    _,prediction = torch.max(logits,1)
    idx = np.argmax(logits.detach().cpu().numpy())
    bz, nc, h, w = fmap.shape
    #out = np.dot(fmap[-1].detach().cpu().numpy().reshape((nc, h*w)).T,weight_softmax[idx,:].T)
    out = np.dot(fmap[i].detach().cpu().numpy().reshape((nc, h*w)).T,weight_softmax[idx,:].T)
    predict = out.reshape(h,w)
    predict = predict - np.min(predict)
    predict_img = predict / np.max(predict)
    predict_img = np.uint8(255*predict_img)
    out = cv2.resize(predict_img, (im_size,im_size))
    heatmap = cv2.applyColorMap(out, cv2.COLORMAP_JET)
    img = im_convert(img[:,-1,:,:,:], video_file_name)
    result = heatmap * 0.5 + img*0.8*255
    # Saving heatmap - Start
    heatmap_name = video_file_name+"_heatmap_"+str(i)+".png"
    image_name = os.path.join(settings.PROJECT_DIR, 'uploaded_images', heatmap_name)
    cv2.imwrite(image_name,result)
    # Saving heatmap - End
    result1 = heatmap * 0.5/255 + img*0.8
    r,g,b = cv2.split(result1)
    result1 = cv2.merge((r,g,b))
    return image_name
# Model Selection
def get_accurate_model(sequence_length):
    model_name = []
    sequence_model = []
    final_model = ""
    list_models = glob.glob(os.path.join(settings.PROJECT_DIR, "models", "*.pt"))
ALLOWED_VIDEO_EXTENSIONS = set(['mp4','gif','webm','avi','3gp','wmv','flv','mkv'])

def allowed_video_file(filename):
    #print("filename" ,filename.rsplit('.',1)[1].lower())
    if (filename.rsplit('.',1)[1].lower() in ALLOWED_VIDEO_EXTENSIONS):
        return True
    else:
        return False
def index(request):
    if request.method == 'GET':
        video_upload_form = VideoUploadForm()
        if 'file_name' in request.session:
            del request.session['file_name']
        if 'preprocessed_images' in request.session:
            del request.session['preprocessed_images']
        if 'faces_cropped_images' in request.session:
            del request.session['faces_cropped_images']
        return render(request, index_template_name, {"form": video_upload_form})
    else:
        video_upload_form = VideoUploadForm(request.POST, request.FILES)
        if video_upload_form.is_valid():
            video_file = video_upload_form.cleaned_data['upload_video_file']
            video_file_ext = video_file.name.split('.')[-1]
            sequence_length = video_upload_form.cleaned_data['sequence_length']
            video_content_type = video_file.content_type.split('/')[0]
            if video_content_type in settings.CONTENT_TYPES:
                if video_file.size > int(settings.MAX_UPLOAD_SIZE):
                    video_upload_form.add_error("upload_video_file", "Maximum file size 100 MB")
                    return render(request, index_template_name, {"form": video_upload_form})
def predict_page(request):
    if request.method == "GET":
        # Redirect to 'home' if 'file_name' is not in session
        if 'file_name' not in request.session:
            return redirect("ml_app:home")
        if 'file_name' in request.session:
            video_file = request.session['file_name']
        if 'sequence_length' in request.session:
            sequence_length = request.session['sequence_length']
        path_to_videos = [video_file]
        video_file_name = os.path.basename(video_file)
        video_file_name_only = os.path.splitext(video_file_name)[0]
        # Production environment adjustments
        if not settings.DEBUG:
            production_video_name = os.path.join('/home/app/staticfiles/', video_file_name.split('/')[3])
            print("Production file name", production_video_name)
        else:
            production_video_name = video_file_name
def about(request):
    return render(request, about_template_name)

def handler404(request,exception):
    return render(request, '404.html', status=404)

def cuda_full(request):
    return render(request, 'cuda_full.html')
```