Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- # -*- coding: utf-8 -*-
- # Hossam Amer
- # Run using this way: python3 visualize_featureMaps.py
- # Inception image recognition attempt v1
- import tensorflow as tf
- import numpy as np
- import re
- import os
- import time
- from tkinter import *
- import tkinter.filedialog
- import matplotlib.pyplot as plt
- import logging
- # Video capture and convert rgb
- from video_capture import VideoCaptureYUV
- import cv2
- # Node look up
- from node_lookup import NodeLookup
- import time
- # for fetching files
- import glob
- import math
- from random import randrange
- import errno
- ## CODE FROM LAOD DATA
- # path_jpeg = '/Volumes/work/workspace/Visualization_analysis_jpg_hevc/visualize/visualize_inception_featureMaps/all_new_graphs/PSNR_point_of_view'
- # path_hevc = '/Volumes/work/workspace/Visualization_analysis_jpg_hevc/visualize/visualize_inception_featureMaps/all_new_graphs/PSNR_point_of_view'
- path_jpeg = './all_new_graphs/PSNR_Point_View/'
- path_hevc = './all_new_graphs/PSNR_Point_View/'
- import numpy as np
- import os
- import sys
# NOTE(review): original indentation was lost in transit; if/else body
# membership below is reconstructed from data flow — confirm against the
# original file.
# Workflow switch: truthy -> QF/QP indices come straight from argv (plus an
# xlsx workbook load); falsy -> indices are looked up per image from saved
# .npy tables for the requested PSNR bin.
flag = 0
## get idx and bin num from input
#img_idx = int(sys.argv[1])
# 1-based index of the ImageNet validation image to visualize.
img_idx = imgID = int(sys.argv[1])
if flag :
    bin_num = 75
    # Column indices into the JPEG / HEVC rank tables, taken from argv.
    jpg_qf_idx = int(sys.argv[2])
    hevc_qp_idx = int(sys.argv[3])
    from openpyxl import load_workbook
    path_to_file = '/home/h2amer/work/workspace/Visualization_analysis_jpg_hevc/visualize/visualize_inception_featureMaps/IV3-Qp-All_1_50000_HEVC.xlsx'
    wb = load_workbook(filename=path_to_file, read_only=True, data_only=True)
    ws = wb['Sheet1']
    sheet = wb.get_sheet_by_name('Sheet1')
    # 50000 images in the ImageNet validation set; rank tables are
    # N x (number of HEVC QPs = 27) and N x (number of JPEG QFs = 21).
    N = 50000
    max_row_limit = N
    rank_hevc = np.zeros((N , 27))
    rank_jpg = np.zeros((N,21))
    # np.save('rank_jpg', rank_jpg )
    RANK_HEVC = np.load('rank_hevc.npy')
    RANK_JPG = np.load('rank_jpg.npy')
    print ('img id is' , img_idx)
    print('hevc rank ' , RANK_HEVC[ img_idx - 1 , hevc_qp_idx] )
    print('jpg rank ' , RANK_JPG[ img_idx - 1 , jpg_qf_idx] )
    print('jpg qf_idx is' , jpg_qf_idx )
else:
    # PSNR bin number selects a column of the per-image QP/QF index tables.
    bin_num = int(sys.argv[2])
    QP_idx_jpg = np.load(os.path.join( path_jpeg, 'QP_idx_jpg.npy'))
    QP_idx_hevc = np.load(os.path.join( path_hevc , 'QP_idx_hevc.npy'))
    # Per-image QP/QF index for the chosen bin (one entry per image).
    img_qfs_hevc = QP_idx_hevc[: , bin_num]
    img_qfs_jpg = QP_idx_jpg[: , bin_num]
    N = 50000
    max_row_limit = N
    rank_hevc = np.zeros((N , 27))
    rank_jpg = np.zeros((N,21))
    # np.save('rank_jpg', rank_jpg )
    RANK_HEVC = np.load('rank_hevc.npy')
    RANK_JPG = np.load('rank_jpg.npy')
    print ('img id is' , img_idx)
    # Indices for this image come from the lookup tables (1-based img_idx).
    hevc_qp_idx = int( img_qfs_hevc[img_idx -1] )
    jpg_qf_idx = int( img_qfs_jpg[img_idx -1])
    imgID = img_idx
    print('hevc rank ' , RANK_HEVC[ img_idx - 1 , int(img_qfs_hevc[img_idx -1]) ])
    print('jpg rank ' , RANK_JPG[ img_idx - 1 , int(img_qfs_jpg[img_idx -1] )] )
    print('jpg qf_idx is' , img_qfs_jpg[img_idx -1] )
#################################################################################3
# needs more work
#MODEL_PATH = '/Users/hossam.amer/7aS7aS_Works/work/jpeg_ml_research/inceptionv3/inception_model'
# Directory holding the frozen Inception-v3 graph (classify_image_graph_def.pb).
MODEL_PATH = './inception_model'
# # YUV Path
# PATH_TO_RECONS = '/Volumes/MULTICOMHD2/set_yuv/Seq-RECONS/'
# # JPEG Path
# path_to_valid_images = '/Volumes/MULTICOMHD2/validation_original/';
# path_to_valid_QF_images = '/Volumes/MULTICOMHD2/validation_generated_QF/';
MAIN_PATH = '/Volumes/MULTICOM102/103_HA/MULTICOM103/set_yuv/'
# YUV Path: HEVC-reconstructed .yuv files, one folder per ~1000 images.
PATH_TO_RECONS = os.path.join(MAIN_PATH, 'Seq-RECONS-ffmpeg/')
# '/Volumes/MULTICOMHD2/set_yuv/Seq-RECONS/'
# JPEG Path: original validation JPEGs (used when QF sentinel is 110).
path_to_valid_images = '/media/h2amer/ADATA HD710/validation_generated_QF_0_5_100/'
#'/Volumes/MULTICOMHD2/validation_original/';
#path_to_valid_QF_images = '/media/h2amer/ADATA HD710/validation_generated_QF_0_5_100/'
# path_to_valid_QF_images = '/media/h2amer/MULTICOM101/jpeg_data/validation_generated_QF/'
#'/Volumes/MULTICOMHD2/validation_generated_QF/';
# path_to_valid_QF_images = '/Volumes/MULTICOM101/jpeg_data/validation_generated_QF/'
# Re-encoded JPEGs at each quality factor, laid out shard-<n>/<folder>/.
path_to_valid_QF_images = '/Volumes/MULTICOM-104/validation_generated_QF/'
# Main
# Print ops:
# print_ops(sess)
path = '/home/h2amer/work/workspace/Visualization_analysis_jpg_hevc/visualize/visualize_inception_featureMaps/analysis2/'
# Inception layer to visualize; matches graph tensor name 'conv_<layerID>:0'.
layerID = int(sys.argv[3])
# Negative index -> plot() renders a grid of all feature maps; a
# non-negative value would select a single map.
featureMapIdx = -73
# Min-max normalize each feature map to [0, 255] and render in grayscale.
isGrayScaleNorm = True
# Load the pre-trained Inception-v3 model to create the graph.
# (translated from the original Chinese comment)
def create_graph():
    """Import the frozen Inception-v3 GraphDef into the default TF graph.

    Reads MODEL_PATH/classify_image_graph_def.pb and imports it with an
    empty name scope, so tensors keep their original names ('conv_1:0',
    'softmax:0', ...).
    """
    # the class that's been created from the textual definition in graph.proto
    #with tf.gfile.FastGFile('./inception_model/inception_v3_2016_08_28_frozen.pb', 'rb') as f:
    with tf.gfile.FastGFile(MODEL_PATH + '/classify_image_graph_def.pb', 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        tf.import_graph_def(graph_def, name='')
def print_ops(sess):
    """Print the name of every constant ('Const') op in the session's graph.

    Debug helper: useful for discovering tensor names in the imported
    Inception-v3 graph.
    """
    for graph_op in sess.graph.get_operations():
        if graph_op.type == "Const":
            print(graph_op.name)
# Suppress TensorFlow C++ log output below ERROR level.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # Hide the warning information from Tensorflow - annoying...
def show_image(imgID, QF, image_data, isCast = True):
    """Load the requested image and save a .bmp copy under ./<imgID>/.

    isCast=True -> read the HEVC-reconstructed YUV and convert to RGB;
    isCast=False -> read the JPEG (QF == 110 is the sentinel for the
    original, un-re-encoded validation JPEG). The image_data parameter is
    overwritten with the pixels read here.

    NOTE(review): original indentation was lost in transit; branch
    membership below (notably the save block applying to both JPEG cases)
    is reconstructed by analogy with get_image_data — confirm.
    """
    # Parse the YUV and convert it into RGB
    # original_img_ID = imgID
    # imgID = str(imgID).zfill(8)
    # shard_num = round(original_img_ID/10000);
    # folder_num = math.ceil(original_img_ID/1000);
    original_img_ID = imgID
    print('SHOW IMAGE: ', isCast)
    imgID = str(imgID).zfill(8)
    # Dataset layout: 10000 images per shard, ~1000 per folder. The three
    # conditionals below patch folder_num at exact 1000-image boundaries.
    shard_num = math.floor((original_img_ID - 1) / 10000)
    folder_num = math.ceil(original_img_ID/1000)+1;
    if (((original_img_ID-1)/1000.0)==folder_num-1):
        folder_num = (original_img_ID-1)/1000
    if (folder_num == original_img_ID/1000 ):
        folder_num = folder_num + 1
    if ((folder_num-1)*1000==original_img_ID):
        folder_num = folder_num - 1
    if not isCast:
        if QF == 110:
            # Sentinel 110 -> the original validation JPEG.
            image = path_to_valid_images + str(folder_num) + '/ILSVRC2012_val_' + imgID + '.JPEG'
            figure_title = 'ILSVRC2012_val_' + imgID + '.JPEG'
        else:
            # shard_num = math.floor((original_img_ID - 1) / 10000)
            # folder_num = math.ceil(original_img_ID/1000)
            # Recompute shard/folder (duplicated from above in the original).
            shard_num = math.floor((original_img_ID - 1) / 10000)
            folder_num = math.ceil(original_img_ID/1000)+1;
            if (((original_img_ID-1)/1000.0)==folder_num-1):
                folder_num = (original_img_ID-1)/1000
                print('here1')
            if (folder_num == original_img_ID/1000 ):
                folder_num = folder_num + 1
            if ((folder_num-1)*1000==original_img_ID):
                folder_num = folder_num - 1
            #image = path_to_valid_QF_images + str(folder_num) + '/ILSVRC2012_val_' + imgID + '-QF-' + str(QF) + '.JPEG'
            image = path_to_valid_QF_images + 'shard-' + str(int(shard_num)) + '/' + str(int(folder_num)) + '/ILSVRC2012_val_' + imgID + '-QF-' + str(QF) + '.JPEG'
            figure_title = 'ILSVRC2012_val_' + imgID + '-QF-' + str(QF) + '.JPEG'
        print('Show image JPEG: ', image)
        image_data = cv2.imread(image)
        print('Save JPEG')
        # Save a lossless .bmp copy next to the other outputs for this image.
        filename = './'+imgID+'/ILSVRC2012_val_' + imgID + '-QF-' + str(QF) +'.bmp'
        savefile_ex(filename)
        cv2.imwrite(filename, image_data)
        # cv2.imwrite('/Users/ahamsala/Documents/7.visualize_code/Visualization_analysis_jpg_hevc/image1.bmp', image_data)
        print(image)
    else:
        path_to_recons = PATH_TO_RECONS
        # Get files list to fetch the correct name
        print('PATH TO GLOB: ', path_to_recons + str(int(folder_num)) + '/ILSVRC2012_val_' + imgID + '*.yuv')
        filesList = glob.glob(path_to_recons + str(int(folder_num)) + '/ILSVRC2012_val_' + imgID + '*.yuv')
        # Recon filename encodes underscore-separated fields; width/height and
        # the grayscale flag are parsed from fixed positions at the end.
        name = filesList[0].split('/')[-1]
        rgbStr = name.split('_')[5]
        width = int(name.split('_')[-4])
        height = int(name.split('_')[-3])
        is_gray_str = name.split('_')[-2]
        figure_title = 'ILSVRC2012_val_' + imgID + '_' + str(width) + '_' + str(height) + '_' + rgbStr + '_' + str(QF) + '_1.yuv'
        image = path_to_recons + str(int(folder_num)) + '/ILSVRC2012_val_' + imgID + '_' + str(width) + '_' + str(height) + '_' + rgbStr + '_' + str(QF) + '.yuv'
        print('Save: ', image)
        size = (height, width)
        videoObj = VideoCaptureYUV(image, size, isGrayScale=is_gray_str.__contains__('Y'))
        ret, yuv, rgb = videoObj.getYUVAndRGB()
        image_data = rgb
        print('Save HEVC')
        filename = './'+imgID+'/ILSVRC2012_val_' + imgID + '_' + str(width) + '_' + str(height) + '_' + rgbStr + '_' + str(QF) +'.bmp'
        savefile_ex(filename)
        cv2.imwrite(filename, image_data)
        print(image)
    # plt.figure(100*QF)
    # plt.imshow(image_data)
    # plt.suptitle(figure_title, fontsize=16)
def get_image_data(imgID, QF, isCast = True):
    """Return (image_data, figure_title) for the requested image.

    isCast=True  -> decode the HEVC-reconstructed YUV into an RGB array
                    (later fed to the graph's 'Cast:0' input);
    isCast=False -> return raw JPEG bytes (fed to 'DecodeJpeg/contents:0');
                    QF == 110 is the sentinel for the original JPEG.

    NOTE(review): original indentation was lost in transit; branch
    membership below is reconstructed — confirm against the original file.
    """
    # Parse the YUV and convert it into RGB
    original_img_ID = imgID
    imgID = str(imgID).zfill(8)
    # Dataset layout: 10000 images per shard, ~1000 per folder; the three
    # conditionals patch folder_num at exact 1000-image boundaries.
    shard_num = math.floor((original_img_ID - 1) / 10000)
    folder_num = math.ceil(original_img_ID/1000)+1;
    if (((original_img_ID-1)/1000.0)==folder_num-1):
        folder_num = (original_img_ID-1)/1000
        print('here1')
    if (folder_num == original_img_ID/1000 ):
        folder_num = folder_num + 1
    if ((folder_num-1)*1000==original_img_ID):
        folder_num = folder_num - 1
    # shard_num = round(original_img_ID/10000);
    # folder_num = math.ceil(original_img_ID/1000)+1;
    if isCast:
        path_to_recons = PATH_TO_RECONS
        # Get files list to fetch the correct name
        filesList = glob.glob(path_to_recons + str(int(folder_num)) + '/ILSVRC2012_val_' + imgID + '*.yuv')
        print(path_to_recons + str(int(folder_num)) + '/ILSVRC2012_val_' + imgID + '*.yuv')
        # Parse width/height and grayscale flag out of the recon filename.
        name = filesList[0].split('/')[-1]
        rgbStr = name.split('_')[5]
        width = int(name.split('_')[-4])
        height = int(name.split('_')[-3])
        is_gray_str = name.split('_')[-2]
        image = path_to_recons + str(int(folder_num)) + '/ILSVRC2012_val_' + imgID + '_' + str(width) + '_' + str(height) + '_' + rgbStr + '_' + str(QF) + '.yuv'
        figure_title = 'ILSVRC2012_val_' + imgID + '_' + str(width) + '_' + str(height) + '_' + rgbStr + '_' + str(QF) + '_1.yuv'
        print(image)
        size = (height, width) # height and then width
        videoObj = VideoCaptureYUV(image, size, isGrayScale=is_gray_str.__contains__('Y'))
        ret, yuv, rgb = videoObj.getYUVAndRGB()
        image_data = rgb
    else:
        if QF == 110:
            # Sentinel 110 -> the original validation JPEG.
            image = path_to_valid_images + str(folder_num) + '/ILSVRC2012_val_' + imgID + '.JPEG'
            figure_title = 'ILSVRC2012_val_' + imgID + '.JPEG'
        else:
            # shard_num = math.floor((original_img_ID - 1) / 10000)
            # folder_num = math.ceil(original_img_ID/1000)
            # Recompute shard/folder (duplicated from above in the original).
            shard_num = math.floor((original_img_ID - 1) / 10000)
            folder_num = math.ceil(original_img_ID/1000)+1;
            if (((original_img_ID-1)/1000.0)==folder_num-1):
                folder_num = (original_img_ID-1)/1000
                print('here1')
            if (folder_num == original_img_ID/1000 ):
                folder_num = folder_num + 1
            if ((folder_num-1)*1000==original_img_ID):
                folder_num = folder_num - 1
            #image = path_to_valid_QF_images + str(folder_num) + '/ILSVRC2012_val_' + imgID + '-QF-' + str(QF) + '.JPEG'
            image = path_to_valid_QF_images + 'shard-' + str(int(shard_num)) + '/' + str(int(folder_num)) + '/ILSVRC2012_val_' + imgID + '-QF-' + str(QF) + '.JPEG'
            figure_title = 'ILSVRC2012_val_' + imgID + '-QF-' + str(QF) + '.JPEG'
        print(image)
        # Raw JPEG bytes — the graph's DecodeJpeg node does the decoding.
        image_data = tf.gfile.FastGFile(image, 'rb').read()
    return image_data, figure_title
def plot(feature_maps, featureMapIdx, figure_title, isCast, imgID, bin_num):
    """Render feature maps and save a high-dpi PNG under ./<zero-padded
    imgID>/layerID_<layerID>_<bin_num>_<imgID>/.

    feature_maps: H x W x K array (K maps). featureMapIdx < 0 plots all K
    maps in a K//8-row grid; otherwise only the selected map is drawn.
    isCast chooses the '_hevc' vs '_jpg' filename suffix. Relies on the
    module globals figureID, layerID and isGrayScaleNorm.

    NOTE(review): plt.subplots() followed by plt.figure(figureID) changes
    the "current" figure that savefig targets — looks suspicious, but kept
    byte-identical; confirm intended behavior before touching.
    """
    K = feature_maps.shape[2]
    nRows = K//8
    nCols = K//nRows
    if featureMapIdx < 0:
        fig, ax = plt.subplots(nrows=nRows, ncols=nCols, figsize=(10, 5))
        plt.figure(figureID)
        # Column-major placement: idx walks down each grid column.
        for irow, row in enumerate(ax):
            for icol, col in enumerate(row):
                idx = irow + icol * nRows
                if idx >= K:
                    continue
                m = feature_maps[:, :, idx]
                if isGrayScaleNorm:
                    # Min-max normalize the map to [0, 255] for display.
                    A = np.double(m)
                    out = np.zeros(A.shape, np.double)
                    m = cv2.normalize(A, out, 255.0, 0.0, cv2.NORM_MINMAX)
                    col.imshow(m, cmap='gray', vmin=0.0, vmax=255.0)
                else:
                    col.imshow(m)
                col.axis('off')
    else:
        plt.figure(figureID)
        m = feature_maps[:, :, featureMapIdx]
        if isGrayScaleNorm:
            A = np.double(m)
            out = np.zeros(A.shape, np.double)
            m = cv2.normalize(A, out, 255.0, 0.0, cv2.NORM_MINMAX)
            plt.imshow(m, cmap='gray', vmin=0.0, vmax=255.0)
        else:
            plt.imshow(m)
        plt.axis('off')
        # Prefix the title with the selected map index.
        figure_title = str(featureMapIdx) + ')' + figure_title
    plt.suptitle(figure_title, fontsize=16)
    plt.subplots_adjust(wspace=0.0, hspace=0.0)
    if isCast:
        filename = './'+str(imgID).zfill(8)+'/layerID_'+str(layerID)+'_'+str(bin_num)+'_'+str(imgID)+'/'+str(imgID) + '_' + str(bin_num) + '_hevc.png'
        savefile_ex(filename)
        plt.savefig(filename, dpi=600)
        # plt.savefig(str(imgID) + '_' + str(bin_num) + '_hevc.png', dpi=600)
    else:
        filename = './'+str(imgID).zfill(8)+'/layerID_'+str(layerID)+'_'+str(bin_num)+'_'+str(imgID)+'/'+str(imgID) + '_' + str(bin_num) + '_jpg.png'
        savefile_ex(filename)
        plt.savefig(filename, dpi=600)
        # plt.savefig(str(imgID) + '_' +str(bin_num) + '_jpg.png', dpi=600)
def savefile_ex(filename):
    """Ensure the directory component of *filename* exists.

    Creates intermediate directories as needed. A bare filename with no
    directory part is a no-op (the original would crash: os.makedirs('')
    raises FileNotFoundError). Safe under concurrent callers —
    exist_ok=True subsumes the old errno.EEXIST race guard.
    """
    dirname = os.path.dirname(filename)
    if dirname:
        os.makedirs(dirname, exist_ok=True)
# Visualizes feature map of a specific image in the validation set
def visualize_image(imgID, QF, layerID = 1, figureID = 1, isCast = True, isGrayScaleNorm = False, featureMapIdx = -1, bin_num = 0):
    """Run one image through Inception-v3, print its top-5 predictions,
    and plot/save the feature maps of conv layer <layerID>.

    imgID: 1-based validation-image index. QF: HEVC QP (isCast=True) or
    JPEG quality factor (isCast=False; 110 = original JPEG). Side effects:
    creates the TF graph/session, writes .bmp/.png files via show_image()
    and plot(), prints predictions.
    """
    create_graph()
    # CPU only: hide all GPUs from TensorFlow for this session.
    config = tf.ConfigProto(device_count = {'GPU': 0})
    #sess = tf.Session()
    sess = tf.Session(config=config)
    # Inception-v3: last layer is output as softmax
    conv1_tensor = sess.graph.get_tensor_by_name('conv_' + str(layerID) + ':0')
    # Inception-v3: get the softmax tensor
    softmax_tensor = sess.graph.get_tensor_by_name('softmax:0')
    # Title of the figure
    figure_title = ''
    # Get image data
    image_data, figure_title = get_image_data(imgID, QF, isCast)
    #Show the image
    show_image(imgID, QF, image_data, isCast)
    # Feed point differs by source: decoded RGB array goes into 'Cast:0',
    # raw JPEG bytes into 'DecodeJpeg/contents:0'.
    if isCast:
        feature_maps = sess.run(conv1_tensor, {'Cast:0': image_data}) # n, m, 3
        predictions = sess.run(softmax_tensor,{'Cast:0': image_data}) # n, m, 3
    else:
        feature_maps = sess.run(conv1_tensor, {'DecodeJpeg/contents:0': image_data}) # n, m, 3
        predictions = sess.run(softmax_tensor,{'DecodeJpeg/contents:0': image_data}) # n, m, 3
    predictions = np.squeeze(predictions)
    # ID --> English string label.
    node_lookup = NodeLookup()
    # argsort()[-1008:][::-1] = all 1008 class ids, descending by score.
    N = -1008
    # Current_rank = -1
    current_rank = -1
    #(top-5)
    top_5 = predictions.argsort()[N:][::-1]
    # NOTE(review): this loop only assigns locals; its print statements are
    # commented out in the original.
    for rank, node_id in enumerate(top_5):
        human_string = node_lookup.id_to_string(node_id)
        score = predictions[node_id]
        # if rank < 5:
        # print('%d: %s (score = %.5f)' % (1 + rank, human_string, score))
        # if(gt_label_list[idx+1] == human_string):
        # print('%d: %s (score = %.5f)' % (1 + rank, human_string, score))
    # Print the top-5 labels with scores.
    for idx1, rank_top5 in zip(range(1,6), top_5):
        print('isHEVC: %d, Top-5 -- Node_ID: %d : %s (score = %.20f)' % (int(isCast), int(rank_top5), node_lookup.id_to_string(rank_top5), float(predictions[rank_top5])))
    # print(type(feature_maps))
    #print(feature_maps.shape)
    # Drop the leading batch dimension: (1, H, W, K) -> (H, W, K).
    feature_maps = np.reshape(feature_maps, [feature_maps.shape[1], feature_maps.shape[2], feature_maps.shape[3]])
    plot(feature_maps, featureMapIdx, figure_title, isCast, imgID, bin_num)
    print('GrayScale: ', isGrayScaleNorm)
    print ('Layer ID: %d' % layerID)
    if featureMapIdx > 0:
        print('Feature Map Index: %d' % featureMapIdx)
    print('\n')
# Build the HEVC QP ladder: [51, 50, 48, 46, ..., 2, 0] (27 entries,
# matching the 27 columns of the HEVC rank table).
QP = []
QP.append(51)
for i in range(50, -2 , -2):
    QP.append(i)
QF = QP[hevc_qp_idx]
figureID = 1
# print('here',imgID)
# Pass 1: HEVC reconstruction (isCast=True).
visualize_image(imgID, QF, layerID, figureID, True, isGrayScaleNorm ,featureMapIdx, bin_num )
# JPEG quality-factor ladder: [0, 5, 10, ..., 95] (21 entries, matching
# the 21 columns of the JPEG rank table).
QF = [i for i in range(0,100,5)]
QF = QF[jpg_qf_idx]
figureID = figureID + 1
# Pass 2: JPEG at the matched quality factor (isCast=False).
visualize_image(imgID, QF, layerID, figureID, False, isGrayScaleNorm , featureMapIdx, bin_num)
# QF = 110
# figureID = figureID + 1
# visualize_image(imgID, QF, layerID, figureID, False, isGrayScaleNorm, featureMapIdx)
# QF = 10
# figureID = figureID + 1
# visualize_image(imgID, QF, layerID, figureID, False, isGrayScaleNorm)
# figureID = figureID + 1
# visualize_image(imgID, QF, layerID, figureID, False, isGrayScaleNorm, featureMapIdx)
# Show plt at the end
plt.show()
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement