#!/bin/env python
# -*- coding: utf-8 -*-
# encoding=utf-8 vi:ts=4:sw=4:expandtab:ft=python

"""
/***************************************************************************
*
* Copyright (c) 2019 Baidu.com, Inc. All Rights Reserved
* @file model.py
* @author lipeihan01(lipeihan01@baidu.com)
* @date 2019/5/23 10:31:21
* @brief CE demo: running inference with a Paddle-TensorRT config
*
**************************************************************************/
"""
import threading
import numpy as np
import paddle
import paddle.fluid as fluid
import time


def Set_Config(model_path):
    """Build an AnalysisConfig that runs the model through TensorRT in INT8."""
    target_model_path = model_path
    prog_file = "{}/model".format(target_model_path)
    params_file = "{}/params".format(target_model_path)
    config = fluid.core.AnalysisConfig(prog_file, params_file)
    config.enable_use_gpu(100, 0)  # initial GPU memory pool (MB) and GPU id
    config.enable_tensorrt_engine(1 << 30, 1,
                                  precision_mode=fluid.core.AnalysisConfig.Precision.Int8,
                                  use_static=False,
                                  use_calib_mode=True)
    # run TensorRT in FP32 instead:
    # config.enable_tensorrt_engine(workspace_size=1 << 30, max_batch_size=1,
    #                               precision_mode=fluid.core.AnalysisConfig.Precision.Float32)
    return config
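

# --- optional: CPU-only config sketch ---
# A minimal sketch of a CPU fallback for machines without a GPU/TensorRT.
# Set_Config_CPU is not part of the original demo; enable_mkldnn() and
# set_cpu_math_library_num_threads() are assumed to be available in this
# Paddle release -- drop those calls if your build does not expose them.
def Set_Config_CPU(model_path):
    """Sketch: AnalysisConfig that runs the same model on CPU, no TensorRT."""
    config = fluid.core.AnalysisConfig("{}/model".format(model_path),
                                       "{}/params".format(model_path))
    config.disable_gpu()                        # run on CPU instead of GPU
    config.enable_mkldnn()                      # optional MKL-DNN acceleration
    config.set_cpu_math_library_num_threads(4)  # CPU math threads (tunable)
    return config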


def load_fake_data(batch_size=1):
    """
    Build a fake all-ones input tensor of shape [batch_size, 3, 300, 300].
    """
    channels = 3
    height = 300
    width = 300
    input_num = channels * height * width * batch_size
    input_data = [[0 for x in range(input_num)]]

    sum_i = 0
    for i in range(input_num):
        input_data[0][i] = 1
        sum_i += input_data[0][i]
    # print("sum_i: {}".format(sum_i))

    the_data = []
    for data in input_data:
        the_data += data

    input_tensor = fluid.core.PaddleTensor()
    input_tensor.shape = [batch_size, channels, height, width]
    input_tensor.data = fluid.core.PaddleBuf(the_data)
    input_value = [input_tensor]

    return input_value

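
# --- optional: real image input sketch ---
# A sketch of feeding a real image instead of the all-ones fake data.
# load_image_data is a hypothetical helper, not part of the original demo; it
# assumes `img` is an HWC float32 numpy array already resized to 300x300 and
# normalized as the model expects (e.g. decoded with OpenCV or PIL).
def load_image_data(img, batch_size=1):
    """Sketch: wrap an HWC float32 image (300x300x3) into a PaddleTensor."""
    chw = np.transpose(img, (2, 0, 1))                # HWC -> CHW
    flat = chw.astype("float32").flatten().tolist()   # flatten to a plain list
    input_tensor = fluid.core.PaddleTensor()
    input_tensor.shape = [batch_size, 3, 300, 300]
    input_tensor.data = fluid.core.PaddleBuf(flat)
    return [input_tensor]
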

def run_inference(model_path, repeat_times=100):
    """
    Run inference and report the average latency per run.
    """
    config = Set_Config(model_path)

    predict = fluid.core.create_paddle_predictor(config)
    input_value = load_fake_data()
    outputs = predict.run(input_value)
    results = outputs[0].data.float_data()

    # warmup
    for i in range(5):
        outputs = predict.run(input_value)
        print("warm_up: {0}".format(i))

    t = []
    t1 = time.time()
    for i in range(repeat_times):
        outputs = predict.run(input_value)
    t2 = time.time()
    t.append(t2 - t1)
    print("{:.3f} ms".format((t2 - t1) * 1000.0 / repeat_times))  # average latency per run

    return results, t
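

# --- optional: per-run latency sketch ---
# A small sketch for per-iteration timing, in case percentile latencies are
# wanted in addition to the averaged figure printed above. run_timed is a
# hypothetical helper, not part of the original demo.
def run_timed(predict, input_value, repeat_times=100):
    """Sketch: return a sorted list of per-run latencies in milliseconds."""
    latencies = []
    for _ in range(repeat_times):
        start = time.time()
        predict.run(input_value)
        latencies.append((time.time() - start) * 1000.0)
    latencies.sort()
    print("p50: {:.2f} ms, p99: {:.2f} ms".format(
        latencies[len(latencies) // 2],
        latencies[min(len(latencies) - 1, int(len(latencies) * 0.99))]))
    return latencies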


if __name__ == '__main__':
    model_path = "./MobileNet_SSD_infer_model"
    results, t = run_inference(model_path)
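    # A sketch of reading the detector output. This assumes an SSD-style
    # detection output whose rows are [label, score, xmin, ymin, xmax, ymax];
    # the exact layout depends on the exported model, so adjust as needed.
    num_fields = 6
    for row in range(min(5, len(results) // num_fields)):
        label = results[row * num_fields]
        score = results[row * num_fields + 1]
        print("det {}: label={:.0f} score={:.4f}".format(row, label, score))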