Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
# Sentiment-scoring web service: a small Flask app that wraps a Keras model.
from flask import Flask, jsonify, request
from keras.models import load_model
from keras.preprocessing.text import Tokenizer

app = Flask(__name__)
def get_model(weights_path='E:/faculta/Anul 3/LICENTA/SentimentModel/model_weights.h5'):
    """Load the sentiment model into the module-level ``model`` global.

    Args:
        weights_path: Path to the saved Keras ``.h5`` model file. Defaults
            to the original hard-coded location so existing callers
            (``get_model()`` with no arguments) behave exactly as before.
    """
    global model
    model = load_model(weights_path)
    print("Model loaded!")
def preprocess_text(tweet_list, num_words=3000):
    """Convert a list of tweet strings into a binary bag-of-words matrix.

    Args:
        tweet_list: List of raw tweet strings.
        num_words: Vocabulary cap for the Tokenizer (defaults to the
            original hard-coded 3000).

    Returns:
        A 2-D matrix of shape (len(tweet_list), num_words) with binary
        word-presence indicators.

    FIXME(review): the Tokenizer is fitted on the *incoming* tweets, so the
    resulting word-index mapping almost certainly differs from the vocabulary
    the model was trained with — predictions are therefore unreliable. The
    tokenizer fitted at training time should be persisted (e.g. pickled) and
    reloaded here instead of being refit per call.
    """
    tokenizer = Tokenizer(num_words=num_words)
    tokenizer.fit_on_texts(tweet_list)
    sequences = tokenizer.texts_to_sequences(tweet_list)
    return tokenizer.sequences_to_matrix(sequences, mode='binary')
# Load the model once at import time so the first /predict request does not
# pay the (slow) model-deserialization cost.
print("Loading model...")
get_model()
@app.route('/predict', methods=['POST', 'OPTIONS'])
def post_result():
    """Score a JSON array of tweets and return the average sentiment.

    Expects the request body to be a JSON array of tweet strings.
    Returns ``{"prediction": {"negative": <float>, "positive": <float>}}``
    where the values are the means of the model's per-tweet outputs.

    NOTE(review): listing 'OPTIONS' here disables Flask's automatic OPTIONS
    handling, so a CORS preflight runs this full prediction path — probably
    unintended; confirm against the front-end's CORS setup.
    """
    # force=True parses the body as JSON even without a JSON Content-Type.
    message = request.get_json(force=True)
    tweets = list(message)

    # Guard against an empty payload, which previously raised
    # ZeroDivisionError when averaging over len(prediction) == 0.
    if not tweets:
        return jsonify({'prediction': {'negative': 0.0, 'positive': 0.0}})

    processed_tweets = preprocess_text(tweets)
    prediction = model.predict(processed_tweets).tolist()

    # Each row is assumed to be [negative_score, positive_score] — TODO
    # confirm against the model's output layer ordering.
    avg_neg = sum(pr[0] for pr in prediction) / len(prediction)
    avg_pos = sum(pr[1] for pr in prediction) / len(prediction)

    response = {
        'prediction': {
            'negative': avg_neg,
            'positive': avg_pos,
        }
    }
    return jsonify(response)
# Bug fix: the original compared against '_main_' (single underscores),
# which never matches, so app.run() was unreachable when the script was
# executed directly. The correct sentinel is '__main__'.
if __name__ == '__main__':
    app.run()
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement