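# Bot for r/brasilivre (summary inferred from the code below): mirrors rising
# image posts to Twitter and cross-posts selected O Antagonista RSS items to
# the subreddit; phases for link archiving, feed/video diversification and
# low-karma cleanup are present but commented out.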
import os
import io
import time
import random
import urllib.request

import archiveis
import feedparser
import praw
import psycopg2
import savepagenow
import yapi
from PIL import Image
from twython import Twython

DATABASE_URL = os.environ['DATABASE_URL']
conn = psycopg2.connect(DATABASE_URL, sslmode='require')
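
# `conn` is opened above but never used again in this script. A minimal
# sketch of how it could persist the tweet queue across restarts (the table
# name `twitter_queue` is hypothetical, not from the original):
#
# with conn, conn.cursor() as cur:
#     cur.execute('CREATE TABLE IF NOT EXISTS twitter_queue (s_id text PRIMARY KEY)')
#     cur.execute('SELECT s_id FROM twitter_queue')
#     twitter_queue = [row[0] for row in cur.fetchall()]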

TWITTER_API_KEY = os.environ['TWITTER_API_KEY']
TWITTER_API_SECRET = os.environ['TWITTER_API_SECRET']
TWITTER_ACCESS_TOKEN = os.environ['TWITTER_ACCESS_TOKEN']
TWITTER_ACCESS_SECRET = os.environ['TWITTER_ACCESS_SECRET']

YOUTUBE_API_KEY = os.environ['YOUTUBE_API_KEY']

twitter = Twython(TWITTER_API_KEY, TWITTER_API_SECRET,
                  TWITTER_ACCESS_TOKEN, TWITTER_ACCESS_SECRET)

r = praw.Reddit(
    client_id=os.environ['CLIENT_ID'],
    client_secret=os.environ['CLIENT_SECRET'],
    username=os.environ['REDDIT_USER'],
    password=os.environ['REDDIT_PASS'],
    user_agent='agent v 0.1 by /u/bolnossaurorex - r/brasilivre')

yt = yapi.YoutubeAPI(YOUTUBE_API_KEY)

formats = ['.jpg', '.png', '.jpeg', '.gif', '.gifv']
twitter_queue = []
news_table = []
video_table = []
cleanup_counter = 0
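
# Rows in news_table and video_table have the shape [title, url, seen_rising,
# posted] (field names here are descriptive only): index 2 is set once the
# link shows up in /rising, index 3 once the bot has submitted it.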

def get_news(link, news_table):
    # Parse the feed and pick a random entry from its newer half (capped at
    # the first four entries), so repeated calls vary the selection.
    feed = feedparser.parse(link)
    halflength = len(feed['entries']) // 2
    if halflength > 3:
        halflength = 3
    index = random.randint(0, halflength)
    title = feed['entries'][index]['title']
    url = feed['entries'][index]['link']
    if url not in [x[1] for x in news_table]:
        news_table.insert(0, [title, url, False, False])
    return news_table
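
# Usage sketch, reusing one of the feed URLs listed further below:
#   news_table = get_news('http://g1.globo.com/dynamo/tecnologia/rss2.xml', news_table)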

def get_videos(api, channel_id, video_table, size=50):
    # Pick a random video from the newer half of the channel's latest uploads.
    result = api.video_search_in_channel('', channel_id, order='date', max_results=size)
    length = len(result.items) // 2
    index = random.randint(0, length)
    try:
        yt_id = result.items[index].id.videoId
        title = result.items[index].snippet.title
    except (IndexError, AttributeError):
        # fall back to the newest upload if the random pick is unusable
        yt_id = result.items[0].id.videoId
        title = result.items[0].snippet.title
    url = 'https://www.youtube.com/watch?v=' + yt_id
    if url not in [x[1] for x in video_table]:
        video_table.insert(0, [title, url, False, False])
    return video_table
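
# Main polling loop: (1) tweet rising image posts from r/brasilivre;
# (2) cross-post matching O Antagonista RSS items to the subreddit.
# The remaining phases below are commented out.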

while True:

    ### TWITTER
    try:
        for index, submission in enumerate(r.subreddit('brasilivre').rising(limit=2)):
            title = submission.title
            url = submission.url
            s_id = submission.id
            if s_id in twitter_queue:
                continue  # already processed
            twitter_queue.insert(0, s_id)
            twitter_queue = twitter_queue[0:100]  # keep only the 100 most recent ids
            if any(f in url for f in formats):
                # download the linked image and re-encode it as JPEG for upload
                filename = io.BytesIO(urllib.request.urlopen(url).read())
                img = Image.open(filename)
                img = img.convert('RGB')  # JPEG cannot store an alpha channel
                img_io = io.BytesIO()
                img.save(img_io, format='JPEG')
                img_io.seek(0)
                response = twitter.upload_media(media=img_io)
                twitter.update_status(status=title, media_ids=[response['media_id']])
                print(str(index) + ' TWITTER -- ' + str(title))
    except Exception:
        print('Twitter failure')

    # ### ARCHIVING
    # for index, submission in enumerate(r.subreddit('brasilivre').new(limit=1)):
    #     text = submission.selftext
    #     url = submission.url
    #     title = submission.title
    #     if len(text) != 0:
    #         continue  # skip self posts
    #     blocked = ['youtube.com', 'youtu.be', 'reddit.com', 'm.reddit', 'redd.it']
    #     blocked_title = ['@']
    #     if any(b in url for b in blocked):
    #         print(str(index) + ' blocked ' + url)
    #         continue
    #     if any(b in title for b in blocked_title):
    #         print(str(index) + ' blocked ' + title)
    #         continue
    #     # for/else: reply only when no existing comment already carries an archive link
    #     for comment in submission.comments:
    #         if '[Versão arquivada - archive.is]' in comment.body:
    #             break
    #     else:
    #         # reply template (Portuguese): archive.is, Wayback Machine and Outline
    #         # links, plus an opt-out note ("put the @ character in the title")
    #         string = '[Versão arquivada - archive.is]({0}) \n [Versão arquivada - Wayback Machine]({1}) \n [Versão para celular - Outline.com]({2}) \n Não quer que eu comente nos seus tópicos? Coloque o caractere @ no título.'
    #         try:
    #             archiveis_url = archiveis.capture(url)
    #         except Exception:
    #             archiveis_url = ''
    #         try:
    #             wayback_url = savepagenow.capture_or_cache(url)[0]
    #         except Exception:
    #             wayback_url = ''
    #         outline_url = 'https://outline.com/' + url
    #         new_string = string.format(archiveis_url, wayback_url, outline_url)
    #         try:
    #             submission.reply(new_string)
    #             print(str(index) + ' -- Success -- ' + url)
    #         except Exception:
    #             print(str(index) + ' -- Failure -- ' + url)


    ### CHECK O ANTAGONISTA
    try:
        feed = feedparser.parse('https://www.oantagonista.com/rss')
        for index, item in enumerate(feed['entries']):
            title = item['title']
            link = item['links'][0]['href']
            # cross-post TV items, urgent or all-caps headlines, and video posts
            if '/tv/' in link or 'urgente' in title.lower() or title.isupper() or 'vídeo' in title.lower():
                # for/else: submit only if the link is not already among the 50 newest posts
                for submission in r.subreddit('brasilivre').new(limit=50):
                    if submission.url == link:
                        break
                else:
                    print(str(index) + ' ' + 'Posting. - O Antagonista ' + title)
                    r.subreddit('brasilivre').submit(title, url=link)
    except Exception:
        print('O Antagonista - Feed Failure')

    # ### DIVERSIFY POSTS
    #
    # counter = 0
    #
    # myself = os.environ['REDDIT_USER']
    #
    # # count how many of the 10 newest posts are the bot's own
    # for index, submission in enumerate(r.subreddit('brasilivre').new(limit=10)):
    #     author = submission.author
    #     if author == myself:
    #         counter += 1
    #     if counter == 2:
    #         break
    # print('counter ', counter)
    #
    # # accumulate feeds
    # if counter < 2:
    #
    #     feeds = [
    #         'https://noticias.r7.com/tecnologia-e-ciencia/feed.xml',
    #         'http://g1.globo.com/dynamo/planeta-bizarro/rss2.xml',
    #         'http://g1.globo.com/dynamo/tecnologia/rss2.xml',
    #         'http://g1.globo.com/dynamo/pop-arte/rss2.xml',
    #         'http://g1.globo.com/dynamo/educacao/rss2.xml',
    #         'http://rss.uol.com.br/feed/jogos.xml',
    #     ]
    #
    #     videos = [
    #         'UCW9jLtlONRp7W-AK9F8M66Q',  # Luis Felipe Pondé
    #         'UCP6L9TPS3pHccVRiDB_cvqQ',  # Padre Paulo Ricardo
    #         'UCAL3JXZSzSm8AlZyD3nQdBA',  # Primitive Technology
    #         'UC2ip-TMkPtTsb2eCIHz29rQ',  # Eduardo Marinho
    #         'UCiEk4xHBbz0hZNIBBpowdYQ',  # Jimmy Diresta
    #         'UCBvc7pmUp9wiZIFOXEp1sCg',  # Demolition Ranch
    #         'UCJqOdpqndf1MPequlvDgGkA',  # Tradutores de Direita
    #         'UCKSfUkYtc3wGCSSOoBbNUHA',  # Maro Schweder
    #         'UC1-WVziv_VW5lEoGg4_tmng',  # Menino Prendado
    #         'UC84itY69Ol2ASqv_tdb37JQ',  # Area 51
    #         'UCjHDaKzT0-r3QaxFx8aRF3Q',  # Makarov Tecnologia
    #         'UC-NwgkrLPYmzM-xoLr2GX-Q',  # Ideias Radicais
    #         'UCcq1Xu3SKcwE1CpzGi-N68g',  # Canal de HQs
    #         'UCEWOoncsrmirqnFqxer9lmA',  # The Noite com Danillo Gentilli
    #         'UCQRPDZMSwXFEDS67uc7kIdg',  # Buenas Ideias
    #         'UCL_f53ZEJxp8TtlOkHwMV9Q',  # Jordan B Peterson
    #         'UCl79BVUfEZ830vH76L12ChA',  # Quadro em Branco
    #         'UCFzODWkN1gXm4ryOgLK8Deg',  # Ludoviajante
    #         'UCZq_CYXRoRjKqidapMPujaQ',  # Entreplanos
    #         'UClu474HMt895mVxZdlIHXEA',  # Nerdologia
    #         'UCpCJ1AS4afAWOJ5pNMFh4Dw',  # Danielle Noce
    #         'UCE3mTpIMy1QSZ9FEqm8Fg7g',  # Amigos por carros
    #         'UCGBIIPnw0AYM3BFsmTsjeAw',  # Acelerados
    #         'UC_mSfchV-fgpPy-vuwML8_A',  # Gustavo Cerbasi
    #         'UC8mDF5mWNGE-Kpfcvnn0bUg',  # Me Poupe
    #         'UCxDFRhF3Y1A_Gd0-cF8gbqQ',  # Mundo Militar
    #         'UCPyLXf8J1q1iNsMVa6S5siA',  # Gosto de Armas
    #         'UCh7TUTXojlE8vRtb-EnuDzw',  # Metaforando
    #         'UC70YG2WHVxlOJRng4v-CIFQ',  # Gabriel Pato
    #         'UCj8_DeCUB0uL4cJwFmiQfMQ',  # Hashtag Sal
    #         'UCSyG9ph5BJSmPRyzc_eGC4g',  # ancap.su
    #         'UCKaLsYNQcHBE3YJmxkF07cQ',  # legendas libertarias
    #         'UCBAPR-VRbL4InLKWOQ5GCMA',  # sociedade da virtude
    #         'UCl79BVUfEZ830vH76L12ChA',  # quadro em branco (duplicate of the entry above)
    #     ]
    #
    #     random.shuffle(feeds)
    #     random.shuffle(videos)
    #
    #     for feed in feeds:
    #         try:
    #             news_table = get_news(feed, news_table)
    #         except Exception:
    #             pass
    #
    #     for channel in videos:
    #         try:
    #             video_table = get_videos(yt, channel, video_table)
    #         except Exception:
    #             pass
    #
    #     news_table = news_table[0:1000]
    #     video_table = video_table[0:1000]
    #
    #     coin_toss = random.randint(0, 1)
    #     if coin_toss == 1:
    #         chosen_table = news_table
    #     elif coin_toss == 0:
    #         chosen_table = video_table
    #
    #     print('Coin toss ', coin_toss)
    #
    #     # submit the first row that has not been posted yet, then stop
    #     for row in chosen_table:
    #         if row[3] == False:
    #             title = row[0]
    #             link = row[1]
    #             r.subreddit('brasilivre').submit(title, url=link)
    #             print('Success ', link)
    #             row[3] = True
    #             break
    #
    # # mark table rows whose links reached /rising
    # for index, submission in enumerate(r.subreddit('brasilivre').rising(limit=3)):
    #
    #     url = submission.url
    #
    #     table = [x[1] for x in news_table]
    #     if url in table:
    #         for table_index, item in enumerate(news_table):
    #             if item[1] == url:
    #                 news_table[table_index][2] = True
    #                 break
    #
    #     table = [x[1] for x in video_table]
    #     if url in table:
    #         for table_index, item in enumerate(video_table):
    #             if item[1] == url:
    #                 video_table[table_index][2] = True
    #                 break
    #
    # time.sleep(30)
    # cleanup_counter += 1
    #
    #
    # ### CLEAN UP LOW-KARMA POSTS
    #
    # if cleanup_counter % 120 == 0:
    #     cleanup_counter = 0
    #     myself = os.environ['REDDIT_USER']
    #     # delete the bot's own posts that never reached /rising
    #     for index, submission in enumerate(r.subreddit('brasilivre').new(limit=50)):
    #         url = submission.url
    #         author = submission.author
    #
    #         table = [x[1] for x in news_table]
    #         if url in table:
    #             for table_index, item in enumerate(news_table):
    #                 if item[1] == url and item[2] == False and author == myself:
    #                     submission.delete()
    #
    #         table = [x[1] for x in video_table]
    #         if url in table:
    #             for table_index, item in enumerate(video_table):
    #                 if item[1] == url and item[2] == False and author == myself:
    #                     submission.delete()

    # throttle the live loop between polls
    time.sleep(30)