Advertisement
Not a member of Pastebin yet?
Sign up —
it unlocks many cool features!
import io
import os
import random
import time
import urllib.request

import archiveis
import feedparser
import praw
import psycopg2  # required by the psycopg2.connect() call below; was missing
import savepagenow
import yapi
from PIL import Image
from twython import Twython
# --- Configuration & API clients (all secrets come from environment variables) ---

# Postgres connection string, e.g. as provisioned by Heroku.
DATABASE_URL = os.environ['DATABASE_URL']
# NOTE(review): `conn` is opened here but never used anywhere in this file;
# this line also requires the `psycopg2` package to be importable.
conn = psycopg2.connect(DATABASE_URL, sslmode='require')

# Twitter (Twython) credentials.
TWITTER_API_KEY = os.environ['TWITTER_API_KEY']
TWITTER_API_SECRET = os.environ['TWITTER_API_SECRET']
TWITTER_ACCESS_TOKEN = os.environ['TWITTER_ACCESS_TOKEN']
TWITTER_ACCESS_SECRET = os.environ['TWITTER_ACCESS_SECRET']
YOUTUBE_API_KEY = os.environ['YOUTUBE_API_KEY']

twitter = Twython(TWITTER_API_KEY, TWITTER_API_SECRET, TWITTER_ACCESS_TOKEN, TWITTER_ACCESS_SECRET)

# Reddit client used to read and submit to r/brasilivre.
r = praw.Reddit(
    client_id=os.environ['CLIENT_ID'],
    client_secret=os.environ['CLIENT_SECRET'],
    username=os.environ['REDDIT_USER'],
    password=os.environ['REDDIT_PASS'],
    user_agent='agent v 0.1 by /u/bolnossaurorex - r/brasilivre')

yt = yapi.YoutubeAPI(YOUTUBE_API_KEY)

# URL substrings treated as tweetable image media.
formats = ['.jpg','.png','.jpeg', '.gif', '.gifv']
# Recently tweeted submission ids (dedup memory; trimmed to 100 in the loop).
twitter_queue = []
# Rows of [title, url, seen_rising_flag, submitted_flag] — filled by get_news.
news_table = []
# Same row shape as news_table — filled by get_videos.
video_table = []
# Loop-iteration counter for the (currently disabled) periodic cleanup.
cleanup_counter = 0
def get_news(link, news_table):
    """Fetch one random entry from the RSS feed at *link* and prepend it.

    Picks a random entry from the newest half of the feed (index capped at 3,
    biasing toward recent items) and inserts a row of
    [title, url, posted_flag, submitted_flag] at the front of *news_table*,
    unless that url is already present.

    Returns the (mutated) news_table for convenience; on an empty or
    unreachable feed the table is returned unchanged (previously this
    raised IndexError).
    """
    feed = feedparser.parse(link)
    entries = feed['entries']
    if not entries:
        return news_table
    halflength = min(len(entries) // 2, 3)  # newest half, capped at index 3
    index = random.randint(0, halflength)   # randint is inclusive on both ends
    title = entries[index]['title']
    url = entries[index]['link']
    if url not in [x[1] for x in news_table]:
        news_table.insert(0, [title, url, False, False])
    return news_table
def get_videos(api, channel_id, video_table, size=50):
    """Fetch one random recent video from a YouTube channel and prepend it.

    api: a yapi.YoutubeAPI-like object exposing video_search_in_channel().
    Picks a random result from the newest half of up to *size* search results
    and inserts [title, url, posted_flag, submitted_flag] at the front of
    *video_table*, unless that url is already present.

    Returns the (mutated) video_table for convenience.
    """
    result = api.video_search_in_channel('', channel_id, order='date', max_results=size)
    length = len(result.items) // 2
    index = random.randint(0, length)  # inclusive; biases toward newest half
    try:
        yt_id = result.items[index].id.videoId
        title = result.items[index].snippet.title
    except (IndexError, AttributeError):
        # Some results lack id.videoId (e.g. non-video hits); fall back to the
        # newest item. Narrowed from a bare `except:` so Ctrl-C still works.
        yt_id = result.items[0].id.videoId
        title = result.items[0].snippet.title
    url = 'https://www.youtube.com/watch?v=' + yt_id
    if url not in [x[1] for x in video_table]:
        video_table.insert(0, [title, url, False, False])
    return video_table
# Main bot loop: (1) cross-posts rising r/brasilivre image submissions to
# Twitter, (2) mirrors matching O Antagonista RSS headlines to the subreddit.
while True:

    ### TWITTER — tweet image posts that are currently rising.
    try:
        for index, submission in enumerate(r.subreddit('brasilivre').rising(limit=2)):
            title = submission.title
            url = submission.url
            s_id = submission.id
            if s_id in twitter_queue:
                continue  # already tweeted this submission
            twitter_queue.insert(0, s_id)
            twitter_queue = twitter_queue[0:100]  # cap dedup memory at 100 ids
            if any(f in url for f in formats):
                filename = io.BytesIO(urllib.request.urlopen(url).read())
                img = Image.open(filename)
                # JPEG has no alpha channel; convert so PNG/GIF sources do not
                # crash img.save() with "cannot write mode RGBA as JPEG".
                if img.mode != 'RGB':
                    img = img.convert('RGB')
                img_io = io.BytesIO()
                img.save(img_io, format='JPEG')
                img_io.seek(0)
                response = twitter.upload_media(media=img_io)
                twitter.update_status(status=title, media_ids=[response['media_id']])
                print(str(index) + ' TWITTER -- ' + str(title))
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit can
        # still stop the bot; any API/network error skips this cycle.
        print('Twitter failure')

    ### CHECAR ANTAGONISTA — submit matching O Antagonista headlines.
    try:
        feed = feedparser.parse('https://www.oantagonista.com/rss')
        # The original reached the entry list via list(feed.items())[1][1],
        # which silently depends on dict key order; feed['entries'] is the
        # documented accessor for the same list.
        for index, item in enumerate(feed['entries']):
            title = item['title']
            link = item['links'][0]['href']
            if '/tv/' in link or 'urgente' in title.lower() or title.isupper() or 'vídeo' in title.lower():
                # Skip if any of the 50 newest subreddit posts already links it.
                for submission in r.subreddit('brasilivre').new(limit=50):
                    if submission.url == link:
                        break
                else:
                    print(str(index) + ' ' + 'Posting. - O Antagonista ' + title)
                    r.subreddit('brasilivre').submit(title, url=link)
    except Exception:
        print('O Antagonista - Feed Failure')

    # NOTE(review): large commented-out feature blocks removed for readability
    # (archive.is/Wayback reply archiving, feed/video diversification via
    # get_news/get_videos, low-karma post cleanup). Recover from the original
    # paste/history if those features are revived.

    # Pace the loop so the Reddit/Twitter endpoints are not hit continuously;
    # 30 s matches the cadence used by the disabled diversification code.
    time.sleep(30)
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement