
Wallnot's Twitter bot: Version 2

It is only a few days since Wallnot.dk's Twitter bot went live. You can find the bot here and my post about it here.

The bot worked fine as such, but because of a limit of 250 requests per month in Twitter's API, I could only update 4 times a day, which is not very often (the old program made 2 requests every time it ran, i.e. 30 days * 4 updates * 2 requests = 240 requests).

Luckily I found TWINT, a Python module that makes it easy to fetch data from Twitter without using Twitter's API and its tiresome limits.
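
To give an idea of how simple it is to use, here is a minimal sketch of a TWINT search (the search term and date are just examples; the full program below uses the same handful of configuration options):

import twint

# Configure a search for tweets containing a term, from a given date onwards
c = twint.Config()
c.Search = "politiken.dk/del"
c.Since = "2019-10-01"
c.Store_object = True	# keep the collected tweets in memory as objects
c.Hide_output = True	# don't print every tweet to the console
twint.run.Search(c)

# The collected tweets are now available as a list of tweet objects
for tweet in twint.output.tweets_object:
	print(tweet.urls)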

By reusing most of my old code, I have now made a version of the bot that can run as often as I like. For now I have set it to run 4 times an hour.

Just for fun, I have also added a list of friendly adjectives about Politiken's and Zetland's subscribers, which the program picks from at random every time it posts a link to Twitter.

The finished code

Here is the finished code, if you are interested.

# -*- coding: utf-8 -*-
# Author: Morten Helmstedt. E-mail: helmstedt@gmail.com

import requests
from bs4 import BeautifulSoup
from datetime import datetime
from datetime import date
from datetime import timedelta
import json
import time
import random
import twint	# https://github.com/twintproject/twint
from TwitterAPI import TwitterAPI

# CONFIGURATION #
# List to store articles to post to Twitter
articlestopost = []

# Yesterday's date variable
yesterday = date.today() - timedelta(days=1)
since = yesterday.strftime("%Y-%m-%d")

# Twint configuration
c = twint.Config()
c.Hide_output = True
c.Store_object = True
c.Since = since

# API LOGIN
client_key = ''
client_secret = ''
access_token = ''
access_secret = ''
api = TwitterAPI(client_key, client_secret, access_token, access_secret)


# POLITIKEN #
# Run search
searchterm = "politiken.dk/del"
c.Search = searchterm
twint.run.Search(c)
tweets = twint.output.tweets_object

# Add urls in tweets to list and remove any duplicates from list
urllist = []
for tweet in tweets:
	for url in tweet.urls:
		if searchterm in url:
			urllist.append(url)

urllist = list(set(urllist))

# Only process urls that were not in our last Twitter query
proceslist = []
with open("./pol_lastbatch.json", "r", encoding="utf8") as fin:
	lastbatch = list(json.load(fin))
	for url in urllist:
		if url not in lastbatch:
			proceslist.append(url)
# Save current query to use for next time
with open("./pol_lastbatch.json", "wt", encoding="utf8") as fout:
	lastbatch = json.dumps(urllist)
	fout.write(lastbatch)

# Request articles, get titles and dates, and sort by date
articlelist = []
titlecheck = []

for url in proceslist:
	try:
		data = requests.get(url)
		result = data.text
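		# Skip articles that Politiken marks as freely accessible; only paywalled articles are of interest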
		if '"isAccessibleForFree": "True"' not in result:
			soup = BeautifulSoup(result, "lxml")
			# Finds titles and timestamps
			title = soup.find('meta', attrs={'property':'og:title'})
			title = title['content']
			timestamp = soup.find('meta', attrs={'property':'article:published_time'})
			timestamp = timestamp['content']
			dateofarticle = datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S%z')
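			# The share link redirects to the real article URL; grab it from the Location header of the first redirect response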
			realurl = data.history[0].headers['Location']
			if title not in titlecheck:
				articlelist.append({"title": title, "url": realurl, "date": dateofarticle})
				titlecheck.append(title)			
	except Exception as e:
		print(url)
		print(e)
			
articlelist_sorted = sorted(articlelist, key=lambda k: k['date']) 

# Check if article is already posted and update list of posted articles
with open("./pol_published.json", "r", encoding="utf8") as fin:
	alreadypublished = list(json.load(fin))
	# File below used for paywall.py to update wallnot.dk
	with open("./pol_full_share_links.json", "r", encoding="utf8") as finalready:	
		alreadypublishedalready = list(json.load(finalready))
		for art in articlelist_sorted:
			url = art['url']
			token = url.index("?shareToken")
			url = url[:token]
			if url not in alreadypublished:
				alreadypublished.append(url)
				articlestopost.append(art)
				alreadypublishedalready.append(art['url'])
		# Save updated already published links
		with open("./pol_published.json", "wt", encoding="utf8") as fout:
			alreadypublishedjson = json.dumps(alreadypublished)
			fout.write(alreadypublishedjson)
		with open("./pol_full_share_links.json", "wt", encoding="utf8") as fout:
			alreadypublishedjson = json.dumps(alreadypublishedalready)
			fout.write(alreadypublishedjson)


# ZETLAND #
# Run search
searchterm = "zetland.dk/historie"
c.Search = searchterm
twint.run.Search(c)
tweets = twint.output.tweets_object

# Add urls in tweets to list and remove any duplicates from list
urllist = []
for tweet in tweets:
	for url in tweet.urls:
		if searchterm in url:
			urllist.append(url)

urllist = list(set(urllist))

# Only process urls that were not in our last Twitter query
proceslist = []
with open("./zet_lastbatch.json", "r", encoding="utf8") as fin:
	lastbatch = list(json.load(fin))
	for url in urllist:
		if url not in lastbatch:
			proceslist.append(url)
# Save current query to use for next time
with open("./zet_lastbatch.json", "wt", encoding="utf8") as fout:
	lastbatch = json.dumps(urllist)
	fout.write(lastbatch)

# Request articles, get titles and dates, and sort by date
articlelist = []
titlecheck = []

for url in proceslist:
	try:
		data = requests.get(url)
		result = data.text
		soup = BeautifulSoup(result, "lxml")
		title = soup.find('meta', attrs={'property':'og:title'})
		title = title['content']
		timestamp = soup.find('meta', attrs={'property':'article:published_time'})
		timestamp = timestamp['content']
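		# Zetland's timestamps have fractional seconds and a timezone offset; strip the offset so strptime can parse them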
		timestamp = timestamp[:timestamp.find("+")]
		dateofarticle = datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S.%f')
		if title not in titlecheck:
			articlelist.append({"title": title, "url": url, "date": dateofarticle})
			titlecheck.append(title)
	except Exception as e:
		print(url)
		print(e)
			
articlelist_sorted = sorted(articlelist, key=lambda k: k['date']) 

# Check if article is already posted and update list of posted articles
with open("./zet_published.json", "r", encoding="utf8") as fin:
	alreadypublished = list(json.load(fin))
	for art in articlelist_sorted:
		title = art['title']
		if title not in alreadypublished:
			alreadypublished.append(title)
			articlestopost.append(art)
	# Save updated already published links
	with open("./zet_published.json", "wt", encoding="utf8") as fout:
		alreadypublishedjson = json.dumps(alreadypublished, ensure_ascii=False)
		fout.write(alreadypublishedjson)


# POST TO TWITTER #
friendlyterms = ["flink","rar","gavmild","velinformeret","intelligent","sød","afholdt","bedårende","betagende","folkekær","godhjertet","henrivende","smagfuld","tækkelig","hjertensgod","graciøs","galant","tiltalende","prægtig","kær","godartet","human","indtagende","fortryllende","nydelig","venlig","udsøgt","klog","kompetent","dygtig","ejegod","afholdt","omsorgsfuld","elskværdig","prægtig","skattet","feteret"]
enjoyterms = ["God fornøjelse!", "Nyd den!", "Enjoy!", "God læsning!", "Interessant!", "Spændende!", "Vidunderligt!", "Fantastisk!", "Velsignet!", "Glæd dig!", "Læs den!", "Godt arbejde!", "Wauv!"]

if articlestopost:
	for art in articlestopost:
		if "zetland" in art['url']:
			medium = "Zetland"
		else:
			medium = "Politiken"
		friendlyterm = random.choice(friendlyterms)
		enjoyterm = random.choice(enjoyterms)
		status = "En " + friendlyterm + " abonnent på " + medium + " har delt en artikel. " + enjoyterm + " " + art['url']
		r = api.request('statuses/update', {'status': status})
		time.sleep(15)

How I pull links to free Zetland articles from Zetland's Twitter account for wallnot.dk

On wallnot.dk I publish a list of free articles from a long list of media outlets that use paywalls. The site is meant as a service for readers who know that they want to read news articles and that they do not want to pay for them.

Zetland is not like the other newspapers. There is no front page with links to all newly published articles. Instead, Zetland uses Twitter to post teasers.

I thought it was a shame not to have Zetland on Wallnot, so instead of looking for links on a front page, as Wallnot does for the other outlets, I used Twitter's API to pull out article links.

Here you can see how I did it. If you want to try the program out, you need to register as a developer on Twitter.

# -*- coding: utf-8 -*-
# Author: Morten Helmstedt. E-mail: helmstedt@gmail.com
""" This program uses the Twitter API to get a list of free articles from Zetland """

import requests
from bs4 import BeautifulSoup
from datetime import datetime
from datetime import date
import json
from nested_lookup import nested_lookup
import base64


# GETS TWITTER DATA #

# Key and secret from Twitter developer account: https://developer.twitter.com/en/apply/user
client_key = ''
client_secret = ''

# Key and secret encoding, preparing for Twitter request
key_secret = '{}:{}'.format(client_key, client_secret).encode('ascii')
b64_encoded_key = base64.b64encode(key_secret)
b64_encoded_key = b64_encoded_key.decode('ascii')

base_url = 'https://api.twitter.com/'
auth_url = '{}oauth2/token'.format(base_url)

auth_headers = {
	'Authorization': 'Basic {}'.format(b64_encoded_key),
	'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8'
}

auth_data = {
	'grant_type': 'client_credentials'
}

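# Request an application-only bearer token from Twitter using the client credentials grant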
auth_resp = requests.post(auth_url, headers=auth_headers, data=auth_data)
access_token = auth_resp.json()['access_token']

search_headers = {
	'Authorization': 'Bearer {}'.format(access_token)
}

# Search parameters for Zetland tweets
search_params = {
	'user_id': 452898921,
	'count': 35,
	'tweet_mode': 'extended',
	'exclude_replies': 'true',
	'trim_user': 'true'
}

# Request url for searching user timelines
search_url = '{}1.1/statuses/user_timeline.json'.format(base_url)

# Request to Twitter
search_resp = requests.get(search_url, headers=search_headers, params=search_params)

# Response from Twitter in json format
tweet_data = search_resp.json()
#prettyjson = json.dumps(tweet_data, ensure_ascii=False, indent=4) # Only needed for debugging to prettify json

# Looks for all instances of expanded_url (that is, links) in json	
linklist = list(set(nested_lookup('expanded_url', tweet_data)))

# Populates a list of links to Zetland articles
urllist = []
for link in linklist:
	if "zetland.dk/historie" in link:
		urllist.append(link)

		
# GETS ARTICLE DATA FROM ZETLAND #
		
# Request articles, get titles and dates, and sort by date directly from the Zetland site
articlelist = []
titlecheck = []

for url in urllist:
	try:
		data = requests.get(url)
		result = data.text

		# Soup the page to find the article's title and publication date
		soup = BeautifulSoup(result, "lxml")

		title = soup.find('meta', attrs={'property':'og:title'})
		title = title['content']
		
		timestamp = soup.find('meta', attrs={'property':'article:published_time'})
		timestamp = timestamp['content']
		timestamp = timestamp[:timestamp.find("+")]
		dateofarticle = datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S.%f')
		
		if title not in titlecheck:
			articlelist.append({"title": title, "url": url, "date": dateofarticle})
			titlecheck.append(title)
	except Exception as e:
		print(url)
		print(e)


# PREPARES LIST OF ARTICLES FOR WALLNOT.DK #

# Sort articles by date (newest first)		
articlelist_sorted = sorted(articlelist, key=lambda k: k['date'], reverse=True) 

# Removes articles older than approximately three months
articlelist_recent = []
now = datetime.now()
for article in articlelist_sorted:
	timesincelast = now - article["date"]
	if timesincelast.days < 92:
		articlelist_recent.append(article)

# Converts dates to friendly format for display and outputs articles as html paragraphs
zet_linkstr = ""
for article in articlelist_recent:
	friendlydate = article["date"].strftime("%d/%m")
	zet_linkstr += '<p>' + friendlydate + ': ' + '<a href="' + article["url"] + '">' + article["title"] + '</a></p>\n' 

# Prints list of articles	
print(zet_linkstr)