You can now find updated versions of my various tools for fetching data from Nordnet at https://github.com/helmstedt/nordnet-utilities. Enjoy.
Tag: programmering
Digital Post from mit.dk to your e-mail
At https://github.com/helmstedt/digitalpost-utilities I have launched a program that lets you skip logging in to mit.dk every time you have received new Digital Post.
I used https://github.com/dk/Net-MitDK to understand the approach, and Fiddler to monitor the traffic to and from https://mit.dk and work out the site's API.
The program's two main components are a) a program that completes the first login to mit.dk in a browser with NemID/MitID, and b) a program that renews access tokens for the site, queries the API for new mail and sends e-mails on their way.
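Both programs import a small shared configuration module, mit_dk_configuration.py. The repository contains the authoritative version; the sketch below is merely inferred from the names the two programs use, with placeholder values you fill in yourself:

# mit_dk_configuration.py (sketch): shared settings for both programs.
# All values below are placeholders, not real settings.
tokens_filename = 'mit_dk_tokens.json'    # where login tokens are stored
email_data = {
    'emailserver': 'smtp.example.com',    # your outgoing SMTP server
    'emailserverport': 587,               # SMTP port (587 is typical for STARTTLS)
    'emailusername': 'me@example.com',    # SMTP username
    'emailpassword': 'changeme',          # SMTP password
    'emailfrom': 'me@example.com',        # sender address on forwarded mail
    'emailto': 'me@example.com',          # where to deliver your Digital Post
}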
Program that completes the first login to mit.dk in a browser with NemID/MitID
# Logs in to mit.dk and saves tokens needed for further requests.
# Method from https://github.com/dk/Net-MitDK/. Thank you.
from seleniumwire import webdriver
import requests
from bs4 import BeautifulSoup
import http.cookies
import gzip
import json
import base64
from hashlib import sha256
import string
import secrets
from mit_dk_configuration import tokens_filename

def random_string(size):
    letters = string.ascii_lowercase+string.ascii_uppercase+string.digits+string.punctuation+string.whitespace
    random_string = ''.join(secrets.choice(letters) for i in range(size))
    encoded_string = random_string.encode(encoding="ascii")
    url_safe_string = base64.urlsafe_b64encode(encoded_string).decode()
    url_safe_string_no_padding = url_safe_string.replace('=','')
    return url_safe_string_no_padding

def save_tokens(response):
    with open(tokens_filename, "wt", encoding="utf8") as token_file:
        token_file.write(response)

# Random values for the OAuth2 authorization code flow with PKCE
state = random_string(23)
nonce = random_string(93)
code_verifier = random_string(93)
code_challenge = base64.urlsafe_b64encode(sha256(code_verifier.encode('ascii')).digest()).decode().replace('=','')

login_url = 'https://gateway.mit.dk/view/client/authorization/login?client_id=view-client-id-mobile-prod-1-id&response_type=code&scope=openid&state=' + state + '&code_challenge=' + code_challenge + '&code_challenge_method=S256&response_mode=query&nonce=' + nonce + '&redirect_uri=com.netcompany.mitdk://nem-callback&deviceName=digitalpost-utilities&deviceId=pc&lang=en_US'

options = webdriver.ChromeOptions()
options.add_argument("--log-level=3")
driver = webdriver.Chrome(options=options)
login = driver.get(login_url)
print("Opening browser window. Log in to mit.dk using MitID or NemID in the browser.")
print("When you see a blank page in your browser at https://nemlog-in.mitid.dk/LoginOption.aspx, you're finished.")
input("Press ENTER once you're finished.")

session = requests.Session()
session.cookies.set('cookiecheck', 'Test', domain='nemlog-in.mitid.dk')
session.cookies.set('loginMethod', 'noeglekort', domain='nemlog-in.mitid.dk')
for request in driver.requests:
    if '/api/mailboxes' in request.url and request.method == 'GET' and request.response.status_code == 200:
        cookies = request.headers['Cookie'].split("; ")
        for cookie in cookies:
            if 'LoggedInBorgerDk' in cookie or 'CorrelationId' in cookie:
                key_value = cookie.split('=')
                session.cookies.set(key_value[0], key_value[1], domain='.post.borger.dk')
    if request.response:
        headers_string = str(request.response.headers)
        headers_list = headers_string.split('\n')
        for header in headers_list:
            if 'set-cookie' in header:
                cookie_string = header.replace('set-cookie: ','')
                cookie = http.cookies.BaseCookie(cookie_string)
                for key in cookie.keys():
                    # Requests is picky about dashes in cookie expiration dates. Fix.
                    if 'expires' in cookie[key]:
                        expiry = cookie[key]['expires']
                        if expiry:
                            expiry_list = list(expiry)
                            expiry_list[7] = '-'
                            expiry_list[11] = '-'
                            cookie[key]['expires'] = ''.join(expiry_list)
                session.cookies.update(cookie)
    if request.method == 'POST' and request.url == 'https://nemlog-in.mitid.dk/LoginOption.aspx' and request.response.status_code == 200:
        if request.response.headers['content-encoding'] == 'gzip':
            response = gzip.decompress(request.response.body).decode()
        else:
            response = request.response.body.decode()
        soup = BeautifulSoup(response, "html.parser")
        saml_input = soup.find_all('input', {"name":"SAMLResponse"})
        samlresponse = saml_input[0]["value"]
driver.close()

request_code_part_one = session.post('https://gateway.digitalpost.dk/auth/s9/nemlogin/ssoack', data={'SAMLResponse': samlresponse}, allow_redirects=False)
request_code_part_one_redirect_location = request_code_part_one.headers['Location']
request_code_part_two = session.get(request_code_part_one_redirect_location, allow_redirects=False)
request_code_part_two_redirect_location = request_code_part_two.headers['Location']
request_code_part_three = session.get(request_code_part_two_redirect_location, allow_redirects=False)
request_code_part_three_redirect_location = request_code_part_three.headers['Location']
code_start = request_code_part_three_redirect_location.index('code=') + 5
code_end = request_code_part_three_redirect_location.index('&', code_start)
code = request_code_part_three_redirect_location[code_start:code_end]
redirect_url = 'com.netcompany.mitdk://nem-callback'
token_url = 'https://gateway.mit.dk/view/client/authorization/token?grant_type=authorization_code&redirect_uri=' + redirect_url + '&client_id=view-client-id-mobile-prod-1-id&code=' + code + '&code_verifier=' + code_verifier
request_tokens = session.post(token_url)
save_tokens(request_tokens.text)
print('Login to mit.dk went fine.')
print(f'Tokens saved to {tokens_filename}.')
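What ends up in the token file is simply the gateway's JSON response. Judging from how the next program reads it, the saved structure is roughly this (shortened placeholder values):

{
    "dpp": {"access_token": "...", "refresh_token": "..."},
    "ngdp": {"access_token": "...", "refresh_token": "..."}
}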
Program that renews access tokens, queries the API for new mail and sends e-mails on their way
# Sends unread messages from mit.dk to an e-mail.
import requests
import json
import smtplib # Sending e-mails
from email.mime.multipart import MIMEMultipart # Creating multipart e-mails
from email.mime.text import MIMEText # Attaching text to e-mails
from email.mime.application import MIMEApplication # Attaching files to e-mails
from email.utils import formataddr # Used for correct encoding of senders with special characters in name (e.g. Københavns Kommune)
from mit_dk_configuration import email_data, tokens_filename

base_url = 'https://gateway.mit.dk/view/client/'
session = requests.Session()

def open_tokens():
    try:
        with open(tokens_filename, "r", encoding="utf8") as token_file:
            tokens = json.load(token_file)
        return tokens
    except:
        return print('Unable to open and parse token file. Did you run mit_dk_first_login.py?')

def revoke_old_tokens(mitdkToken, ngdpToken, dppRefreshToken, ngdpRefreshToken):
    endpoint = 'authorization/revoke?client_id=view-client-id-mobile-prod-1-id'
    json_data = {
        'dpp': {
            'token': mitdkToken,
            'token_type_hint': 'access_token'
        },
        'ngdp': {
            'token': ngdpToken,
            'token_type_hint': 'access_token'
        },
    }
    revoke_access_tokens = session.post(base_url + endpoint, json=json_data)
    if not revoke_access_tokens.status_code == 200:
        print("Something went wrong when trying to revoke old access tokens. Here is the response:")
        print(revoke_access_tokens.text)
    json_data = {
        'dpp': {
            'refresh_token': dppRefreshToken,
            'token_type_hint': 'refresh_token'
        },
        'ngdp': {
            'refresh_token': ngdpRefreshToken,
            'token_type_hint': 'refresh_token'
        },
    }
    revoke_refresh_tokens = session.post(base_url + endpoint, json=json_data)
    if not revoke_refresh_tokens.status_code == 200:
        print("Something went wrong when trying to revoke old refresh tokens. Here is the response:")
        print(revoke_refresh_tokens.text)

def refresh_and_save_tokens(dppRefreshToken, ngdpRefreshToken):
    endpoint = 'authorization/refresh?client_id=view-client-id-mobile-prod-1-id'
    json_data = {
        'dppRefreshToken': dppRefreshToken,
        'ngdpRefreshToken': ngdpRefreshToken,
    }
    refresh = session.post(base_url + endpoint, json=json_data)
    if not refresh.status_code == 200:
        print("Something went wrong trying to fetch new tokens.")
    refresh_json = refresh.json()
    if 'code' in refresh_json:
        print("Something went wrong trying to fetch new tokens. Here's the response:")
        print(refresh_json)
        return False
    else:
        with open(tokens_filename, "wt", encoding="utf8") as token_file:
            token_file.write(refresh.text)
        return refresh_json

def get_fresh_tokens_and_revoke_old_tokens():
    tokens = open_tokens()
    try:
        if 'dpp' in tokens:
            dppRefreshToken = tokens['dpp']['refresh_token']
            mitdkToken = tokens['dpp']['access_token']
        else:
            dppRefreshToken = tokens['refresh_token']
            mitdkToken = tokens['access_token']
        ngdpRefreshToken = tokens['ngdp']['refresh_token']
        ngdpToken = tokens['ngdp']['access_token']
        fresh_tokens = refresh_and_save_tokens(dppRefreshToken, ngdpRefreshToken)
        if fresh_tokens:
            revoke_old_tokens(mitdkToken, ngdpToken, dppRefreshToken, ngdpRefreshToken)
        return fresh_tokens
    except Exception as error:
        print(error)
        print('Unable to find tokens in token file. Try running mit_dk_first_login.py again.')

def get_simple_endpoint(endpoint):
    response = session.get(base_url + endpoint)
    return response.json()

def get_inbox_folders_and_build_query(mailbox_ids):
    endpoint = 'folders/query'
    json_data = {
        'mailboxes': {}
    }
    for mailbox in mailbox_ids:
        json_data['mailboxes'][mailbox['dataSource']] = mailbox['mailboxId']
    response = session.post(base_url + endpoint, json=json_data)
    try:
        response_json = response.json()
    except:
        print('Unable to convert response to json. Here is the response:')
        print(response.text)
    folders = []
    for folder in response_json['folders']['INBOX']:
        folder_info = {
            'dataSource': folder['dataSource'],
            'foldersId': [folder['id']],
            'mailboxId': folder['mailboxId'],
            'startIndex': 0
        }
        folders.append(folder_info)
    return folders

def get_messages(folders):
    endpoint = 'messages/query'
    json_data = {
        'any': [],
        'folders': folders,
        'size': 20,
        'sortFields': ['receivedDateTime:DESC']
    }
    response = session.post(base_url + endpoint, json=json_data)
    return response.json()

def get_content(message):
    content = []
    endpoint = message['dataSource'] + '/mailboxes/' + message['mailboxId'] + '/messages/' + message['id']
    for document in message['documents']:
        doc_url = '/documents/' + document['id']
        for file in document['files']:
            encoding_format = file['encodingFormat']
            file_name = file['filename']
            file_url = '/files/' + file['id'] + '/content'
            file_content = session.get(base_url + endpoint + doc_url + file_url)
            content.append({
                'file_name': file_name,
                'encoding_format': encoding_format,
                'file_content': file_content
            })
    return content

def mark_as_read(message):
    endpoint = message['dataSource'] + '/mailboxes/' + message['mailboxId'] + '/messages/' + message['id']
    session.headers['If-Match'] = str(message['version'])
    json_data = {
        'read': True
    }
    mark_as_read = session.patch(base_url + endpoint, json=json_data)

mailserver_connect = False
tokens = get_fresh_tokens_and_revoke_old_tokens()
if tokens:
    session.headers['mitdkToken'] = tokens['dpp']['access_token']
    session.headers['ngdpToken'] = tokens['ngdp']['access_token']
    session.headers['platform'] = 'web'
    mailboxes = get_simple_endpoint('mailboxes')
    mailbox_ids = []
    for mailbox_group in mailboxes['groupedMailboxes']:
        for mailbox in mailbox_group['mailboxes']:
            mailbox_info = {
                'dataSource': mailbox['dataSource'],
                'mailboxId': mailbox['id']
            }
            mailbox_ids.append(mailbox_info)
    folders = get_inbox_folders_and_build_query(mailbox_ids)
    messages = get_messages(folders)
    for message in messages['results']:
        if message['read'] == False:
            if mailserver_connect == False:
                server = smtplib.SMTP(email_data['emailserver'], email_data['emailserverport'])
                server.ehlo()
                server.starttls()
                server.login(email_data['emailusername'], email_data['emailpassword'])
                mailserver_connect = True
            label = message['label']
            sender = message['sender']['label']
            message_content = get_content(message)
            msg = MIMEMultipart('alternative')
            msg['From'] = formataddr((sender, email_data['emailfrom']))
            msg['To'] = email_data['emailto']
            msg['Subject'] = "mit.dk: " + label
            for content in message_content:
                if content['encoding_format'] == 'text/plain':
                    body = content['file_content'].text
                    msg.attach(MIMEText(body, 'plain'))
                    part = MIMEApplication(content['file_content'].content)
                    part.add_header('Content-Disposition', 'attachment', filename=content['file_name'])
                    msg.attach(part)
                elif content['encoding_format'] == 'text/html':
                    body = content['file_content'].text
                    msg.attach(MIMEText(body, 'html'))
                    part = MIMEApplication(content['file_content'].content)
                    part.add_header('Content-Disposition', 'attachment', filename=content['file_name'])
                    msg.attach(part)
                elif content['encoding_format'] == 'application/pdf':
                    part = MIMEApplication(content['file_content'].content)
                    part.add_header('Content-Disposition', 'attachment', filename=content['file_name'])
                    msg.attach(part)
                else:
                    encoding_format = content['encoding_format']
                    print(f'New file type: {encoding_format}')
                    part = MIMEApplication(content['file_content'].content)
                    part.add_header('Content-Disposition', 'attachment', filename=content['file_name'])
                    msg.attach(part)
            print(f'Sending an e-mail from mit.dk from {sender} with the subject {label}')
            server.sendmail(email_data['emailfrom'], email_data['emailto'], msg.as_string())
            mark_as_read(message)
if mailserver_connect:
    server.quit()
Notification when reservations open at [hyped restaurant in Copenhagen]
A restaurant I would like to try is completely booked up and hasn't yet opened for reservations in April. How do I get first in line?
I visited the reservation system and observed how the internal API asked about available tables.
[Image: the request to the reservation system's API]
[Image: the API's response]
[Image: the API's response for April]
So the API responds with an empty 'data' key when reservations haven't been opened yet.
I wrote a small program, which I have set to run every five minutes, to check whether I can get in and make a reservation. The program checks whether any content has appeared in the 'data' key of the API's response. If it has, it sends me a message that I can go ahead and start booking a table (a sketch of the idea follows below).
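I'm keeping the restaurant's API and my exact program to myself, so here is just a minimal sketch of the idea, with the API URL and mail settings as invented placeholders:

# check_reservations.py (sketch): intended to be run every five minutes by cron.
# The URL and mail settings below are placeholders, not the real ones.
import requests
import smtplib
from email.mime.text import MIMEText

api_url = 'https://booking.example.com/api/availability?month=2022-04'
response = requests.get(api_url)
result = response.json()
# The API returns an empty 'data' key until reservations open
if result.get('data'):
    msg = MIMEText('Reservations are open - go book a table!')
    msg['Subject'] = 'Table alert'
    msg['From'] = 'me@example.com'
    msg['To'] = 'me@example.com'
    with smtplib.SMTP('smtp.example.com', 587) as server:
        server.starttls()
        server.sendmail(msg['From'], [msg['To']], msg.as_string())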
A next step could be to extend the program so it also books the table for me. But to begin with, I'll try to do that part of the work myself.
The people's wallnot.dk
A user on wallnot.dk wrote to me and suggested letting the site's users rate the quality of the articles the site links to, like on e.g. Hacker News. The idea is that good articles can then sit at the top, while the run-of-the-mill stuff sinks to the bottom – if, that is, the people are any good at judging that sort of thing.
I tried to build such a feature, and you can now try it out.
Interactivity with JavaScript
To make it properly slick and dynamic, I needed some JavaScript that can fire off a vote as soon as a user clicks ▲ or ▼.
I'm not terribly proficient in JavaScript, but I'm beginning to understand it, and with good help and a bit of copy/paste from various sources, I eventually landed on some code that seems to work.
The first part fetches a so-called CSRF cookie, which ensures that you have to visit Wallnot before you can vote on articles, and that you can't vote on behalf of others from other websites.
The second part sends off a request with the cookie value and the vote itself, and updates the vote count on the page once the request has been processed.
function getCookie(name) {
    let cookieValue = null;
    if (document.cookie && document.cookie !== '') {
        const cookies = document.cookie.split(';');
        for (let i = 0; i < cookies.length; i++) {
            const cookie = cookies[i].trim();
            if (cookie.substring(0, name.length + 1) === (name + '=')) {
                cookieValue = decodeURIComponent(cookie.substring(name.length + 1));
                break;
            }
        }
    }
    return cookieValue;
}
const csrftoken = getCookie('csrftoken');
document.querySelectorAll('.vote').forEach(function(el){
    el.addEventListener('click', function() {
        const article_votes_id = this.id.substring(0, this.id.indexOf('_')) + '_votes';
        const votes_to_replace = document.getElementById(article_votes_id);
        fetch('/process_vote', {
            method: "POST",
            headers: {
                "X-CSRFToken": csrftoken,
            },
            body: JSON.stringify({
                vote: this.id
            })
        }).then(function (response) {
            return response.json();
        })
        .then(function (data) {
            votes_to_replace.innerHTML = data.votes;
        })
        .catch(function (err) {
            console.log(err);
        });
    });
});
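For the script to have something to hook into, the template must render vote buttons with the class and the id pattern the script expects. The actual Wallnot markup isn't shown here, so this is a guessed sketch of what one article's buttons could look like: <article id>_up and <article id>_down on the arrows, <article id>_votes on the counter.

<!-- Hypothetical markup for one article with id 1234 -->
<span class="vote" id="1234_up">▲</span>
<span id="1234_votes">7</span>
<span class="vote" id="1234_down">▼</span>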
Processing the request
The request sends an article id along with information about whether it is a ▲ vote or a ▼ vote.
In Django's views.py, I write a function that receives the request and returns the vote count once the request has been processed. The function sends JSON data back to my JavaScript if (and only if) the vote has an existing article id followed by either "_up" or "_down". For anything else, the function replies that it is a teapot and therefore cannot help:
import json

from django.http import HttpResponse, JsonResponse

from .models import Article  # adjust to your app's models module

def process_vote(request):
    if request.method == "POST":
        try:
            vote = json.loads(request.body.decode())['vote']
            article_id = vote[:vote.index('_')]
            article = Article.objects.get(id=article_id)
            if '_up' in vote:
                article.votes += 1
            elif '_down' in vote:
                article.votes -= 1
            else:
                return HttpResponse(status=418)
            article.save()
            votes = {'votes': article.votes}
            return JsonResponse(votes)
        except:
            return HttpResponse(status=418)
    return HttpResponse(status=418)
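The JavaScript posts to /process_vote, so the view needs a matching route. A minimal sketch of the urls.py entry (module names assumed):

# urls.py (sketch): route the vote endpoint to the view above
from django.urls import path
from . import views

urlpatterns = [
    path('process_vote', views.process_vote, name='process_vote'),
]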
A sorting algorithm
The very last thing I needed was to develop a sorting algorithm that takes the age of articles into account, for use in my view. It took a little while to write, because it can sometimes be hard to work out how to do the calculations you need directly in the database query with Django's query syntax.
The algorithm does the following:
- Takes the number of votes and adds 1. When every article starts at 1, I prevent articles with a positive number of votes from always ranking above articles with no votes at all.
- Divides this number by 1 plus the number of hours since the article's publication time.
- The number of hours is calculated by taking the number of days since publication and multiplying by 24, then adding the remaining hours from the total interval of days and hours since publication.
- To avoid dividing by 0, I add 1 to the number of hours, and I take the absolute value of the number of hours since publication. That is necessary because the media occasionally publish articles with a publication time in the future.
- Because I divide votes by the number of hours since publication, a news item quickly loses its "value". If the people's Wallnot doesn't become a huge success, I may have to divide by the number of days instead, so the "penalty" for being an old article isn't quite as harsh.
Here is the algorithm written as a Django query (a plain-Python restatement follows below):
from django.db.models import ExpressionWrapper, F, FloatField
from django.db.models.functions import Abs, ExtractDay, ExtractHour, Now

articles = (
    Article.objects.filter(paywall_detected=False)
    .annotate(score=ExpressionWrapper(
        (F('votes') + 1) /
        (1 + Abs(ExtractDay(Now() - F('date')) * 24 + ExtractHour(Now() - F('date')))),
        output_field=FloatField()))
    .order_by('-score', '-date')
)
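Stripped of the Django plumbing, the score each article gets is just this (a plain-Python restatement for readability, not code from the site):

def score(votes, hours_since_published):
    # +1 on votes so articles without votes don't all get score 0;
    # +1 and abs() on hours guard against division by zero and future publication times
    return (votes + 1) / (1 + abs(hours_since_published))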
My own private eReolen
I like reading books, and I borrow many of them from eReolen. But for a book nerd, eReolen isn't very user-friendly. There are, admittedly, plenty of strange searches you can run if you're technical enough, but something like seeing which books have actually just been added is hard to keep track of.
eReolen does have a section they call "news", with a search that at the time of writing (February 2022) looks something like:
(dkcclterm.op=202112* OR dkcclterm.op=202201*) AND term.type=ebog and facet.category=voksenmaterialer
If you look up the description of the search indexes ("brøndindekser"), you can see that "dkcclterm.op" stands for:
dkcclterm.op | op | Oprettelsesdato (creation date) |
But how can it be that a view of new titles searches on creation dates in December and January? It's February now.
Because the "creation date" of a title is not the same as the date the title was added to eReolen. What it does mean I don't know for certain, but at any rate not the date the title was added to eReolen.
And that means that interesting books whose "dkcclterm.op" value lies far back in time can keep turning up.
And that means I risk missing something I'd like to read.
So what did I do?
I built my own eReolen! With a robot that monitors every night which titles are actually new. Every morning an e-mail awaits me with the number of titles the robot has found, and if I have the time and the coffee for it, I can browse the new titles over my morning coffee.
It works like this:
In Django, I built a data model of titles with various metadata:
from django.db import models
from isbn_field import ISBNField

class Author(models.Model):
    full_name = models.CharField('Forfatter', max_length=200, unique=True)
    birth_year = models.DateField(null=True)

    def __str__(self):
        return self.full_name

class Publisher(models.Model):
    publisher = models.CharField('Udgiver', max_length=200, unique=True)

    def __str__(self):
        return self.publisher

class Keyword(models.Model):
    keyword = models.CharField('Nøgleord', max_length=200, unique=True)

    def __str__(self):
        return self.keyword

class TitleType(models.Model):
    title_type = models.CharField('Type', max_length=200, unique=True)

    def __str__(self):
        return self.title_type

class Language(models.Model):
    language = models.CharField('Sprog', max_length=50, unique=True)

    def __str__(self):
        return self.language

class Isbn(models.Model):
    isbn = ISBNField(null=True, blank=True)

    def __str__(self):
        return self.isbn

class Audience(models.Model):
    audience = models.CharField('Målgruppe', max_length=200, unique=True)

    def __str__(self):
        return self.audience

class TitleFormat(models.Model):
    title_format = models.CharField('Format', max_length=50, unique=True)

    def __str__(self):
        return self.title_format

class Title(models.Model):
    added = models.DateField()
    object_id = models.CharField('Ereolen-id', max_length=50, unique=True)
    title = models.CharField('Titel', max_length=500)
    original_title = models.CharField('Originaltitel', max_length=500, default="")
    publish_date = models.DateField(null=True)
    dk5 = models.CharField('DK5-kode', max_length=10, default="")
    cover_url = models.URLField('Cover-url', max_length=500, null=True)
    ereolen_url = models.URLField('Ereolen-url', max_length=500)
    abstract = models.TextField(blank=True)
    dkcclterm_op = models.DateField()
    publisher = models.ForeignKey(Publisher, on_delete=models.CASCADE)
    language = models.ForeignKey(Language, on_delete=models.CASCADE)
    title_type = models.ForeignKey(TitleType, on_delete=models.CASCADE)
    title_format = models.ForeignKey(TitleFormat, on_delete=models.CASCADE)
    author = models.ManyToManyField(Author)
    keyword = models.ManyToManyField(Keyword)
    audience = models.ManyToManyField(Audience)
    isbn = models.ManyToManyField(Isbn)

    def __str__(self):
        return self.title

    def get_authors(self):
        return " & ".join([author.full_name for author in self.author.all()])
    get_authors.short_description = "Author(s)"

    def get_isbns(self):
        return ", ".join([isbn.isbn for isbn in self.isbn.all()])
    get_isbns.short_description = "ISBN(s)"

    def get_keywords(self):
        return ", ".join([keyword.keyword for keyword in self.keyword.all()])
    get_keywords.short_description = "Keyword(s)"

    def get_audiences(self):
        return ", ".join([audience.audience for audience in self.audience.all()])
    get_audiences.short_description = "Audience(s)"
In Python, I wrote a robot that searches through eReolen, adds new titles to my database, and ignores titles already in the database. I set the robot up to run every night on my server:
# -*- coding: utf-8 -*-
# Author: Morten Helmstedt. E-mail: helmstedt@gmail.com
""" This program saves ebooks, audiobooks and podcasts from ereolen.dk to a local database
that can be used to detect new titles better than ereolen.dk's own search options """

import requests # make http requests
from bs4 import BeautifulSoup # parse html responses
from datetime import date # create date objects
from dateutil.relativedelta import relativedelta # adding and subtracting months to dates
import re # regex for publish year parsing
import psycopg2 # work with postgresql databases
from psycopg2 import Error # database error handling

# Connect to database
try:
    connection = psycopg2.connect(user = "",
                                  password = "",
                                  host = "",
                                  port = "",
                                  database = "")
    cursor = connection.cursor()
except (Exception, psycopg2.Error) as error:
    print("Error while connecting to PostgreSQL", error)

# Set configuration options and global variables
base_url = 'https://ereolen.dk'
term_types = ['ebog','lydbog','podcast']
added = date.today()
number_of_months_to_search = 200
start_month = added - relativedelta(months=number_of_months_to_search-2)

# Search period list goes from current month plus one month and back to start_month
search_period = []
for i in reversed(range(0,number_of_months_to_search)):
    year_month_date = start_month + relativedelta(months=+i)
    year_month = [year_month_date.year, year_month_date.month]
    search_period.append(year_month)

# Crawl loop
title_counter = 0
for year_month in search_period:
    for term_type in term_types:
        start_date = date(year_month[0],year_month[1],1)
        dkcclterm_op_search = start_date.strftime("%Y%m")
        page = 0
        pages_left = True
        while pages_left == True:
            # Search for hits
            search_url = base_url + '/search/ting/dkcclterm.op%3D' + dkcclterm_op_search + '*%20AND%20term.type%3D' + term_type + '?page=' + str(page) + '&sort=date_descending'
            request = requests.get(search_url)
            result = request.text
            # If an error message is returned in the search, either no results are left, or ereolen.dk is down for some reason
            # In this case, the while loop is broken to try next item type and/or next year-month combination
            if 'Vi kan desværre ikke finde noget, der matcher din søgning' in result or 'The website encountered an unexpected error. Please try again later.' in result:
                pages_left = False
                break
            # Parse hits and get all item links
            soup = BeautifulSoup(result, "lxml")
            links = soup.find_all('a', href=True)
            item_links = {link['href'] for link in links if "/ting/collection/" in link['href']}
            # Go through item links
            for link in item_links:
                # Get id and check if link is already in database
                object_id = link[link.rfind('/')+1:].replace('%3A',':')
                search_sql = '''SELECT * from ereolen_title WHERE object_id = %s'''
                cursor.execute(search_sql, (object_id, ))
                item_hit = cursor.fetchone()
                # No hits means item is not in database and should be added
                if not item_hit:
                    ### ADD SEQUENCE ###
                    # Set full url for item
                    ereolen_url = base_url + link
                    # Request item and parse html
                    title_request = requests.get(ereolen_url)
                    title_result = title_request.text
                    title_soup = BeautifulSoup(title_result, "lxml")
                    # TITLE FIELDS #
                    # TITLE
                    try:
                        title = title_soup.find('div', attrs={'class':'field-name-ting-title'}).text.replace(" : ",": ")
                    except:
                        print("No title on:", ereolen_url)
                        break
                    # ORIGINAL TITLE
                    try:
                        original_title = title_soup.find('div', attrs={'class':'field-label'}, string=re.compile("Original titel:")).next.next.text
                    except:
                        original_title = ''
                    # PUBLISHED
                    try:
                        published = title_soup.find('div', class_={"field-name-ting-author"}).get_text()
                        published = int(re.search(r"[(]\d\d\d\d[)]", published).group()[1:5])
                        publish_date = date(published,1,1)
                    except:
                        publish_date = None
                    # COVER URL
                    try:
                        cover_url = title_soup.find('div', class_={"ting-cover"}).img['src']
                    except:
                        try:
                            data = {
                                'coverData[0][id]': object_id,
                                'coverData[0][image_style]': 'ding_primary_large'
                            }
                            response = requests.post('https://ereolen.dk/ting/covers', data=data)
                            response_json = response.json()
                            cover_url = response_json[0]['url']
                        except:
                            cover_url = ''
                    # ABSTRACT
                    abstract = title_soup.find('div', attrs={'class':'field-name-ting-abstract'}).text
                    # DKCCLTERM_OP
                    dkcclterm_op = start_date
                    # FOREIGN KEY FIELDS #
                    # LANGUAGE
                    try:
                        ereolen_language = title_soup.find('div', attrs={'class':'field-label'}, string=re.compile("Sprog:")).next.next.text
                    except:
                        ereolen_language = 'Ukendt'
                    language_sql = '''SELECT * from ereolen_language WHERE language = %s'''
                    cursor.execute(language_sql, (ereolen_language, ))
                    try:
                        language = cursor.fetchone()[0]
                    except:
                        language_insert = '''INSERT INTO ereolen_language(language) VALUES(%s) RETURNING id'''
                        cursor.execute(language_insert, (ereolen_language, ))
                        language = cursor.fetchone()[0]
                    # PUBLISHER
                    try:
                        ereolen_publisher = title_soup.find('div', attrs={'class':'field-label'}, string=re.compile("Forlag:")).next.next.text
                    except:
                        ereolen_publisher = 'Ukendt'
                    publisher_sql = '''SELECT * from ereolen_publisher WHERE publisher = %s'''
                    cursor.execute(publisher_sql, (ereolen_publisher, ))
                    try:
                        publisher = cursor.fetchone()[0]
                    except:
                        publisher_insert = '''INSERT INTO ereolen_publisher(publisher) VALUES(%s) RETURNING id'''
                        cursor.execute(publisher_insert, (ereolen_publisher, ))
                        publisher = cursor.fetchone()[0]
                    # TYPE
                    try:
                        ereolen_type = title_soup.find('div', attrs={'class':'field-label'}, string=re.compile("Type:")).next.next.text
                    except:
                        ereolen_type = 'Ukendt'
                    type_sql = '''SELECT * from ereolen_titletype WHERE title_type = %s'''
                    cursor.execute(type_sql, (ereolen_type, ))
                    try:
                        title_type = cursor.fetchone()[0]
                    except:
                        title_type_insert = '''INSERT INTO ereolen_titletype(title_type) VALUES(%s) RETURNING id'''
                        cursor.execute(title_type_insert, (ereolen_type, ))
                        title_type = cursor.fetchone()[0]
                    # FORMAT
                    try:
                        ereolen_format = title_soup.find('div', attrs={'class':'field-label'}, string=re.compile("Ebogsformat:")).next.next.text
                    except:
                        ereolen_format = "Ukendt"
                    format_sql = '''SELECT * from ereolen_titleformat WHERE title_format = %s'''
                    cursor.execute(format_sql, (ereolen_format, ))
                    try:
                        title_format = cursor.fetchone()[0]
                    except:
                        title_format_insert = '''INSERT INTO ereolen_titleformat(title_format) VALUES(%s) RETURNING id'''
                        cursor.execute(title_format_insert, (ereolen_format, ))
                        title_format = cursor.fetchone()[0]
                    # DK5 - TODO: Not done yet
                    dk5 = ""
                    ### SAVE BEFORE ADDING MANY-TO-MANY FIELDS ###
                    title_data = (added,title_type,title,original_title,publisher,object_id,language,publish_date,cover_url,ereolen_url,title_format,abstract,dkcclterm_op,dk5)
                    title_insert = '''INSERT INTO ereolen_title(added,title_type_id,title,original_title,publisher_id,object_id,language_id,publish_date,cover_url,ereolen_url,title_format_id,abstract,dkcclterm_op,dk5) VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s) RETURNING id'''
                    cursor.execute(title_insert, title_data)
                    title_id = cursor.fetchone()[0]
                    connection.commit()
                    # MANY-TO-MANY FIELDS #
                    # AUDIENCE(S)
                    try:
                        audience_div = title_soup.find('div', attrs={'class':'field-label'}, string=re.compile("Målgruppe:")).next.next
                        audiences = audience_div.find_all('span')
                        audiences_list = [aud.text for aud in audiences]
                    except:
                        audiences_list = ['Ukendt']
                    for audience in audiences_list:
                        audience_sql = '''SELECT * from ereolen_audience WHERE audience = %s'''
                        cursor.execute(audience_sql, (audience, ))
                        try:
                            audience_id = cursor.fetchone()[0]
                        except:
                            audience_insert = '''INSERT INTO ereolen_audience(audience) VALUES(%s) RETURNING id'''
                            cursor.execute(audience_insert, (audience, ))
                            audience_id = cursor.fetchone()[0]
                        audience_relation_sql = '''INSERT INTO ereolen_title_audience (title_id, audience_id) VALUES (%s,%s)'''
                        try:
                            cursor.execute(audience_relation_sql, (title_id,audience_id))
                        except:
                            connection.rollback()
                    # ISBN(S)
                    try:
                        isbn_div = title_soup.find('div', attrs={'class':'field-label'}, string=re.compile("ISBN:")).next.next
                        isbns = isbn_div.find_all('span')
                        isbns_list = [isb.text for isb in isbns]
                        for isbn in isbns_list:
                            isbn_sql = '''SELECT * from ereolen_isbn WHERE isbn = %s'''
                            cursor.execute(isbn_sql, (isbn, ))
                            try:
                                isbn_id = cursor.fetchone()[0]
                            except:
                                isbn_insert = '''INSERT INTO ereolen_isbn(isbn) VALUES(%s) RETURNING id'''
                                cursor.execute(isbn_insert, (isbn, ))
                                isbn_id = cursor.fetchone()[0]
                            isbn_relation_sql = '''INSERT INTO ereolen_title_isbn (title_id, isbn_id) VALUES (%s,%s)'''
                            try:
                                cursor.execute(isbn_relation_sql, (title_id,isbn_id))
                            except:
                                connection.rollback()
                    except:
                        pass
                    # KEYWORD(S)
                    keywords_div = title_soup.find('div', attrs={'class':'field-name-ting-subjects'})
                    if keywords_div:
                        keywords = [link.text for link in keywords_div.find_all('a')]
                        for keyword in keywords:
                            keyword_sql = '''SELECT * from ereolen_keyword WHERE keyword = %s'''
                            cursor.execute(keyword_sql, (keyword, ))
                            try:
                                keyword_id = cursor.fetchone()[0]
                            except:
                                keyword_insert = '''INSERT INTO ereolen_keyword(keyword) VALUES(%s) RETURNING id'''
                                cursor.execute(keyword_insert, (keyword, ))
                                keyword_id = cursor.fetchone()[0]
                            keyword_relation_sql = '''INSERT INTO ereolen_title_keyword (title_id, keyword_id) VALUES (%s,%s)'''
                            try:
                                cursor.execute(keyword_relation_sql, (title_id,keyword_id))
                            except:
                                connection.rollback()
                    # AUTHOR(S)
                    creator_full = title_soup.find('div', attrs={'class':'field-name-ting-author'}).text.replace("Af ","")
                    # Remove date of book
                    creator = creator_full[:creator_full.rfind("(")-1]
                    authors = creator.split(",")
                    for author in authors:
                        birth_year = None
                        if ' (f. ' in author and not len(author) < 7:
                            if 'ca. ' in author:
                                author = author.replace('ca. ','')
                            birth_year_string = author[author.index("(f.")+4:author.index("(f.")+8]
                            if ')' in birth_year_string:
                                birth_year_string = birth_year_string.replace(')','')
                            birth_year = date(int(birth_year_string),1,1)
                            author = author[:author.index(" (f.")]
                        elif ' (f. ' in author:
                            breakpoint()
                        # Sometimes there are no authors, but still a published year
                        if len(author) == 5 and "(" in author:
                            author = ""
                        if author:
                            author = author.strip()
                            author_sql = '''SELECT * from ereolen_author WHERE full_name = %s'''
                            cursor.execute(author_sql, (author, ))
                            try:
                                author_id = cursor.fetchone()[0]
                            except:
                                if birth_year:
                                    author_insert = '''INSERT INTO ereolen_author(full_name,birth_year) VALUES(%s,%s) RETURNING id'''
                                    cursor.execute(author_insert, (author,birth_year))
                                else:
                                    author_insert = '''INSERT INTO ereolen_author(full_name) VALUES(%s) RETURNING id'''
                                    cursor.execute(author_insert, (author, ))
                                author_id = cursor.fetchone()[0]
                            author_relation_sql = '''INSERT INTO ereolen_title_author (title_id, author_id) VALUES (%s,%s)'''
                            try:
                                cursor.execute(author_relation_sql, (title_id,author_id))
                            except:
                                connection.rollback()
                    ### SAVE ###
                    connection.commit()
                    title_counter += 1
            page += 1
connection.close()
print('Ereolen crawl ran')
if title_counter > 0:
    print('Added titles on ereolen:', title_counter)
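The robot only prints its result; the morning e-mail isn't part of the code above. One simple way to get it, assuming the robot runs from cron, is to let cron mail the output of the nightly run:

# crontab sketch (assumes a working local mail setup; cron mails stdout to MAILTO)
MAILTO=me@example.com
15 3 * * * python3 /home/me/ereolen_robot.py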
And in Django's built-in administration interface, with a nice overview and good search, sorting and filtering options, I can spot a short story collection by Georg Metz that has just turned up on eReolen with a "dkcclterm.op" value from September 2013!
May I try it?
I would like to share my tool with others, but it's not entirely straightforward to work out which parts of eReolen's book data are free and public, and which are owned by a (to my eyes) slightly odd construction called DBC. A company owned by KL (Kommunernes Landsforening, the association of Danish municipalities) that makes money selling data about books to – municipalities (and some other actors, which I'd guess are almost exclusively public).
I'm looking into what I can publish without bothering anyone or breaking copyright law. It may take a little while.
Logging in to politiken.dk/Medielogin with Python
Here is an example of a small program that logs in to politiken.dk. It can (probably) easily be adapted to Jyllands-Posten and possibly other sites using the same login solution:
import requests
from bs4 import BeautifulSoup

def check_login_wall_presence(session):
    # For verification purposes, a shared article with a passage behind the loginwall is specified
    login_wall_article_url = 'https://politiken.dk/del/_gCmczAApUpA'
    passage_from_article = 'varieret kost og begrænset vægtøgning'
    check_loginwall = session.get(login_wall_article_url)
    if not passage_from_article in check_loginwall.text:
        return print('Loginwall is on')
    else:
        return print('Loginwall is off')

# Initiate a requests session
session = requests.Session()
# Check login wall status
check_login_wall_presence(session)
# Medielogin/Politiken username and password
username = "" # ENTER E-MAIL
password = "" # ENTER PASSWORD

# STEP ONE OF LOGIN: Visit login page in order to set cookies and process form fields
login_page_url = 'https://politiken.dk/medielogin/login'
login_page = session.get(login_page_url)
login_page_soup = BeautifulSoup(login_page.text, "lxml")
login_information = {}
login_page_inputs = login_page_soup.find_all('input')
for login_input in login_page_inputs:
    try:
        login_information[login_input['name']] = login_input['value']
    except:
        pass
login_information['Username'] = username
login_information['Password'] = password

# STEP TWO OF LOGIN: Post form data from login page
process_login_url = 'https://medielogin.dk/politiken/login'
step_two_login = session.post(process_login_url, data=login_information)
step_two_login_soup = BeautifulSoup(step_two_login.text, "lxml")
# Get form destination
login_form = step_two_login_soup.find('form')
login_form_destination = login_form['action']
# Process form fields
step_two_information = {}
login_inputs = step_two_login_soup.find_all('input')
for login_input in login_inputs:
    try:
        step_two_information[login_input['name']] = login_input['value']
    except:
        pass

# STEP THREE OF LOGIN: Post form data to form destination
complete_login = session.post(login_form_destination, data=step_two_information)
# Check login wall status
check_login_wall_presence(session)
A coding advent calendar
Over at https://adventofcode.com/ you can find two coding puzzles every day and try to solve them. It's almost like crosswords or sudoku, just with code instead. Here are my solutions.
Day 11, part 2:
octopi = '''6744638455
3135745418
4754123271
4224257161
8167186546
2268577674
7177768175
2662255275
4655343376
7852526168'''

octopusses = []
index = 0
line_counter = 0
for oct in octopi:
    if oct == "\n":
        line = octopusses.append([])
        line_counter += 1
    elif index == 0:
        line = octopusses.append([])
        octopusses[line_counter].append(int(oct))
        index += 1
    else:
        octopusses[line_counter].append(int(oct))

number_of_rows = len(octopusses)
numbers_per_row = len(octopusses[0])

def get_valid_neighbours(point):
    neighbours = [
        [point[0]-1, point[1]],
        [point[0]-1, point[1]+1],
        [point[0], point[1]+1],
        [point[0]+1, point[1]+1],
        [point[0]+1, point[1]],
        [point[0]+1, point[1]-1],
        [point[0], point[1]-1],
        [point[0]-1, point[1]-1],
    ]
    valid_neighbours = []
    for neighbour in neighbours:
        if not neighbour[0] < 0 and not neighbour[0] > number_of_rows - 1 and not neighbour[1] < 0 and not neighbour[1] > numbers_per_row - 1:
            valid_neighbours.append(neighbour)
    return valid_neighbours

def get_neighbours_with_energy_level_10(neighbours, energy_levels):
    ten_count = 0
    for neighbour in neighbours:
        try:
            if energy_levels[neighbour[0]][neighbour[1]] == "flash":
                ten_count += 1
        except:
            print(neighbours)
            breakpoint()
    return ten_count

def flash_octopusses(energy_levels):
    updated_energy_levels = []
    row_number = 0
    for row in energy_levels:
        updated_energy_levels.append([])
        column = 0
        for octopus in row:
            if not octopus == "flash":
                if octopus < 10 and not octopus == 0:
                    neighbours = get_valid_neighbours([row_number, column])
                    energy_level_rise = get_neighbours_with_energy_level_10(neighbours, energy_levels)
                    octopus += energy_level_rise
                    if octopus > 10:
                        octopus = 10
                    updated_energy_levels[row_number].append(octopus)
                else:
                    updated_energy_levels[row_number].append(octopus)
            else:
                updated_energy_levels[row_number].append(octopus)
            column += 1
        row_number += 1
    ten_in_levels = ten_in_energy_levels(updated_energy_levels)
    if ten_in_levels:
        replace = replace_ten_with_flash_and_flash_with_zero(updated_energy_levels)
    else:
        return step_octopusses.append(updated_energy_levels)

def replace_ten_with_flash_and_flash_with_zero(energy_levels):
    updated_energy_levels = []
    row_number = 0
    for row in energy_levels:
        updated_energy_levels.append([])
        column = 0
        for octopus in row:
            if octopus == 10:
                octopus = "flash"
            elif octopus == "flash":
                octopus = 0
            updated_energy_levels[row_number].append(octopus)
            column += 1
        row_number += 1
    return flash_octopusses(updated_energy_levels)

# Determine whether at least one octopus has 10 in energy levels
def ten_in_energy_levels(energy_levels):
    for row in energy_levels:
        for octopus in row:
            if octopus == "flash":
                return True
    return False

def add_one_to_levels(octopusses):
    energy_levels = []
    for line in octopusses:
        octopi_in_line = []
        for octopus in line:
            octopus += 1
            if octopus == 10:
                octopus = "flash"
            octopi_in_line.append(octopus)
        energy_levels.append(octopi_in_line)
    octopusses = energy_levels
    return octopusses

# First part of step
step_octopusses = []
number_of_steps = 1000
for i in range(number_of_steps):
    octopusses = add_one_to_levels(octopusses)
    ten_in_levels = ten_in_energy_levels(octopusses)
    if ten_in_levels:
        flash_octopusses(octopusses)
        octopusses = step_octopusses[-1]
    else:
        step_octopusses.append(octopusses)

flashes = 0
step_counter = 0
for step in step_octopusses:
    flashes_per_step = 0
    for row in step:
        for number in row:
            if number == 0:
                flashes += 1
                flashes_per_step += 1
    step_counter += 1
    if flashes_per_step == 100:
        print(step_counter)
        break
Day 11, part 1:
Arrghhh.
octopi = '''6744638455
3135745418
4754123271
4224257161
8167186546
2268577674
7177768175
2662255275
4655343376
7852526168'''

octopusses = []
index = 0
line_counter = 0
for oct in octopi:
    if oct == "\n":
        line = octopusses.append([])
        line_counter += 1
    elif index == 0:
        line = octopusses.append([])
        octopusses[line_counter].append(int(oct))
        index += 1
    else:
        octopusses[line_counter].append(int(oct))

number_of_rows = len(octopusses)
numbers_per_row = len(octopusses[0])

def get_valid_neighbours(point):
    neighbours = [
        [point[0]-1, point[1]],
        [point[0]-1, point[1]+1],
        [point[0], point[1]+1],
        [point[0]+1, point[1]+1],
        [point[0]+1, point[1]],
        [point[0]+1, point[1]-1],
        [point[0], point[1]-1],
        [point[0]-1, point[1]-1],
    ]
    valid_neighbours = []
    for neighbour in neighbours:
        if not neighbour[0] < 0 and not neighbour[0] > number_of_rows - 1 and not neighbour[1] < 0 and not neighbour[1] > numbers_per_row - 1:
            valid_neighbours.append(neighbour)
    return valid_neighbours

def get_neighbours_with_energy_level_10(neighbours, energy_levels):
    ten_count = 0
    for neighbour in neighbours:
        try:
            if energy_levels[neighbour[0]][neighbour[1]] == "flash":
                ten_count += 1
        except:
            print(neighbours)
            breakpoint()
    return ten_count

def flash_octopusses(energy_levels):
    updated_energy_levels = []
    row_number = 0
    for row in energy_levels:
        updated_energy_levels.append([])
        column = 0
        for octopus in row:
            if not octopus == "flash":
                if octopus < 10 and not octopus == 0:
                    neighbours = get_valid_neighbours([row_number, column])
                    energy_level_rise = get_neighbours_with_energy_level_10(neighbours, energy_levels)
                    octopus += energy_level_rise
                    if octopus > 10:
                        octopus = 10
                    updated_energy_levels[row_number].append(octopus)
                else:
                    updated_energy_levels[row_number].append(octopus)
            else:
                updated_energy_levels[row_number].append(octopus)
            column += 1
        row_number += 1
    ten_in_levels = ten_in_energy_levels(updated_energy_levels)
    if ten_in_levels:
        replace = replace_ten_with_flash_and_flash_with_zero(updated_energy_levels)
    else:
        return step_octopusses.append(updated_energy_levels)

def replace_ten_with_flash_and_flash_with_zero(energy_levels):
    updated_energy_levels = []
    row_number = 0
    for row in energy_levels:
        updated_energy_levels.append([])
        column = 0
        for octopus in row:
            if octopus == 10:
                octopus = "flash"
            elif octopus == "flash":
                octopus = 0
            updated_energy_levels[row_number].append(octopus)
            column += 1
        row_number += 1
    return flash_octopusses(updated_energy_levels)

# Determine whether at least one octopus has 10 in energy levels
def ten_in_energy_levels(energy_levels):
    for row in energy_levels:
        for octopus in row:
            if octopus == "flash":
                return True
    return False

def add_one_to_levels(octopusses):
    energy_levels = []
    for line in octopusses:
        octopi_in_line = []
        for octopus in line:
            octopus += 1
            if octopus == 10:
                octopus = "flash"
            octopi_in_line.append(octopus)
        energy_levels.append(octopi_in_line)
    octopusses = energy_levels
    return octopusses

# First part of step
step_octopusses = []
number_of_steps = 100
for i in range(number_of_steps):
    octopusses = add_one_to_levels(octopusses)
    ten_in_levels = ten_in_energy_levels(octopusses)
    if ten_in_levels:
        flash_octopusses(octopusses)
        octopusses = step_octopusses[-1]
    else:
        step_octopusses.append(octopusses)

flashes = 0
for step in step_octopusses:
    for row in step:
        for number in row:
            if number == 0:
                flashes += 1
print(flashes)
Day 10, part 2:
lines = []
with open("input_day10.txt", "r", encoding="utf8") as fin:
    for f in fin:
        f = f.replace("\n","")
        lines.append(f)

opening_characters = ["(", "<", "{", "["]
closing_characters = [")", ">", "}", "]"]

def check_numbers_between(match_indexes, character_index, i):
    for number_between in range(character_index - i + 1, character_index):
        if number_between not in match_indexes:
            return False
    return True

corrupted_lines = []
illegal_characters = []
for line in lines:
    closing_characters_list = sorted([index for index, character in enumerate(line) for closing_character in closing_characters if character == closing_character])
    used_character_indexes = []
    match_indexes = []
    for character_index in closing_characters_list:
        closing_character = line[character_index]
        correct_opening_character = opening_characters[closing_characters.index(closing_character)]
        found_a_match = False
        for i in range(1, character_index + 1, 2):
            if line[character_index - i] == correct_opening_character and character_index - i not in used_character_indexes:
                used_character_indexes.append(character_index - i)
                # Check in match_indexes whether everything within a match range is covered by already detected matches
                if i > 1:
                    found_a_match = check_numbers_between(match_indexes, character_index, i)
                    if found_a_match == False:
                        break
                match_indexes.append(character_index - i)
                match_indexes.append(character_index)
                found_a_match = True
                break
        if found_a_match == False:
            illegal_characters.append(closing_character)
            corrupted_lines.append(line)
            break

incomplete_lines = [line for line in lines if line not in corrupted_lines]
scores = []
for line in incomplete_lines:
    closing_characters_list = sorted([index for index, character in enumerate(line) for closing_character in closing_characters if character == closing_character])
    used_character_indexes = []
    match_indexes = []
    for character_index in closing_characters_list:
        closing_character = line[character_index]
        correct_opening_character = opening_characters[closing_characters.index(closing_character)]
        found_a_match = False
        for i in range(1, character_index + 1, 2):
            if line[character_index - i] == correct_opening_character and character_index - i not in used_character_indexes:
                used_character_indexes.append(character_index - i)
                match_indexes.append(character_index - i)
                match_indexes.append(character_index)
                break
    indexes_of_tags_to_close = []
    for i in range(len(line)):
        if i not in match_indexes:
            indexes_of_tags_to_close.append(i)
    end_string = ""
    for index in reversed(indexes_of_tags_to_close):
        character = line[index]
        open_character_position = opening_characters.index(character)
        closing_character = closing_characters[open_character_position]
        end_string += closing_character
    score = 0
    for character in end_string:
        score = 5 * score
        if character == ")":
            score += 1
        elif character == "]":
            score += 2
        elif character == "}":
            score += 3
        elif character == ">":
            score += 4
    scores.append(score)

sorted_scores = sorted(scores)
scores_length = len(sorted_scores)
middle_score = sorted_scores[int((scores_length-1)/2)]
print(middle_score)
Day 10, part 1:
Now I'm reaching the limit of what I can figure out within a reasonable amount of time.
lines = []
with open("input_day10.txt", "r", encoding="utf8") as fin:
    for f in fin:
        f = f.replace("\n","")
        lines.append(f)

opening_characters = ["(", "<", "{", "["]
closing_characters = [")", ">", "}", "]"]

def check_numbers_between(match_indexes, character_index, i):
    for number_between in range(character_index - i + 1, character_index):
        if number_between not in match_indexes:
            return False
    return True

illegal_characters = []
for line in lines:
    closing_characters_list = sorted([index for index, character in enumerate(line) for closing_character in closing_characters if character == closing_character])
    used_character_indexes = []
    match_indexes = []
    for character_index in closing_characters_list:
        closing_character = line[character_index]
        correct_opening_character = opening_characters[closing_characters.index(closing_character)]
        found_a_match = False
        for i in range(1, character_index + 1, 2):
            if line[character_index - i] == correct_opening_character and character_index - i not in used_character_indexes:
                used_character_indexes.append(character_index - i)
                # Check in match_indexes whether everything within a match range is covered by already detected matches
                if i > 1:
                    found_a_match = check_numbers_between(match_indexes, character_index, i)
                    if found_a_match == False:
                        break
                match_indexes.append(character_index - i)
                match_indexes.append(character_index)
                found_a_match = True
                break
        if found_a_match == False:
            illegal_characters.append(closing_character)
            break

points = 0
for character in illegal_characters:
    if character == closing_characters[0]:
        points += 3
    elif character == closing_characters[1]:
        points += 25137
    elif character == closing_characters[2]:
        points += 1197
    elif character == closing_characters[3]:
        points += 57
print(points)
Day 9, part 2:
points = []
with open("input_day9", "r", encoding="utf8") as fin:
    for f in fin:
        f = f.replace("\n","")
        points.append([int(value) for value in f])

number_of_rows = len(points)
numbers_per_row = len(points[0])

low_point_coordinates = []
low_points = []
row_index = 0
for row in points:
    point_index = 0
    for point in row:
        # First row
        if row_index == 0 and point_index == 0:
            if row[point_index + 1] > point and points[row_index + 1][point_index] > point:
                low_point_coordinates.append([row_index, point_index])
                low_points.append(point + 1)
        elif row_index == 0 and point_index == numbers_per_row - 1:
            if row[point_index - 1] > point and points[row_index + 1][point_index] > point:
                low_point_coordinates.append([row_index, point_index])
                low_points.append(point + 1)
        elif row_index == 0:
            if row[point_index + 1] > point and row[point_index - 1] > point and points[row_index + 1][point_index] > point:
                low_point_coordinates.append([row_index, point_index])
                low_points.append(point + 1)
        elif row_index == number_of_rows - 1 and point_index == 0:
            if row[point_index + 1] > point and points[row_index - 1][point_index] > point:
                low_point_coordinates.append([row_index, point_index])
                low_points.append(point + 1)
        elif row_index == number_of_rows - 1 and point_index == numbers_per_row - 1:
            if row[point_index - 1] > point and points[row_index - 1][point_index] > point:
                low_point_coordinates.append([row_index, point_index])
                low_points.append(point + 1)
        elif row_index == number_of_rows - 1:
            if row[point_index + 1] > point and row[point_index - 1] > point and points[row_index - 1][point_index] > point:
                low_point_coordinates.append([row_index, point_index])
                low_points.append(point + 1)
        elif point_index == 0:
            if row[point_index + 1] > point and points[row_index + 1][point_index] > point and points[row_index - 1][point_index] > point:
                low_point_coordinates.append([row_index, point_index])
                low_points.append(point + 1)
        elif point_index == numbers_per_row - 1:
            if row[point_index - 1] > point and points[row_index + 1][point_index] > point and points[row_index - 1][point_index] > point:
                low_point_coordinates.append([row_index, point_index])
                low_points.append(point + 1)
        else:
            if row[point_index + 1] > point and row[point_index - 1] > point and points[row_index + 1][point_index] > point and points[row_index - 1][point_index] > point:
                low_point_coordinates.append([row_index, point_index])
                low_points.append(point + 1)
        point_index += 1
    row_index += 1

def get_neighbours(point):
    neighbours = [
        [point[0], point[1]+1],
        [point[0], point[1]-1],
        [point[0]+1, point[1]],
        [point[0]-1, point[1]],
    ]
    for neighbour in neighbours:
        if not neighbour[0] < 0 and not neighbour[0] > number_of_rows - 1 and not neighbour[1] < 0 and not neighbour[1] > numbers_per_row - 1:
            if points[neighbour[0]][neighbour[1]] != 9 and neighbour not in valid_neighbours:
                valid_neighbours.append(neighbour)
                get_neighbours(neighbour)
    return valid_neighbours

basins = []
for low_point in low_point_coordinates:
    valid_neighbours = []
    valid_neighbours.append(low_point)
    neighbours = get_neighbours(low_point)
    basins.append(neighbours)

basins.sort(key=len, reverse=True)
product_of_large_basins = len(basins[0]) * len(basins[1]) * len(basins[2])
print(product_of_large_basins)
Day 9, part 1:
points = []
with open("input_day9", "r", encoding="utf8") as fin:
    for f in fin:
        f = f.replace("\n","")
        points.append([int(value) for value in f])

number_of_rows = len(points)
numbers_per_row = len(points[0])

low_points = []
row_index = 0
for row in points:
    point_index = 0
    for point in row:
        # First row
        if row_index == 0 and point_index == 0:
            if row[point_index + 1] > point and points[row_index + 1][point_index] > point:
                low_points.append(point + 1)
        elif row_index == 0 and point_index == numbers_per_row - 1:
            if row[point_index - 1] > point and points[row_index + 1][point_index] > point:
                low_points.append(point + 1)
        elif row_index == 0:
            if row[point_index + 1] > point and row[point_index - 1] > point and points[row_index + 1][point_index] > point:
                low_points.append(point + 1)
        elif row_index == number_of_rows - 1 and point_index == 0:
            if row[point_index + 1] > point and points[row_index - 1][point_index] > point:
                low_points.append(point + 1)
        elif row_index == number_of_rows - 1 and point_index == numbers_per_row - 1:
            if row[point_index - 1] > point and points[row_index - 1][point_index] > point:
                low_points.append(point + 1)
        elif row_index == number_of_rows - 1:
            if row[point_index + 1] > point and row[point_index - 1] > point and points[row_index - 1][point_index] > point:
                low_points.append(point + 1)
        elif point_index == 0:
            if row[point_index + 1] > point and points[row_index + 1][point_index] > point and points[row_index - 1][point_index] > point:
                low_points.append(point + 1)
        elif point_index == numbers_per_row - 1:
            if row[point_index - 1] > point and points[row_index + 1][point_index] > point and points[row_index - 1][point_index] > point:
                low_points.append(point + 1)
        else:
            if row[point_index + 1] > point and row[point_index - 1] > point and points[row_index + 1][point_index] > point and points[row_index - 1][point_index] > point:
                low_points.append(point + 1)
        point_index += 1
    row_index += 1
print(sum(low_points))
Day 8, part 2:
def get_mapping(signals):
    zero = ""
    one = ""
    two = ""
    three = ""
    four = ""
    five = ""
    six = ""
    seven = ""
    eight = ""
    nine = ""
    digits = {
        5: [],
        6: []
    }
    for signal in signals:
        if len(signal) == 2:
            one = signal
        elif len(signal) == 3:
            seven = signal
        elif len(signal) == 4:
            four = signal
        elif len(signal) == 7:
            eight = signal
        elif len(signal) == 5:
            digits[5].append(signal)
        elif len(signal) == 6:
            digits[6].append(signal)
    # Only one of 2, 3 and 5 (five segments) has both segments of 1: 3
    for signal in digits[5]:
        count = 0
        for letter in one:
            if letter in signal:
                count += 1
        if count == 2:
            three = signal
            digits[5].remove(signal)
    # Of 2 and 5, only 5 has both b and d from 4, which can be determined by getting the difference between 4 and 1:
    four_one_difference = ""
    for character in four:
        if character not in one:
            four_one_difference += character
    for signal in digits[5]:
        for character in four_one_difference:
            if character not in signal:
                two = signal
                digits[5].remove(signal)
    five = digits[5][0]
    # Zero is the only 6-segment number that doesn't have d
    for signal in digits[6]:
        for character in four_one_difference:
            if character not in signal:
                zero = signal
                digits[6].remove(signal)
    # Six does not have both c and f from one
    for signal in digits[6]:
        for character in one:
            if character not in signal:
                six = signal
                digits[6].remove(signal)
    nine = digits[6][0]
    return [zero,one,two,three,four,five,six,seven,eight,nine]

sum_of_output_values = 0
with open("input_day8", "r", encoding="utf8") as fin:
    for f in fin:
        f = f.replace("\n","")
        signals = f[:f.index(" | ")].split(" ")
        mappings = get_mapping(signals)
        mappings_alphabetical = [sorted(list(digit)) for digit in mappings]
        output = f[f.index(" | ")+3:].split(" ")
        output_alphabetical = [sorted(list(digit)) for digit in output]
        output_value = ""
        for value in output_alphabetical:
            number = mappings_alphabetical.index(value)
            output_value += str(number)
        output_value = int(output_value)
        sum_of_output_values += output_value
print(sum_of_output_values)
Day 8, part 1:
segment_lengths = [2,3,4,7]
count_of_unique_digits = 0
with open("input_day8", "r", encoding="utf8") as fin:
    for f in fin:
        f = f.replace("\n","")
        output = f[f.index(" | ")+3:].split(" ")
        for value in output:
            if len(value) in segment_lengths:
                count_of_unique_digits += 1
print(count_of_unique_digits)
Day 7, part 2:
initial = [1101,1,29,67,1102,0,1,65,1008,65,35,66,1005,66,28,1,67,65,20,4,0,1001,65,1,65,1106,0,8,99,35,67,101,99,105,32,110,39,101,115,116,32,112,97,115,32,117,110,101,32,105,110,116,99,111,100,101,32,112,114,111,103,114,97,109,10,478,1187,253,1892,900,155,20,787,17,248,1397,407,167,686,638,1020,960,124,840,220,1824,700,373,4,551,229,294,567,254,350,1144,679,124,361,145,483,335,202,1334,367,60,870,11,557,482,645,672,1296,1538,427,78,542,1135,13,65,0,140,705,13,642,187,1085,36,1118,349,601,382,584,941,26,949,200,763,198,430,204,1352,1135,210,342,11,1089,830,1523,9,523,167,762,254,805,8,132,29,102,1299,936,756,59,134,183,235,316,139,48,182,44,88,213,113,93,169,565,601,1899,1191,189,796,770,32,1183,365,374,867,918,1084,86,75,20,47,99,1140,2,99,1024,366,455,752,556,1220,66,326,450,213,1,342,756,49,675,160,280,68,221,193,379,88,179,94,16,109,570,1145,1207,824,355,1389,1601,168,86,236,923,120,759,14,478,460,84,167,1723,1005,269,6,171,861,311,832,952,701,3,1598,1466,96,780,57,161,631,572,276,105,594,276,17,405,688,1444,173,23,199,177,689,19,565,472,151,986,76,379,1430,212,928,106,25,143,84,833,942,860,1555,271,239,720,596,1209,235,535,361,1794,79,283,275,17,342,1687,1434,173,967,740,217,1370,18,1579,1259,546,94,623,475,834,1000,456,101,520,120,1023,360,167,213,617,42,1149,629,760,17,33,27,1347,414,646,1116,1340,134,259,143,407,249,328,968,677,241,438,98,313,27,791,1,634,3,918,1482,213,123,444,45,24,26,26,1203,64,67,1562,1,4,298,12,384,32,443,37,268,674,356,202,286,694,272,163,950,1022,54,59,21,73,519,462,106,76,1112,10,72,388,194,6,120,9,645,209,1121,75,599,362,661,439,69,62,339,390,23,1247,365,1266,4,246,511,47,467,134,276,497,130,458,427,669,1191,701,917,168,1191,294,641,236,801,375,106,872,800,87,356,583,1096,253,459,951,1331,719,66,1091,525,15,370,290,141,1201,30,43,37,76,1131,616,297,172,402,1016,654,301,63,872,303,69,1195,502,351,52,1659,86,104,294,807,166,120,190,333,60,283,819,198,184,144,278,343,1395,496,103,705,485,172,642,225,181,583,188,38,436,801,91,5,634,180,28,20,146,488,676,121,420,965,220,1564,1011,241,423,3,1631,709,106,725,164,1032,65,205,503,188,397,1072,49,121,761,721,249,418,87,126,258,712,500,435,157,127,681,108,270,647,504,505,83,407,212,165,1177,160,715,1292,491,195,141,25,829,1316,242,754,364,1707,33,594,434,488,368,298,183,1156,29,1674,537,378,8,9,860,240,571,749,471,331,501,156,62,427,1103,52,12,832,1198,284,388,827,556,194,288,218,397,84,1485,95,401,739,986,994,305,668,1324,1437,312,993,15,822,923,707,135,42,423,37,1183,1344,997,19,699,395,119,7,168,1711,50,151,38,20,163,686,1364,21,24,411,32,335,188,55,628,274,1766,439,180,286,1024,87,15,1498,290,561,971,32,294,67,113,219,42,18,715,3,664,242,583,221,1045,236,74,46,1612,639,325,164,100,69,518,38,502,26,329,112,1174,127,124,90,144,527,468,152,1098,800,125,349,191,290,191,27,651,446,267,9,1304,269,586,64,983,152,236,512,8,248,177,109,311,957,47,126,69,13,709,204,381,1151,580,340,994,865,258,190,9,1149,930,1128,321,100,471,0,507,1308,326,585,813,1088,76,174,333,387,631,186,430,988,24,820,11,45,173,167,1494,98,1467,456,167,21,1363,1173,394,318,1601,1111,1249,757,282,672,1227,1214,277,336,815,136,1192,681,689,431,130,1488,154,465,14,709,339,1123,68,151,1280,143,1797,23,250,1231,1007,302,1103,2,585,552,1732,994,225,771,1495,82,229,700,910,15,38,159,1122,316,1044,711,1436,920,1722,523,1398,188,443,1032,93,33,397,272,187,24,489,53,79,1277,671,1094,68,1705,984,1096,512,145,389,167,161,1174,94,4,534,1295,648,75,24,366,995,175,220,714,843,412,267,634,1209,66,1094,125,822,1114,1513,694,1520,30,676,817,245,26,77,1146,552,143,165,39,343,971,87,0,90,1434,588,616,99,297,1034,114,5,702,917,582,733,31,54,820,0,212,192,282,33,639,1661,460,75,680,115,178,194,271,274,582,1008,89,139,611,707,0,376,65,9,161,135,40,134,566,66,601,95,817,745,202,352,447,322,842,6,1247,175,468,330,608,368,139,21,29,486,121,9,1293,298,73,328,302,145,889,1794,677,56,952,520,80]
minimum_value = min(initial)
maximum_value = max(initial)
def get_fuel_use(distance):
    fuel_use = 0
    if distance == 0:
        return fuel_use
    for i in range(distance+1):
        fuel_use += i
    return fuel_use
distances = {}
for i in range(minimum_value, maximum_value+1):
    distances[i] = 0
    for crab in initial:
        if i > crab:
            distance = i - crab
        elif i < crab:
            distance = crab - i
        else:
            distance = 0
        fuel_use = get_fuel_use(distance)
        distances[i] += fuel_use
minimum_fuel_consumption = 0
first_run = True
for position, fuel_consumption in distances.items():
    if first_run == True:
        minimum_fuel_consumption = fuel_consumption
        first_run = False
    elif fuel_consumption < minimum_fuel_consumption:
        minimum_fuel_consumption = fuel_consumption
print(minimum_fuel_consumption)
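The loop in get_fuel_use adds up 1 + 2 + ... + distance one step at a time. The same value is given directly by the triangular-number formula n(n + 1)/2, so the helper could be reduced to a single expression. A minimal sketch:
def get_fuel_use(distance):
    # 1 + 2 + ... + n equals n * (n + 1) / 2
    return distance * (distance + 1) // 2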
Day 7, part 1:
initial = [1101,1,29,67,1102,0,1,65,1008,65,35,66,1005,66,28,1,67,65,20,4,0,1001,65,1,65,1106,0,8,99,35,67,101,99,105,32,110,39,101,115,116,32,112,97,115,32,117,110,101,32,105,110,116,99,111,100,101,32,112,114,111,103,114,97,109,10,478,1187,253,1892,900,155,20,787,17,248,1397,407,167,686,638,1020,960,124,840,220,1824,700,373,4,551,229,294,567,254,350,1144,679,124,361,145,483,335,202,1334,367,60,870,11,557,482,645,672,1296,1538,427,78,542,1135,13,65,0,140,705,13,642,187,1085,36,1118,349,601,382,584,941,26,949,200,763,198,430,204,1352,1135,210,342,11,1089,830,1523,9,523,167,762,254,805,8,132,29,102,1299,936,756,59,134,183,235,316,139,48,182,44,88,213,113,93,169,565,601,1899,1191,189,796,770,32,1183,365,374,867,918,1084,86,75,20,47,99,1140,2,99,1024,366,455,752,556,1220,66,326,450,213,1,342,756,49,675,160,280,68,221,193,379,88,179,94,16,109,570,1145,1207,824,355,1389,1601,168,86,236,923,120,759,14,478,460,84,167,1723,1005,269,6,171,861,311,832,952,701,3,1598,1466,96,780,57,161,631,572,276,105,594,276,17,405,688,1444,173,23,199,177,689,19,565,472,151,986,76,379,1430,212,928,106,25,143,84,833,942,860,1555,271,239,720,596,1209,235,535,361,1794,79,283,275,17,342,1687,1434,173,967,740,217,1370,18,1579,1259,546,94,623,475,834,1000,456,101,520,120,1023,360,167,213,617,42,1149,629,760,17,33,27,1347,414,646,1116,1340,134,259,143,407,249,328,968,677,241,438,98,313,27,791,1,634,3,918,1482,213,123,444,45,24,26,26,1203,64,67,1562,1,4,298,12,384,32,443,37,268,674,356,202,286,694,272,163,950,1022,54,59,21,73,519,462,106,76,1112,10,72,388,194,6,120,9,645,209,1121,75,599,362,661,439,69,62,339,390,23,1247,365,1266,4,246,511,47,467,134,276,497,130,458,427,669,1191,701,917,168,1191,294,641,236,801,375,106,872,800,87,356,583,1096,253,459,951,1331,719,66,1091,525,15,370,290,141,1201,30,43,37,76,1131,616,297,172,402,1016,654,301,63,872,303,69,1195,502,351,52,1659,86,104,294,807,166,120,190,333,60,283,819,198,184,144,278,343,1395,496,103,705,485,172,642,225,181,583,188,38,436,801,91,5,634,180,28,20,146,488,676,121,420,965,220,1564,1011,241,423,3,1631,709,106,725,164,1032,65,205,503,188,397,1072,49,121,761,721,249,418,87,126,258,712,500,435,157,127,681,108,270,647,504,505,83,407,212,165,1177,160,715,1292,491,195,141,25,829,1316,242,754,364,1707,33,594,434,488,368,298,183,1156,29,1674,537,378,8,9,860,240,571,749,471,331,501,156,62,427,1103,52,12,832,1198,284,388,827,556,194,288,218,397,84,1485,95,401,739,986,994,305,668,1324,1437,312,993,15,822,923,707,135,42,423,37,1183,1344,997,19,699,395,119,7,168,1711,50,151,38,20,163,686,1364,21,24,411,32,335,188,55,628,274,1766,439,180,286,1024,87,15,1498,290,561,971,32,294,67,113,219,42,18,715,3,664,242,583,221,1045,236,74,46,1612,639,325,164,100,69,518,38,502,26,329,112,1174,127,124,90,144,527,468,152,1098,800,125,349,191,290,191,27,651,446,267,9,1304,269,586,64,983,152,236,512,8,248,177,109,311,957,47,126,69,13,709,204,381,1151,580,340,994,865,258,190,9,1149,930,1128,321,100,471,0,507,1308,326,585,813,1088,76,174,333,387,631,186,430,988,24,820,11,45,173,167,1494,98,1467,456,167,21,1363,1173,394,318,1601,1111,1249,757,282,672,1227,1214,277,336,815,136,1192,681,689,431,130,1488,154,465,14,709,339,1123,68,151,1280,143,1797,23,250,1231,1007,302,1103,2,585,552,1732,994,225,771,1495,82,229,700,910,15,38,159,1122,316,1044,711,1436,920,1722,523,1398,188,443,1032,93,33,397,272,187,24,489,53,79,1277,671,1094,68,1705,984,1096,512,145,389,167,161,1174,94,4,534,1295,648,75,24,366,995,175,220,714,843,412,267,634,1209,66,1094,125,822,1114,1513,694,1520,30,676,817,245,26,77,1146,552,143,165,39,343,971,87,0,90,1434,588,616,99,297,1034,114,5,702,917,582,733,31,54,820,0,212,192,282,33,639,1661,460,75,680,115,178,194,271,274,582,1008,89,139,611,707,0,376,65,9,161,135,40,134,566,66,601,95,817,745,202,352,447,322,842,6,1247,175,468,330,608,368,139,21,29,486,121,9,1293,298,73,328,302,145,889,1794,677,56,952,520,80]
minimum_value = min(initial)
maximum_value = max(initial)
distances = {}
for i in range(minimum_value, maximum_value+1):
    distances[i] = 0
    for crab in initial:
        if i > crab:
            distance = i - crab
        elif i < crab:
            distance = crab - i
        else:
            distance = 0
        distances[i] += distance
minimum_fuel_consumption = 0
first_run = True
for position, fuel_consumption in distances.items():
    if first_run == True:
        minimum_fuel_consumption = fuel_consumption
        first_run = False
    elif fuel_consumption < minimum_fuel_consumption:
        minimum_fuel_consumption = fuel_consumption
print(minimum_fuel_consumption)
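For part 1, the brute-force search over every position can be skipped entirely: the sum of absolute distances is minimized at the median of the positions. A sketch using the statistics module from the standard library:
import statistics
# The median minimizes the sum of absolute distances (part 1's fuel rule)
best_position = int(statistics.median(initial))
print(sum(abs(crab - best_position) for crab in initial))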
Day 6, part 2:
initial = [3,4,1,1,5,1,3,1,1,3,5,1,1,5,3,2,4,2,2,2,1,1,1,1,5,1,1,1,1,1,3,1,1,5,4,1,1,1,4,1,1,1,1,2,3,2,5,1,5,1,2,1,1,1,4,1,1,1,1,3,1,1,3,1,1,1,1,1,1,2,3,4,2,1,3,1,1,2,1,1,2,1,5,2,1,1,1,1,1,1,4,1,1,1,1,5,1,4,1,1,1,3,3,1,3,1,3,1,4,1,1,1,1,1,4,5,1,1,3,2,2,5,5,4,3,1,2,1,1,1,4,1,3,4,1,1,1,1,2,1,1,3,2,1,1,1,1,1,4,1,1,1,4,4,5,2,1,1,1,1,1,2,4,2,1,1,1,2,1,1,2,1,5,1,5,2,5,5,1,1,3,1,4,1,1,1,1,1,1,1,4,1,1,4,1,1,1,1,1,2,1,2,1,1,1,5,1,1,3,5,1,1,5,5,3,5,3,4,1,1,1,3,1,1,3,1,1,1,1,1,1,5,1,3,1,5,1,1,4,1,3,1,1,1,2,1,1,1,2,1,5,1,1,1,1,4,1,3,2,3,4,1,3,5,3,4,1,4,4,4,1,3,2,4,1,4,1,1,2,1,3,1,5,5,1,5,1,1,1,5,2,1,2,3,1,4,3,3,4,3]
counts = {
    0: 0,
    1: 0,
    2: 0,
    3: 0,
    4: 0,
    5: 0,
    6: 0,
    7: 0,
    8: 0,
}
# Tally how many fish start at each timer value
for i in initial:
    counts[i] += 1
# Each day every count shifts down one timer value; fish at 0 reset to 6 and spawn new fish at 8
for day in range(1,257):
    new_spawn_and_reset = counts[0]
    for i in range(0,9):
        if i < 8:
            counts[i] = counts[i+1]
        if i == 6:
            counts[i] += new_spawn_and_reset
        if i == 8:
            counts[i] = new_spawn_and_reset
sum_of_counts = 0
for key, value in counts.items():
    sum_of_counts += value
print("fish: ", sum_of_counts)
Day 6, part 1:
initial = [3,4,1,1,5,1,3,1,1,3,5,1,1,5,3,2,4,2,2,2,1,1,1,1,5,1,1,1,1,1,3,1,1,5,4,1,1,1,4,1,1,1,1,2,3,2,5,1,5,1,2,1,1,1,4,1,1,1,1,3,1,1,3,1,1,1,1,1,1,2,3,4,2,1,3,1,1,2,1,1,2,1,5,2,1,1,1,1,1,1,4,1,1,1,1,5,1,4,1,1,1,3,3,1,3,1,3,1,4,1,1,1,1,1,4,5,1,1,3,2,2,5,5,4,3,1,2,1,1,1,4,1,3,4,1,1,1,1,2,1,1,3,2,1,1,1,1,1,4,1,1,1,4,4,5,2,1,1,1,1,1,2,4,2,1,1,1,2,1,1,2,1,5,1,5,2,5,5,1,1,3,1,4,1,1,1,1,1,1,1,4,1,1,4,1,1,1,1,1,2,1,2,1,1,1,5,1,1,3,5,1,1,5,5,3,5,3,4,1,1,1,3,1,1,3,1,1,1,1,1,1,5,1,3,1,5,1,1,4,1,3,1,1,1,2,1,1,1,2,1,5,1,1,1,1,4,1,3,2,3,4,1,3,5,3,4,1,4,4,4,1,3,2,4,1,4,1,1,2,1,3,1,5,5,1,5,1,1,1,5,2,1,2,3,1,4,3,3,4,3]
for day in range(1,81):
    index = 0
    for fish in initial:
        fish -= 1
        if fish == -1:
            fish = 6
            # Append 9 rather than 8: the new fish is visited later in this
            # same pass over the list and immediately decremented to 8
            initial.append(9)
        initial[index] = fish
        index += 1
print(len(initial))
Day 5, part 2:
straight_lines = []
diagonal_lines = []
x_values = []
y_values = []
with open("input_day5.txt", "r", encoding="utf8") as fin:
    for f in fin:
        f = f.replace("\n","")
        x1 = int(f[:f.index(",")])
        y1 = int(f[f.index(",")+1:f.index(" -> ")])
        x2 = int(f[f.index(" -> ")+4:f.index(",",f.index(" -> ")+4)])
        y2 = int(f[f.index(",",f.index(" -> "))+1:])
        x_values.append(x1)
        x_values.append(x2)
        y_values.append(y1)
        y_values.append(y2)
        if x1 == x2 or y1 == y2:
            straight_lines.append([(x1, y1), (x2, y2)])
        elif x1 != x2 and y1 != y2:
            diagonal_lines.append([(x1, y1), (x2, y2)])
coordinates = {}
for x in range(max(x_values)+1):
    for y in range(max(y_values)+1):
        coordinates[str(x) + "," + str(y)] = 0
def add_line_to_coordinates(x,y):
    key = str(x) + "," + str(y)
    coordinates[key] += 1
# Diagonal lines run at 45 degrees, so x and y both change by one per step
for line in diagonal_lines:
    if line[0][0] > line[1][0] and line[0][1] > line[1][1]:
        number_of_coordinates = line[0][0] - line[1][0]
        for i in range(number_of_coordinates + 1):
            add_line_to_coordinates(line[1][0] + i, line[1][1] + i)
    elif line[0][0] > line[1][0] and line[0][1] < line[1][1]:
        number_of_coordinates = line[0][0] - line[1][0]
        for i in range(number_of_coordinates + 1):
            add_line_to_coordinates(line[1][0] + i, line[1][1] - i)
    elif line[0][0] < line[1][0] and line[0][1] > line[1][1]:
        number_of_coordinates = line[1][0] - line[0][0]
        for i in range(number_of_coordinates + 1):
            add_line_to_coordinates(line[0][0] + i, line[0][1] - i)
    elif line[0][0] < line[1][0] and line[0][1] < line[1][1]:
        number_of_coordinates = line[1][0] - line[0][0]
        for i in range(number_of_coordinates + 1):
            add_line_to_coordinates(line[0][0] + i, line[0][1] + i)
for line in straight_lines:
    if line[0][0] == line[1][0]: # x values are equal
        if line[0][1] < line[1][1]: # first y value is lowest
            for i in range(line[0][1], line[1][1]+1):
                add_line_to_coordinates(line[0][0], i)
        elif line[0][1] > line[1][1]: # second y value is lowest
            for i in range(line[1][1], line[0][1]+1):
                add_line_to_coordinates(line[0][0], i)
        else: # y values are equal, so only one point
            add_line_to_coordinates(line[0][0], line[0][1])
    else: # y values are equal
        if line[0][0] < line[1][0]: # first x value is lowest
            for i in range(line[0][0], line[1][0]+1):
                add_line_to_coordinates(i, line[0][1])
        elif line[0][0] > line[1][0]: # second x value is lowest
            for i in range(line[1][0], line[0][0]+1):
                add_line_to_coordinates(i, line[0][1])
        else:
            add_line_to_coordinates(line[0][0], line[0][1])
double_hits = 0
for coordinate, number_of_hits in coordinates.items():
    if number_of_hits > 1:
        double_hits += 1
print(double_hits)
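Pre-building a dictionary key for every coordinate in the bounding box can take a lot of memory on a large grid. A sparse alternative is to count only the points that lines actually pass through, for instance with collections.Counter and tuple keys. A sketch of the changed bookkeeping, assuming the same line loops as above:
from collections import Counter
coordinates = Counter()
def add_line_to_coordinates(x, y):
    coordinates[(x, y)] += 1 # missing keys start at 0
# ...run the same diagonal and straight line loops as above, then:
double_hits = sum(1 for hits in coordinates.values() if hits > 1)
print(double_hits)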
Day 5, part 1:
straight_lines = []
x_values = []
y_values = []
with open("input_day5.txt", "r", encoding="utf8") as fin:
    for f in fin:
        f = f.replace("\n","")
        x1 = int(f[:f.index(",")])
        y1 = int(f[f.index(",")+1:f.index(" -> ")])
        x2 = int(f[f.index(" -> ")+4:f.index(",",f.index(" -> ")+4)])
        y2 = int(f[f.index(",",f.index(" -> "))+1:])
        if x1 == x2 or y1 == y2:
            x_values.append(x1)
            x_values.append(x2)
            y_values.append(y1)
            y_values.append(y2)
            straight_lines.append([(x1, y1), (x2, y2)])
coordinates = {}
for x in range(max(x_values)+1):
    for y in range(max(y_values)+1):
        coordinates[str(x) + "," + str(y)] = 0
def add_line_to_coordinates(x,y):
    key = str(x) + "," + str(y)
    coordinates[key] += 1
for line in straight_lines:
    if line[0][0] == line[1][0]: # x values are equal
        if line[0][1] < line[1][1]: # first y value is lowest
            for i in range(line[0][1], line[1][1]+1):
                add_line_to_coordinates(line[0][0], i)
        elif line[0][1] > line[1][1]: # second y value is lowest
            for i in range(line[1][1], line[0][1]+1):
                add_line_to_coordinates(line[0][0], i)
        else: # y values are equal, so only one point
            add_line_to_coordinates(line[0][0], line[0][1])
    else: # y values are equal
        if line[0][0] < line[1][0]: # first x value is lowest
            for i in range(line[0][0], line[1][0]+1):
                add_line_to_coordinates(i, line[0][1])
        elif line[0][0] > line[1][0]: # second x value is lowest
            for i in range(line[1][0], line[0][0]+1):
                add_line_to_coordinates(i, line[0][1])
        else:
            add_line_to_coordinates(line[0][0], line[0][1])
double_hits = 0
for coordinate, number_of_hits in coordinates.items():
    if number_of_hits > 1:
        double_hits += 1
print(double_hits)
Day 4, part 2:
boards = []
with open("input_day4.txt", "r", encoding="utf8") as fin:
    counter = 0
    sub_counter = 0
    board = []
    for f in fin:
        if counter == 0: # first line holds the drawn numbers
            f = f.replace("\n","")
            drawn_numbers = f.split(",")
        elif f == '\n': # blank line: a new board starts
            sub_counter = 0
            board = []
        else:
            if f[0] == " ":
                f = f[1:]
            if "  " in f:
                f = f.replace("  "," ")
            f = f.replace("\n","")
            board.extend(f.split(" "))
            sub_counter += 1
            if sub_counter == 5: # five rows make a complete board
                board = [b.replace("\n","") for b in board]
                boards.append(board)
        counter = 1
# A board wins when five cells in a row or a column are marked with 'd'
def check_board(board):
    i = 0
    while i < 25: # rows: five consecutive cells
        d_count = 0
        for number in board[i:i+5]:
            if 'd' in number:
                d_count += 1
        if d_count == 5:
            return True
        i += 5
    row = 0
    while row < 5: # columns: every fifth cell
        i = row
        d_count = 0
        while i < row + 25:
            if 'd' in board[i]:
                d_count += 1
            if d_count == 5:
                return True
            i += 5
        row += 1
def calculate_score(board, draw):
    sum_of_numbers = 0
    for number in board:
        if not 'd' in number:
            sum_of_numbers += int(number)
    return sum_of_numbers * int(draw)
winning_boards = []
scores = []
draw_counter = 0
for draw in drawn_numbers:
    board_counter = 0
    for board in boards:
        if board not in winning_boards:
            number_counter = 0
            for number in board:
                if number == draw:
                    boards[board_counter][number_counter] += 'd'
                number_counter += 1
            bingo = check_board(board)
            if bingo == True:
                winning_boards.append(board)
                scores.append(calculate_score(board, draw))
        board_counter += 1
    draw_counter += 1
print(scores[-1])
Day 4, part 1:
boards = []
with open("input_day4.txt", "r", encoding="utf8") as fin:
    counter = 0
    sub_counter = 0
    board = []
    for f in fin:
        if counter == 0: # first line holds the drawn numbers
            f = f.replace("\n","")
            drawn_numbers = f.split(",")
        elif f == '\n': # blank line: a new board starts
            sub_counter = 0
            board = []
        else:
            if f[0] == " ":
                f = f[1:]
            if "  " in f:
                f = f.replace("  "," ")
            f = f.replace("\n","")
            board.extend(f.split(" "))
            sub_counter += 1
            if sub_counter == 5: # five rows make a complete board
                board = [b.replace("\n","") for b in board]
                boards.append(board)
        counter = 1
# A board wins when five cells in a row or a column are marked with 'd'
def check_board(board):
    i = 0
    while i < 25: # rows: five consecutive cells
        d_count = 0
        for number in board[i:i+5]:
            if 'd' in number:
                d_count += 1
        if d_count == 5:
            return True
        i += 5
    row = 0
    while row < 5: # columns: every fifth cell
        i = row
        d_count = 0
        while i < row + 25:
            if 'd' in board[i]:
                d_count += 1
            if d_count == 5:
                return True
            i += 5
        row += 1
def calculate_score(board, draw):
    sum_of_numbers = 0
    for number in board:
        if not 'd' in number:
            sum_of_numbers += int(number)
    return sum_of_numbers * int(draw)
def play_bingo():
    draw_counter = 0
    for draw in drawn_numbers:
        board_counter = 0
        for board in boards:
            number_counter = 0
            for number in board:
                if number == draw:
                    boards[board_counter][number_counter] += 'd'
                number_counter += 1
            bingo = check_board(board)
            if bingo == True:
                return calculate_score(board, draw)
            board_counter += 1
        draw_counter += 1
score = play_bingo()
print(score)
Day 3, part 2:
rates = []
with open("input_day3", "r", encoding="utf8") as fin:
    for f in fin:
        rates.append(f)
# Work on copies so filtering one list cannot affect the other
oxygen_rates = list(rates)
co2_rates = list(rates)
for i in range(len(rates[0])-1): # -1 skips the trailing newline
    zero_count = 0
    one_count = 0
    for rate in oxygen_rates:
        if rate[i] == '0':
            zero_count += 1
        elif rate[i] == '1':
            one_count += 1
    if zero_count > one_count:
        oxygen_rates = [rate for rate in oxygen_rates if rate[i] == '0']
    elif one_count > zero_count or one_count == zero_count:
        oxygen_rates = [rate for rate in oxygen_rates if rate[i] == '1']
    if len(oxygen_rates) == 1:
        oxygen = oxygen_rates[0]
    zero_count = 0
    one_count = 0
    for rate in co2_rates:
        if rate[i] == '0':
            zero_count += 1
        elif rate[i] == '1':
            one_count += 1
    if zero_count > one_count:
        co2_rates = [rate for rate in co2_rates if rate[i] == '1']
    elif one_count > zero_count or one_count == zero_count:
        co2_rates = [rate for rate in co2_rates if rate[i] == '0']
    if len(co2_rates) == 1:
        co2 = co2_rates[0]
print(int(oxygen,2)*int(co2,2))
Day 3, part 1:
rates = []
with open("input_day3", "r", encoding="utf8") as fin:
    for f in fin:
        rates.append(f)
gamma_rate = ""
epsilon_rate = ""
for i in range(len(rates[0])-1): # -1 skips the trailing newline
    zero_count = 0
    one_count = 0
    for rate in rates:
        if int(rate[i]) == 0:
            zero_count += 1
        elif int(rate[i]) == 1:
            one_count += 1
    if zero_count > one_count:
        gamma_rate += '0'
        epsilon_rate += '1'
    else:
        gamma_rate += '1'
        epsilon_rate += '0'
print(int(gamma_rate,2) * int(epsilon_rate,2))
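Since every bit position goes to '1' in exactly one of the two rates, epsilon is simply gamma with every bit flipped within the input's bit width, so only gamma really needs to be built. A sketch:
bits = len(rates[0]) - 1 # -1 for the trailing newline
gamma = int(gamma_rate, 2)
epsilon = gamma ^ ((1 << bits) - 1) # flip every bit within the width
print(gamma * epsilon)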
Day 2, part 2:
position_list = []
with open("input_day2.txt", "r", encoding="utf8") as fin:
    for f in fin:
        position_list.append(f)
horisontal_position = 0
aim = 0
depth = 0
for move in position_list:
    if 'forward ' in move:
        number = int(move.replace('forward ',''))
        horisontal_position += number
        depth += number * aim # adds nothing while aim is 0
    elif 'down ' in move:
        number = int(move.replace('down ',''))
        aim += number
    elif 'up ' in move:
        number = int(move.replace('up ',''))
        aim -= number
    else:
        breakpoint() # unexpected input line
print(horisontal_position*depth)
Day 2, part 1:
position_list = []
with open("input_day2.txt", "r", encoding="utf8") as fin:
    for f in fin:
        position_list.append(f)
forward_position = 0
depth = 0
for move in position_list:
    if 'forward ' in move:
        number = int(move.replace('forward ',''))
        forward_position += number
    elif 'down ' in move:
        number = int(move.replace('down ',''))
        depth += number
    elif 'up ' in move:
        number = int(move.replace('up ',''))
        depth -= number
    else:
        breakpoint() # unexpected input line
print(forward_position*depth)
Day 1, part 2:
increase_count = 0
loop_count = 0
number_list = []
with open("input.txt", "r", encoding="utf8") as fin:
    for f in fin:
        number_list.append(int(f))
index_start = 0
index_end = 3
while index_end <= len(number_list):
    if loop_count == 0:
        last_sum = sum(number_list[index_start:index_end])
        loop_count += 1
        index_start += 1
        index_end += 1
    else:
        new_sum = sum(number_list[index_start:index_end])
        if new_sum > last_sum:
            increase_count += 1
        index_start += 1
        index_end += 1
        last_sum = new_sum
print(increase_count)
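Two consecutive three-measurement windows share their two middle values, so comparing their sums reduces to comparing the two values that differ: the later window is larger exactly when number_list[i + 3] > number_list[i]. That turns part 2 into a near-copy of part 1; a sketch:
increase_count = sum(
    1
    for i in range(len(number_list) - 3)
    if number_list[i + 3] > number_list[i]
)
print(increase_count)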
Day 1, part 1:
increase_count = 0
loop_count = 0
with open("input.txt", "r", encoding="utf8") as fin:
    for f in fin:
        number = int(f)
        if loop_count == 0:
            last_number = number
            loop_count += 1
            continue
        else:
            if number > last_number:
                increase_count += 1
            last_number = number
print(increase_count)
Optimizing the collection of data from Borgerforslag.dk
A couple of weeks ago I wrote about a little robot I built that checks the number of votes per citizens' proposal on borgerforslag.dk.
The first version of the robot saved the current vote count for every active proposal every 10 minutes, and since there are quite a few proposals and quite a few minutes, that quickly added up to a lot of rows in my database.
It occurred to me that the vote count only needs to be saved when it has changed since the last registration. If a proposal goes viral, its count is still recorded every 10 minutes. If a proposal is dying out, much more time can pass between registrations.
Here is the new version of the robot, which checks whether a registration of the same proposal with the same vote count already exists, and only saves the count if it does not:
import requests
from datetime import datetime
import locale
import psycopg2
from psycopg2 import Error
# Locale is set to Danish to parse dates correctly
locale.setlocale(locale.LC_TIME, ('da_DK', 'UTF-8'))
# API url
url = 'https://www.borgerforslag.dk/api/proposals/search'
# Query parameters
suggestions_per_request = 300
params_json = {
    "filter": "active",
    "sortOrder": "NewestFirst",
    "searchQuery": "",
    "pageNumber": 0,
    "pageSize": suggestions_per_request
}
# Connect to database
try:
    connection = psycopg2.connect(user = "",
        password = "",
        host = "",
        port = "",
        database = "")
    cursor = connection.cursor()
except (Exception, psycopg2.Error) as error:
    print("Error while connecting to PostgreSQL", error)
now = datetime.utcnow()
# Insert into database function
def insert_suggestion_and_votes(connection, suggestion):
    with connection:
        with connection.cursor() as cur:
            try:
                # By default, votes are inserted, except when no new votes have been added
                # This variable is used to keep track of whether votes should be inserted
                insert_votes = True
                # See if suggestion already exists in table borgerforslag_suggestion
                sql = '''SELECT * FROM borgerforslag_suggestion WHERE unique_id = %s'''
                cur.execute(sql, (suggestion['externalId'],))
                suggestion_records = cur.fetchone()
                # If suggestion does not already exist, add suggestion to table borgerforslag_suggestion
                if not suggestion_records:
                    suggestion_data = (suggestion['externalId'],suggestion['title'],suggestion['date'],suggestion['url'],suggestion['status'])
                    sql = '''INSERT INTO borgerforslag_suggestion(unique_id,title,suggested_date,url,status) VALUES(%s,%s,%s,%s,%s) RETURNING id'''
                    cur.execute(sql, suggestion_data)
                    id = cur.fetchone()[0]
                # If yes, get id of already added suggestion
                else:
                    id = suggestion_records[0]
                # Check in table borgerforslag_vote whether a record with the same number of votes exists.
                # If it does, no need to save votes
                sql = '''SELECT * FROM borgerforslag_vote WHERE suggestion_id = %s AND votes = %s'''
                cur.execute(sql, (id,suggestion['votes']))
                vote_record = cur.fetchone()
                if vote_record:
                    insert_votes = False
                # Add votes to table borgerforslag_vote (if suggestion is new or vote count has changed since last run)
                if insert_votes == True:
                    sql = '''INSERT INTO borgerforslag_vote(suggestion_id,timestamp,votes)
                             VALUES(%s,%s,%s)'''
                    cur.execute(sql, (id,now,suggestion['votes']))
            except Error as e:
                print(e, suggestion)
# Loop preparation
requested_results = 0
number_of_results = requested_results + 1
number_of_loops = 0
# Loop to get suggestions and add them to database
while requested_results < number_of_results and number_of_loops < 10:
    response = requests.post(url, json=params_json)
    json_response = response.json()
    number_of_results = json_response['resultCount']
    requested_results += suggestions_per_request
    number_of_loops += 1
    params_json['pageNumber'] += 1
    for suggestion in json_response['data']:
        suggestion['date'] = datetime.strptime(suggestion['date'], '%d. %B %Y') # convert date to datetime
        insert_suggestion_and_votes(connection, suggestion)
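The select-then-insert logic could also be collapsed into a single statement, letting PostgreSQL itself skip the insert when a row with the same vote count already exists. A sketch, using the same table and column names as the script above:
sql = '''INSERT INTO borgerforslag_vote(suggestion_id, timestamp, votes)
         SELECT %s, %s, %s
         WHERE NOT EXISTS (
             SELECT 1 FROM borgerforslag_vote
             WHERE suggestion_id = %s AND votes = %s
         )'''
cur.execute(sql, (id, now, suggestion['votes'], id, suggestion['votes']))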
Cleanup
Now that I had slowed down the pace at which my database grows, I also wanted to clean up the old registrations, where I had saved the vote count every 10 minutes whether or not it had changed.
I wrote a small script for that too. The logic is that I fetch all vote registrations sorted first by the proposal they belong to and then by the time of registration.
With that ordering in place, I can check for each registration whether it both concerns the same proposal as the previous registration and has the same vote count as the previous registration. If both are true, the registration is redundant and can be deleted:
import psycopg2
from psycopg2 import Error
# Connect to database
try:
    connection = psycopg2.connect(user = "",
        password = "",
        host = "",
        port = "",
        database = "")
    cursor = connection.cursor()
except (Exception, psycopg2.Error) as error:
    print("Error while connecting to PostgreSQL", error)
with connection:
    with connection.cursor() as cur:
        sql = '''SELECT "borgerforslag_vote"."id", "borgerforslag_vote"."suggestion_id", "borgerforslag_vote"."timestamp", "borgerforslag_vote"."votes" FROM "borgerforslag_vote" ORDER BY "borgerforslag_vote"."suggestion_id" ASC, "borgerforslag_vote"."timestamp" ASC'''
        cur.execute(sql)
        rows = cur.fetchall()
        previous_vote_number = -1
        previous_vote_suggestion = -1000
        for row in rows:
            votes = row[3]
            suggestion = row[1]
            id = row[0]
            if votes == previous_vote_number and previous_vote_suggestion == suggestion:
                sql = '''DELETE FROM "borgerforslag_vote" WHERE "borgerforslag_vote"."id" = %s'''
                cur.execute(sql, (id, ))
            previous_vote_number = row[3]
            previous_vote_suggestion = row[1]
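The same cleanup could be pushed entirely into PostgreSQL with a window function: LAG() fetches the previous vote count per proposal in timestamp order, and rows where nothing changed are deleted in one statement. A sketch under the same schema assumptions:
with connection:
    with connection.cursor() as cur:
        cur.execute('''DELETE FROM borgerforslag_vote WHERE id IN (
            SELECT id FROM (
                SELECT id, votes, LAG(votes) OVER (
                    PARTITION BY suggestion_id ORDER BY timestamp
                ) AS previous_votes
                FROM borgerforslag_vote
            ) AS ordered_votes
            WHERE votes = previous_votes
        )''')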
Testing DR's candidate test with Python
At Danmarks Radio's site you can try to clarify your political views and see which politicians' views most resemble your own. The list of methodological problems with candidate tests like this is long, but on Twitter, Søren pointed out a possible bias arising simply because the big parties typically have "more candidates in the bag" than the small ones:
[Embedded tweet]
Tobias was the first to offer an answer, but I couldn't resist testing the hypothesis myself. The result is this little program, which fires 25,000 random sets of answers at the test and records what Danmarks Radio answers back. Note that the program does not generate 25,000 distinct random combinations; it simply generates a random combination 25,000 times, so the same combination can in principle occur more than once among the 25,000:
import requests
import random
base_url = 'https://www.dr.dk/nyheder/politik/api/kandidattest/GetMunicipalityMatch?municipality=124&answers='
stats = {}
for i in range(25000):
    try:
        # 18 answers, each one of the four possible values 1, 2, 4 and 5
        sequence = ",".join([str(random.choice([1,2,4,5])) for _ in range(18)])
        response = requests.get(base_url + sequence)
        json = response.json()
        candidate_one_party = json['TopMatches'][0]['CandidateBasic']['Party']
        if candidate_one_party not in stats:
            stats[candidate_one_party] = 1
        else:
            stats[candidate_one_party] += 1
        print(i)
    except Exception: # skip requests that fail or return unexpected data
        pass
with open('stats.txt', 'w') as output:
    output.write(str(stats))
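How likely are duplicate draws, actually? With four possible answers to 18 questions there are 4^18, roughly 6.9 * 10^10, possible combinations, so a quick birthday-style estimate says duplicates among 25,000 draws are rare:
combinations = 4 ** 18 # four possible answers to each of 18 questions
draws = 25000
# Expected number of colliding pairs by the birthday approximation
print(draws * (draws - 1) / (2 * combinations)) # roughly 0.005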
For the Municipality of Copenhagen, this produced the following ranking. The table shows how many times a candidate from each party was the one the randomly generated answers agreed with the most:
Party | Count | Percent |
---|---|---|
Socialdemokratiet | 4210 | 17% |
Det Konservative Folkeparti | 3680 | 15% |
Radikale Venstre | 3262 | 13% |
Venstre, Danmarks Liberale Parti | 2632 | 11% |
SF – Socialistisk Folkeparti | 2034 | 8% |
Alternativet | 1528 | 6% |
Kristendemokraterne | 1231 | 5% |
Frihedslisten | 1120 | 4% |
Nye Borgerlige | 1109 | 4% |
Enhedslisten – De Rød-Grønne | 963 | 4% |
Dansk Folkeparti | 960 | 4% |
Kommunisterne | 606 | 2% |
Veganerpartiet | 421 | 2% |
Københavnerlisten | 285 | 1% |
Hampepartiet | 273 | 1% |
Liberal Alliance | 193 | 1% |
Kommunistisk Parti | 190 | 1% |
Danmark for Alle | 170 | 1% |
Det Demokratiske Parti | 67 | 0% |
Bæredygtigt Samfund | 43 | 0% |
Rolig Revolution | 21 | 0% |
Total | 24998 | 100% |
Try it yourself if you like! And remember: most people who take candidate tests in real life probably don't roll dice to pick their answers.
How does the number of signatures on Borgerforslag.dk develop over time?
On Twitter, Peter Brodersen wrote:
[Embedded tweet]
I think Peter's idea is a fun one, so I've slowly started building something that monitors how the number of signatures on citizens' proposals develops over time.
So now my web server chews through the script below every 10 minutes and saves the current number of signatures for each proposal. Once a few weeks have passed, I'll see whether I can turn the data into some interesting visualizations.
import requests
from datetime import datetime
import locale
import psycopg2
from psycopg2 import Error
### PREPARATION ###
# Locale is set to Danish to be able to parse dates from Borgerforslag
locale.setlocale(locale.LC_TIME, ('da_DK', 'UTF-8'))
# API url and request parameters
url = 'https://www.borgerforslag.dk/api/proposals/search'
suggestions_per_request = 300
params_json = {
    "filter": "active",
    "sortOrder": "NewestFirst",
    "searchQuery": "",
    "pageNumber": 0,
    "pageSize": suggestions_per_request
}
# Connect to database
try:
    connection = psycopg2.connect(user = "",
        password = "",
        host = "",
        port = "",
        database = "")
    cursor = connection.cursor()
except (Exception, psycopg2.Error) as error:
    print("Error while connecting to PostgreSQL", error)
now = datetime.utcnow()
# Insert into database function
def insert_suggestion_and_votes(connection, suggestion):
    with connection:
        with connection.cursor() as cur:
            try:
                # See if suggestion already exists
                sql = '''SELECT * FROM borgerforslag_suggestion WHERE unique_id = %s'''
                cur.execute(sql, (suggestion['externalId'],))
                suggestion_records = cur.fetchone()
                # If not, add suggestion
                if not suggestion_records:
                    suggestion_data = (suggestion['externalId'],suggestion['title'],suggestion['date'],suggestion['url'],suggestion['status'])
                    sql = '''INSERT INTO borgerforslag_suggestion(unique_id,title,suggested_date,url,status) VALUES(%s,%s,%s,%s,%s) RETURNING id'''
                    cur.execute(sql, suggestion_data)
                    id = cur.fetchone()[0]
                # If yes, get id
                else:
                    id = suggestion_records[0]
                # Add votes
                sql = '''INSERT INTO borgerforslag_vote(suggestion_id,timestamp,votes)
                         VALUES(%s,%s,%s)'''
                cur.execute(sql, (id,now,suggestion['votes']))
            except Error as e:
                print(e, suggestion)
# Loop preparation
requested_results = 0
number_of_results = requested_results + 1
number_of_loops = 0
# Loop to get suggestions and add them to database
while requested_results < number_of_results and number_of_loops < 10:
    response = requests.post(url, json=params_json)
    json_response = response.json()
    number_of_results = json_response['resultCount']
    requested_results += suggestions_per_request
    number_of_loops += 1
    params_json['pageNumber'] += 1
    for suggestion in json_response['data']:
        suggestion['date'] = datetime.strptime(suggestion['date'], '%d. %B %Y') # convert date to datetime
        insert_suggestion_and_votes(connection, suggestion)