- camelot was very slow, so I switched to pdfplumber.
- Nos. 5185 and 5206 were swapped, so the rows are reordered by sorting on the index.
- From 12/30 onward, overlapping glyphs turn 「未満」 into 「未満代」: camelot merges the extra 「代」 in, while pdfplumber drops it (see the sketch below for cleaning up the camelot case).
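If you do stay on camelot, the merged 「代」 has to be stripped back out yourself. A minimal sketch, not part of the original script; the column name 「年代」 and the sample values are my assumptions:

```python
import pandas as pd

# Hypothetical camelot output: "10歳未満" got an overlapping "代" merged in
df = pd.DataFrame({"年代": ["10歳未満代", "20代", "30代"]})

# Strip the spurious 「代」 only where it follows 「未満」
df["年代"] = df["年代"].str.replace("未満代", "未満", regex=False)

print(df["年代"].tolist())  # ['10歳未満', '20代', '30代']
```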
```
pip install pdfplumber
```
```python
import pathlib
import re
from urllib.parse import urljoin

import pandas as pd
import pdfplumber
import requests
from bs4 import BeautifulSoup


def fetch_file(url, dir="."):
    # Download url and save it under dir, returning the local path
    r = requests.get(url)
    r.raise_for_status()

    p = pathlib.Path(dir, pathlib.PurePath(url).name)
    p.parent.mkdir(parents=True, exist_ok=True)

    with p.open(mode="wb") as fw:
        fw.write(r.content)

    return p


def days2date(s):
    # apply passes each row as a Series; s.name is the "No" index value.
    # Cases after No 16576 were announced in 2021, earlier ones in 2020.
    y = 2021 if s.name > 16576 else 2020

    days = re.findall("[0-9]{1,2}", s["発表日"])

    if len(days) == 2:
        m, d = map(int, days)
        return pd.Timestamp(year=y, month=m, day=d)
    else:
        return pd.NaT


url = "https://www.pref.aichi.jp/site/covid19-aichi/"

# Spoof a browser User-Agent
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko"
}

r = requests.get(url, headers=headers)
r.raise_for_status()

soup = BeautifulSoup(r.content, "html.parser")

dfs = []

# Follow every PDF link under 「▶ 愛知県内の発生事例」, in reverse order
for tag in soup.find("span", text="▶ 愛知県内の発生事例").parent.find_all(
    "a", href=re.compile(".pdf$")
)[::-1]:

    link = urljoin(url, tag.get("href"))
    path_pdf = fetch_file(link)

    with pdfplumber.open(path_pdf) as pdf:
        for page in pdf.pages:
            table = page.extract_table()

            # First row of the extracted table is the header
            df_tmp = pd.DataFrame(table[1:], columns=table[0])
            dfs.append(df_tmp)

df = pd.concat(dfs).set_index("No")
df.dropna(subset=["発表日"], inplace=True)

# Sort on No to fix the swapped 5185/5206 rows
df.index = df.index.astype(int)
df.sort_index(inplace=True)

df["発表日"] = df.apply(days2date, axis=1)

df["date"] = df["発表日"].dt.strftime("%Y-%m-%d")

# Day of week with Sunday = 0 (pandas dayofweek has Monday = 0)
df["w"] = (df["発表日"].dt.dayofweek + 1) % 7
df["w"] = df["w"].astype(str)

# Keep the escaped slash (e.g. 12\/30) in the output format
df["short_date"] = df["発表日"].dt.strftime("%m\\/%d")

df["発表日"] = df["発表日"].dt.strftime("%Y/%m/%d %H:%M")

p = pathlib.Path("./data/patients.csv")
p.parent.mkdir(parents=True, exist_ok=True)

# utf_8_sig adds a BOM so Excel opens the UTF-8 CSV correctly
df.to_csv(p, encoding="utf_8_sig")
```
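As a quick check on the 5185/5206 fix, the following can be appended after the script above (my addition, assuming `df` is still in scope):

```python
# After sort_index, the No index should be monotonically increasing,
# i.e. the swapped 5185/5206 rows are back in their proper order.
assert df.index.is_monotonic_increasing
```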