Suspicious-person map


Suspicious-person map of Ehime Prefecture: https://imabari.github.io/fushinsha_map/

import pathlib

import pandas as pd
import requests

import folium

GEO_URL = "https://raw.githubusercontent.com/geolonia/japanese-addresses/master/data/latest.csv"


def fetch_file(url, dir="."):
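    # Download the file at url into dir and return the local path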

    r = requests.get(url)
    r.raise_for_status()

    p = pathlib.Path(dir, pathlib.PurePath(url).name)
    p.parent.mkdir(parents=True, exist_ok=True)

    with p.open(mode="wb") as fw:
        fw.write(r.content)
    return p


if __name__ == "__main__":

    # Translation table: convert ASCII digits to kanji numerals
    kanji = str.maketrans("1234567890", "一二三四五六七八九〇")

    p_geo = fetch_file(GEO_URL, "src")

    df_geo = pd.read_csv(p_geo)

    # Keep only rows for Ehime Prefecture
    df_geo_ehime = df_geo[df_geo["都道府県名"] == "愛媛県"].copy()

    # Join the city and town names into one address
    df_geo_ehime["address"] = df_geo_ehime["市区町村名"] + df_geo_ehime["大字町丁目名"]

    # Scrape suspicious-person reports from the Ehime Prefectural Police site
    df_tmp = (
        pd.read_html(
            "http://www.police.pref.ehime.jp/fushinsha.htm", match="概 要", header=0
        )[0]
        .fillna("")
        .astype(str)
    )

    # Normalize the text to NFKC
    df_tmp["概 要"] = df_tmp["概 要"].str.normalize("NFKC")

    # Split the summary into its fields
    df = df_tmp["概 要"].str.extract("(.+)◆.+:(.+)◆.+:(.+)◆.+:(.+)◆.+:(.+)")

    # Rename the columns
    df.rename(columns={0: "管轄署", 1: "種別", 2: "日時", 3: "場所", 4: "状況"}, inplace=True)

    # Strip surrounding whitespace
    df = df.applymap(lambda s: s.strip())

    # Strip the parentheses
    df["管轄署"] = df["管轄署"].str.strip("()")

    # Fix a misspelled place name (常磐 → 常盤)
    df["場所"] = df["場所"].str.replace("常磐", "常盤")

    # Convert digits in the location to kanji numerals (to match the geo data)
    df["場所"] = df["場所"].apply(lambda s: s.translate(kanji))

    # Trim the location down to the town name
    df["住所"] = df["場所"].str.replace(
        "(路上|施設|店舗|付近|一般住宅|住宅|アパート|マンション|公園|屋外|緑地|駐輪場|駐車場|河川敷|児童).*", "", regex=True
    )

    # Strip trailing 甲/乙/丙/の characters
    df["address"] = df["住所"].str.rstrip("甲乙丙の")

    # 北新田 is not in the geo data, so replace it with 新田
    df["address"] = df["address"].str.replace("西条市新田字北新田", "西条市新田")

    df["count"] = df.groupby("address").cumcount()

    # Check which incident types appear
    print(df["種別"].unique())

    # Assign an icon color to each incident type
    df["color"] = df["種別"].replace(
        {
            "のぞき・盗撮": "pink",
            "身体露出": "orange",
            "ちかん": "gray",
            "不審者": "purple",
            "声かけ": "green",
            "暴行": "red",
            "つきまとい": "blue",
            "写真撮影": "lightred",
            "建造物侵入": "darkred",
            "住居侵入": "darkred",
            "のぞき": "pink",
            "動画撮影": "lightred",
        }
    )

    # Any type not listed above defaults to black
    df["color"] = df["color"].fillna("black")

    # Icon colors available in folium (for reference)
    colors = {
        "lightred",
        "darkred",
        "darkblue",
        "pink",
        "gray",
        "green",
        "orange",
        "purple",
        "lightgray",
        "blue",
        "beige",
        "cadetblue",
        "darkgreen",
        "darkpurple",
        "lightblue",
        "black",
        "lightgreen",
        "red",
        "white",
    }

    # Merge latitude/longitude onto the reports by address
    df_ehime = df.merge(df_geo_ehime, how="left", on="address")

    p_csv = pathlib.Path("map", "ehime.csv")
    p_csv.parent.mkdir(parents=True, exist_ok=True)

    df_ehime.to_csv(p_csv, encoding="utf_8_sig")

    # Save rows with missing coordinates for inspection
    df_nan = df_ehime[df_ehime.isnull().any(axis=1)]
    p_nan = pathlib.Path("map", "nan.csv")
    df_nan.to_csv(p_nan, encoding="utf_8_sig")

    # Drop rows that could not be geocoded
    df_ehime.dropna(inplace=True)

    fmap = folium.Map(location=[34.06604300, 132.99765800], zoom_start=10)

    # Markers sharing an address are shifted slightly in longitude so they don't overlap
    for i, r in df_ehime.iterrows():
        folium.Marker(
            location=[r["緯度"], r["経度"] + r["count"] * 0.0002],
            popup=folium.Popup(
                f'<p>{r["管轄署"]}</p><p>{r["種別"]}</p><p>{r["日時"]}</p><p>{r["場所"]}</p><p>{r["状況"]}</p>',
                max_width=300,
                min_width=150,
            ),
            icon=folium.Icon(color=r["color"]),
        ).add_to(fmap)

    p_map = pathlib.Path("map", "index.html")

    fmap.save(str(p_map))

Scraping Aichi Prefecture's positive-case figures with OCR Space

Use OCR Space's Free OCR API to read the positive-case status that Aichi Prefecture publishes as an image.


The API key is read from the OCR_SPACE_API environment variable.
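
A quick guard to fail fast when the key is missing (a minimal sketch; set the variable in the shell beforehand, e.g. export OCR_SPACE_API=your_key):

import os

assert "OCR_SPACE_API" in os.environ, "OCR_SPACE_API is not set"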

import datetime
import os
import re
from urllib.parse import urljoin

import requests
from bs4 import BeautifulSoup

ocr_api_key = os.environ["OCR_SPACE_API"]

def ocr_space_url(url, overlay=False, api_key=ocr_api_key, language="eng"):
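    # POST the image URL to OCR.space's parse/image endpoint and return the JSON response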

    payload = {
        "url": url,
        "isOverlayRequired": overlay,
        "apikey": api_key,
        "language": language,
    }
    r = requests.post("https://api.ocr.space/parse/image", data=payload)
    return r.json()


url = "https://www.pref.aichi.jp/site/covid19-aichi/"

headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko",
}

r = requests.get(url, headers=headers)
r.raise_for_status()
soup = BeautifulSoup(r.content, "html.parser")

src = soup.find("img", alt=re.compile("検査陽性者$")).get("src")
img_url = urljoin(url, src)

res = ocr_space_url(url=img_url, language="jpn")

text = res["ParsedResults"][0]["ParsedText"]

# Extract every number followed by 人 as an int
data = [int(i.rstrip("人").replace(",", "")) for i in re.findall("[0-9,]+人", text)]
print(data)

result = []

# The OCR text gives a flat list of numbers. Slide a 12-value window through
# it until the block is internally consistent (each total equals the sum of
# its breakdown), which skips stray numbers picked up outside the table.
while len(data) >= 12:

    if data[2] == data[3] + data[4] + data[5]:
        if (
            data[1]
            == data[2] + data[6] + data[7] + data[8] + data[9] + data[10] + data[11]
        ):
            result = data[:12]
            break

    data.pop(0)

print(result)

# Timestamp of the update

m_date = re.search(r"(\d{4})年(\d{1,2})月(\d{1,2})日(\d{1,2})時", text)

if m_date:
    year, month, day, hour = map(int, m_date.groups())
    dt_update = datetime.datetime(year, month, day, hour)
else:
    dt_update = datetime.datetime.now()

print(dt_update)

# Remarks

m = re.search("^※1.+検査を行ったものについて", text, re.DOTALL | re.MULTILINE)

remark = ""
if m:
    remark = "".join(
        m.group(0)
        .replace("※1", "")
        .replace("※2", "")
        .replace("(注)", "")
        .replace("検査を行ったものについて", "検査を行ったものについて掲載。")
        .splitlines()
    )

print(remark)
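
OCR output is brittle, so it can be worth failing fast when the API reports an error instead of indexing straight into ParsedResults. A minimal sketch, assuming OCR.space's IsErroredOnProcessing and ErrorMessage response fields:

def parsed_text(res):
    # IsErroredOnProcessing / ErrorMessage are assumed OCR.space response field names
    if res.get("IsErroredOnProcessing"):
        raise RuntimeError(f"OCR failed: {res.get('ErrorMessage')}")
    return res["ParsedResults"][0]["ParsedText"]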

Niigata Prefecture COVID-19 data

# -*- coding: utf-8 -*-

import datetime
import pathlib
import re
from urllib.parse import urljoin

import jaconv
import pandas as pd
import requests
from bs4 import BeautifulSoup

JST = datetime.timezone(datetime.timedelta(hours=+9))
dt_now = datetime.datetime.now(JST)

BASE_URL = "https://www.pref.niigata.lg.jp/site/shingata-corona/index.html"

niigata_names = {
    151009: "新潟市",
    151017: "新潟市北区",
    151025: "新潟市東区",
    151033: "新潟市中央区",
    151041: "新潟市江南区",
    151050: "新潟市秋葉区",
    151068: "新潟市南区",
    151076: "新潟市西区",
    151084: "新潟市西蒲区",
    152021: "長岡市",
    152048: "三条市",
    152056: "柏崎市",
    152064: "新発田市",
    152081: "小千谷市",
    152099: "加茂市",
    152102: "十日町市",
    152111: "見附市",
    152129: "村上市",
    152137: "燕市",
    152161: "糸魚川市",
    152170: "妙高市",
    152188: "五泉市",
    152226: "上越市",
    152234: "阿賀野市",
    152242: "佐渡市",
    152251: "魚沼市",
    152269: "南魚沼市",
    152277: "胎内市",
    153079: "聖籠町",
    153427: "弥彦村",
    153613: "田上町",
    153851: "阿賀町",
    154059: "出雲崎町",
    154610: "湯沢町",
    154822: "津南町",
    155047: "刈羽村",
    155811: "関川村",
    155861: "粟島浦村",
}

niigata_codes = {v: k for k, v in niigata_names.items()}


def niigata_get_code(s):
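    # Look up the local-government code for a municipality name (0 if unknown)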
    return niigata_codes.get(s.strip(), 0)


def str2date(s):
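    # Parse a "M月D日"-style string; the year is assumed to be the current year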

    n = re.findall("[0-9]{1,2}", s)

    y = dt_now.year

    if len(n) == 2:
        m, d = map(int, n)
        return pd.Timestamp(y, m, d)

    else:
        return pd.NaT


def df_update(df1, df2):
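    # Upsert: add df2's new rows to df1 and overwrite overlapping values with df2's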

    df = df1.reindex(df1.index.union(df2.index))
    df.update(df2)

    return df


def fetch_yousei(url):
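    # Scrape the hospitalization/discharge summary table and write hospitalization.csv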

    df = pd.read_html(url, index_col=0)[0].T
    df.rename(
        index={"入院中 (予定含む)": "hospitalization", "退院": "discharge"},
        columns={"累計": "count"},
        inplace=True,
    )

    df1 = df.loc[["hospitalization", "discharge"], "count"].copy()
    df1.index.name = "type"

    p_hospitalization_csv = pathlib.Path("dist", "csv", "hospitalization.csv")
    p_hospitalization_csv.parent.mkdir(parents=True, exist_ok=True)

    df1.to_csv(p_hospitalization_csv, encoding="utf_8_sig")


def fetch_excel(url, text):
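    # Find the first link whose text starts with `text` and points to an Excel
    # file, then download it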

    r = requests.get(url)
    r.raise_for_status()

    soup = BeautifulSoup(r.content, "html.parser")

    tag = soup.find("a", text=re.compile(f"^{text}"), href=re.compile("xls[mx]?$"))

    if tag:
        link = urljoin(url, tag.get("href"))
        p = fetch_file(link, r"dist/excel")

        return p
    else:
        raise FileNotFoundError("Excel file not found")


def fetch_file(url, dir="."):
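    # Download the file at url into dir and return the local path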

    r = requests.get(url)
    r.raise_for_status()

    p = pathlib.Path(dir, pathlib.PurePath(url).name)
    p.parent.mkdir(parents=True, exist_ok=True)

    with p.open(mode="wb") as fw:
        fw.write(r.content)
    return p


def fetch_kanja(url):
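    # Scrape the patient table and write it out in the standard open-data layout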

    # Treat the various dash characters as missing values
    df = pd.read_html(url, na_values=["-", "－", "―"])[0]

    df.rename(
        columns={"患者 No. ※報道発表資料へリンク": "No", "患者 No. ※報道発表資料へリンク.1": "報道発表資料"},
        inplace=True,
    )

    df.dropna(thresh=4, inplace=True)

    df["備考"] = df["備考"].fillna("").astype(str)

    df["年代"] = df["年代"].fillna("").astype(str)
    df["年代"] = df["年代"].mask(df["年代"].str.startswith("10歳未満"), "10歳未満")

    df["判明日"] = df["判明日"].apply(
        lambda s: jaconv.z2h(s, kana=False, digit=True, ascii=True).replace(" ", "")
    )
    df["判明日"] = df["判明日"].apply(str2date)

    df["居住地"] = df["居住地"].apply(
        lambda s: jaconv.z2h(s, kana=False, digit=True, ascii=True).replace(" ", "")
    )
    df["居住地"] = df["居住地"].apply(lambda s: s.rstrip(")").split("(")[-1])

    df = df.sort_values("No").reset_index(drop=True)

    df.to_csv("kanja.tsv", sep="\t")

    df1 = df.copy()

    df1.rename(
        columns={
            "判明日": "公表_年月日",
            "居住地": "患者_居住地",
            "年代": "患者_年代",
            "性別": "患者_性別",
            "職業": "患者_職業",
        },
        inplace=True,
    )

    df1["都道府県名"] = "新潟県"
    df1["市区町村名"] = df1["患者_居住地"]
    df1["全国地方公共団体コード"] = df1["市区町村名"].apply(niigata_get_code)

    df2 = df1.reindex(
        columns=[
            "No",
            "全国地方公共団体コード",
            "都道府県名",
            "市区町村名",
            "公表_年月日",
            "発症_年月日",
            "患者_居住地",
            "患者_年代",
            "患者_性別",
            "患者_職業",
            "患者_状態",
            "患者_症状",
            "患者_渡航歴の有無フラグ",
            "患者_退院済フラグ",
            "備考",
        ]
    )
    
    df2.set_index("No", inplace=True)

    p_patients_csv = pathlib.Path("dist", "csv", "150002_niigata_covid19_patients.csv")
    p_patients_csv.parent.mkdir(parents=True, exist_ok=True)

    df2.to_csv(p_patients_csv, encoding="utf_8_sig")


def fetch_soudan(url):
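    # Build the call-center consultation-count CSV from the published Excel file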

    p_soudan = fetch_excel(url, "センター相談件数")

    df = pd.read_excel(p_soudan, skiprows=3, skipfooter=4)

    df.set_axis(["年", "受付_年月日", "曜日", "相談件数", "紹介人数", "備考"], axis=1, inplace=True)

    flg_is_serial = df["受付_年月日"].astype("str").str.isdigit()

    # Excel stores dates as day serials; convert them using the 1899-12-30 epoch
    fromSerial = pd.to_datetime(
        df.loc[flg_is_serial, "受付_年月日"].astype(float),
        unit="D",
        origin=pd.Timestamp("1899/12/30"),
    )

    fromString = df.loc[~flg_is_serial, "受付_年月日"]

    df["受付_年月日"] = pd.concat([fromString, fromSerial])

    df1 = df.loc[flg_is_serial].copy()
    df1.drop(["年", "曜日"], axis=1, inplace=True)
    df1.reset_index(drop=True, inplace=True)

    df1.to_csv("soudan.tsv", sep="\t")

    df2 = df1.copy()

    df2["全国地方公共団体コード"] = 150002
    df2["都道府県名"] = "新潟県"
    df2["市区町村名"] = ""
    df2["備考"] = ""

    df3 = df2.reindex(
        columns=["受付_年月日", "全国地方公共団体コード", "都道府県名", "市区町村名", "相談件数", "備考",]
    )

    df3.set_index("受付_年月日", inplace=True)

    df_temp = pd.read_csv(
        "https://raw.githubusercontent.com/CodeForNiigata/covid19-data-niigata/master/dist/csv/150002_niigata_covid19_test_count.csv",
        index_col=0,
        parse_dates=True,
        dtype={"全国地方公共団体コード": "Int64", "相談件数": "Int64"},
    )

    df4 = df_update(df_temp, df3)
    df4.index = df4.index.strftime("%Y-%m-%d")

    p_callcenter_csv = pathlib.Path(
        "dist", "csv", "150002_niigata_covid19_call_center.csv"
    )
    p_callcenter_csv.parent.mkdir(parents=True, exist_ok=True)

    df4.to_csv(p_callcenter_csv, encoding="utf_8_sig")


def fetch_kensa(url):
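    # Build the test-count CSV from the published Excel file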

    p_kensa = fetch_excel(url, "検査件数一覧表")

    df = pd.read_excel(p_kensa, skiprows=2, skipfooter=2)

    df.set_axis(
        ["年", "実施_年月日", "曜日", "検査実施_件数", "PCRセンター実施件数", "陽性件数"], axis=1, inplace=True
    )

    flg_is_serial = df["実施_年月日"].astype("str").str.isdigit()

    # Excel stores dates as day serials; convert them using the 1899-12-30 epoch
    fromSerial = pd.to_datetime(
        df.loc[flg_is_serial, "実施_年月日"].astype(float),
        unit="D",
        origin=pd.Timestamp("1899/12/30"),
    )

    fromString = df.loc[~flg_is_serial, "実施_年月日"]

    df["実施_年月日"] = pd.concat([fromString, fromSerial])

    df1 = df.loc[flg_is_serial].copy()
    df1.drop(["年", "曜日"], axis=1, inplace=True)

    df1["検査実施_件数"] = df1["検査実施_件数"].fillna(0).astype("Int64")
    df1["PCRセンター実施件数"] = df1["PCRセンター実施件数"].fillna(0).astype("Int64")
    df1["陽性件数"] = df1["陽性件数"].fillna(0).astype("Int64")

    df1.reset_index(drop=True, inplace=True)

    df1.to_csv("kensa.tsv", sep="\t")

    df2 = df1.copy()

    df2["全国地方公共団体コード"] = 150002
    df2["都道府県名"] = "新潟県"
    df2["市区町村名"] = ""
    df2["備考"] = ""

    df3 = df2.reindex(
        columns=["実施_年月日", "全国地方公共団体コード", "都道府県名", "市区町村名", "検査実施_件数", "備考",]
    )

    df3.set_index("実施_年月日", inplace=True)

    df_temp = pd.read_csv(
        "https://raw.githubusercontent.com/CodeForNiigata/covid19-data-niigata/master/dist/csv/150002_niigata_covid19_test_count.csv",
        index_col=0,
        parse_dates=True,
        dtype={"全国地方公共団体コード": "Int64", "検査実施_件数": "Int64"},
    )

    df4 = df_update(df_temp, df3)
    df4.index = df4.index.strftime("%Y-%m-%d")

    p_testcount_csv = pathlib.Path(
        "dist", "csv", "150002_niigata_covid19_test_count.csv",
    )

    p_testcount_csv.parent.mkdir(parents=True, exist_ok=True)

    df4.to_csv(p_testcount_csv, encoding="utf_8_sig")


if __name__ == "__main__":

    r = requests.get(BASE_URL)
    r.raise_for_status()

    soup = BeautifulSoup(r.content, "html.parser")

    tag = soup.find("a", text="県内における発生状況の詳細はこちら")

    if tag:
        link = urljoin(BASE_URL, tag.get("href"))

        fetch_yousei(BASE_URL)
        fetch_kanja(link)
        fetch_soudan(link)
        fetch_kensa(link)

Iwate Prefecture COVID data: building the test-count and positivity-rate CSV

import datetime
import re

import pandas as pd

JST = datetime.timezone(datetime.timedelta(hours=+9))
dt_now = datetime.datetime.now(JST)

title = [
    "PCR検査 行政検査件数",
    "PCR検査 民間検査件数",
    "PCR検査 実施数",
    "PCR検査 陽性者数",
    "PCR検査 陰性者数",
    "抗原検査 実施数",
    "抗原検査 陽性者数",
    "抗原検査 陰性者数",
    "検査件数 合計",
    "陽性者数 合計",
    "陰性者数 合計",
]


def str2date(s):
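    # Parse a "M月D日"-style string; the year is assumed to be the current year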

    n = re.findall("[0-9]{1,2}", s)

    y = dt_now.year

    if len(n) == 2:
        m, d = map(int, n)
        return datetime.date(y, m, d)

    else:
        return pd.NaT


def kensa_calc(df_tmp):
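    # Each table cell packs two stacked numbers (検査数 then 陽性者数); extractall
    # pulls them apart, and unstack turns the pair into separate columns per item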

    df_tmp.index = df_tmp.index.map(str2date)

    df = (
        pd.concat(
            [
                items.astype(str).str.extractall("([0-9,]+)")
                for label, items in df_tmp.items()
            ],
            axis=1,
        )
        .set_axis(df_tmp.columns, axis=1)
        .rename(index={0: "検査数", 1: "陽性者数"}, level=1)
        .unstack()
        .fillna(0)
        .astype(int)
    )

    df.columns = [" ".join(col).strip() for col in df.columns.values]

    df.rename(
        columns={
            "行政検査件数 (PCR検査) 検査数": "PCR検査 行政検査件数",
            "行政検査件数 (PCR検査) 陽性者数": "PCR検査 行政陽性者数",
            "民間検査件数 (PCR検査) 検査数": "PCR検査 民間検査件数",
            "民間検査件数 (PCR検査) 陽性者数": "PCR検査 民間陽性者数",
            "抗原検査件数 検査数": "抗原検査 実施数",
            "抗原検査件数 陽性者数": "抗原検査 陽性者数",
            "合計 検査数": "検査件数 合計",
            "合計 陽性者数": "陽性者数 合計",
        },
        inplace=True,
    )

    df.index.name = "日付"

    df["PCR検査 実施数"] = df["PCR検査 行政検査件数"] + df["PCR検査 民間検査件数"]
    df["PCR検査 陽性者数"] = df["PCR検査 行政陽性者数"] + df["PCR検査 民間陽性者数"]
    df["PCR検査 陰性者数"] = df["PCR検査 実施数"] - df["PCR検査 陽性者数"]

    df["抗原検査 陰性者数"] = df["抗原検査 実施数"] - df["抗原検査 陽性者数"]

    df["検査件数 合計"] = df["PCR検査 実施数"] + df["抗原検査 実施数"]
    df["陽性者数 合計"] = df["PCR検査 陽性者数"] + df["抗原検査 陽性者数"]
    df["陰性者数 合計"] = df["PCR検査 陰性者数"] + df["抗原検査 陰性者数"]

    df.sort_index(ascending=True, inplace=True)

    return df.reindex(columns=title)


if __name__ == "__main__":

    dfs = pd.read_html(
        "https://www.pref.iwate.jp/kurashikankyou/iryou/covid19/index.html", index_col=0
    )

    df1 = kensa_calc(dfs[1].T.iloc[1:-1])

    df2 = (
        pd.read_csv(
            "https://raw.githubusercontent.com/MeditationDuck/covid19/development/data/csv/%E5%B2%A9%E6%89%8B%E7%9C%8C%20%E3%82%B3%E3%83%AD%E3%83%8A%E6%83%85%E5%A0%B1%20-%20%E6%A4%9C%E6%9F%BB%E4%BB%B6%E6%95%B0%E3%83%BB%E6%A4%9C%E6%9F%BB%E3%81%AE%E9%99%BD%E6%80%A7%E7%8E%87.csv",
            index_col="日付",
            parse_dates=True,
            dtype={
                "PCR検査 行政検査件数": "Int64",
                "PCR検査 民間検査件数": "Int64",
                "PCR検査 実施数": "Int64",
                "PCR検査 陽性者数": "Int64",
                "PCR検査 陰性者数": "Int64",
                "抗原検査 実施数": "Int64",
                "抗原検査 陽性者数": "Int64",
                "抗原検査 陰性者数": "Int64",
                "検査件数 合計": "Int64",
                "陽性者数 合計": "Int64",
                "陰性者数 合計": "Int64",
            },
        )
        .reindex(columns=title)
        .sort_index(ascending=False)
    )

    # Upsert: extend the accumulated index with the newly scraped dates, then overwrite
    df = df2.reindex(df2.index.union(df1.index))
    df.update(df1)

    df = df.astype("Int64")

    df["検査件数(7日間移動平均)"] = df["検査件数 合計"].rolling(window=7).mean().round(2)
    df["陽性者数(7日間移動平均)"] = df["陽性者数 合計"].rolling(window=7).mean().round(2)
    df["陽性率"] = (
        df["陽性者数 合計"].rolling(window=7).mean()
        / df["検査件数 合計"].rolling(window=7).mean()
        * 100
    ).round(2)

    df.sort_index(ascending=False, inplace=True)

    df.to_csv("岩手県 コロナ情報 - 検査件数・検査の陽性率.csv")

Infections and test counts in Aichi Prefecture

import datetime
import pathlib
import re

import pandas as pd

def str2date(s):
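    # For ranges like "1月1日~1月7日", parse the end date; the year is assumed current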

    y = datetime.date.today().year
    m, d = map(int, re.findall(r"\d{1,2}", s.split("~")[-1].strip()))
    return pd.Timestamp(y, m, d)

# Scrape the inspections table and drop the last row
df = pd.read_html(
    "https://www.pref.aichi.jp/site/covid19-aichi/kansensya-kensa.html",
    match="検査日",
    na_values="-",
)[0].iloc[:-1]

# Keep the original date string as a remark
df["備考"] = df["検査日"]

# Flag rows whose 検査日 is a date range (aggregated counts)
df.loc[df["検査日"].str.contains("~"), "合算"] = "○"

df["検査日"] = df["検査日"].apply(str2date)

p = pathlib.Path("data", "inspections_summary.csv")
p.parent.mkdir(parents=True, exist_ok=True)

df.to_csv(p, index=False, header=True)