Extracting differences between Rakuten coverage area maps


import cv2
import numpy as np
from google.colab.patches import cv2_imshow
from PIL import Image
from staticmap import StaticMap


def get_map(
    url, lat=33.84167, lng=132.76611, width=2000, height=2000, zoom=12, fn="map.png"
):

    # Render a static map image from a tile URL template containing {x}, {y}, {z}
    smap = StaticMap(width, height, url_template=url)

    img = smap.render(zoom=zoom, center=[lng, lat])

    img.save(fn)


# Prefectural office (the get_map default centre)
# lat, lng = 33.84167, 132.76611

lat, lng, zoom = 34.0663192, 132.9975244, 14

kokudo = "https://cyberjapandata.gsi.go.jp/xyz/pale/{z}/{x}/{y}.png"

rakuten2m = "https://gateway-api.global.rakuten.com/dsd/geoserver/4g2m/mno_coverage_map/gwc/service/gmaps?LAYERS=mno_coverage_map:all_map&FORMAT=image/png&TRANSPARENT=TRUE&x={x}&y={y}&zoom={z}"

rakuten4m = "https://gateway-api.global.rakuten.com/dsd/geoserver/4g4m/mno_coverage_map/gwc/service/gmaps?LAYERS=mno_coverage_map:all_map&FORMAT=image/png&TRANSPARENT=TRUE&x={x}&y={y}&zoom={z}"

get_map(
    kokudo,
    lat=lat,
    lng=lng,
    zoom=zoom,
    width=2000,
    height=2000,
)

get_map(
    rakuten2m, lat=lat, lng=lng, zoom=zoom, width=2000, height=2000, fn="area2m.png"
)

get_map(
    rakuten4m, lat=lat, lng=lng, zoom=zoom, width=2000, height=2000, fn="area4m.png"
)


def bgr_mask(img, bgr):

    # Exact match: the same colour is used as both lower and upper bound
    bgrLower = np.array(bgr)
    bgrUpper = np.array(bgr)

    img_mask = cv2.inRange(img, bgrLower, bgrUpper)

    return img_mask
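
The mask above keys on one exact BGR value, which works because the coverage tiles are flat-coloured PNGs. If the tiles ever come back resampled or anti-aliased, a small tolerance around the target colour is more robust; a minimal sketch (the ±10 margin is an assumption, tune as needed):

def bgr_mask_tol(img, bgr, tol=10):

    # Accept colours within ±tol of the target on each BGR channel
    lower = np.clip(np.array(bgr) - tol, 0, 255).astype(np.uint8)
    upper = np.clip(np.array(bgr) + tol, 0, 255).astype(np.uint8)

    return cv2.inRange(img, lower, upper)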


srcmap = cv2.imread("map.png")

src2m = cv2.imread("area2m.png")
src4m = cv2.imread("area4m.png")

"""
# エリア
[186, 102, 255]

# 拡大予定エリア
[221, 128, 196]

# パートナー
[215, 166, 255]
"""

# Extract only the planned expansion area
mask2m = bgr_mask(src2m, [221, 128, 196])
mask4m = bgr_mask(src4m, [221, 128, 196])

# Difference between the two masks (XOR)
mask = cv2.bitwise_xor(mask2m, mask4m)

# Check the difference
cv2_imshow(mask)

# Remove noise and small specks with a morphological opening
kernel = np.ones((3, 3), np.uint8)
opening = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)

# Check the opened mask
cv2_imshow(opening)
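
As an alternative to the 3x3 opening, small blobs can also be dropped with connected-component analysis, filtering on pixel area. A sketch; the 20-pixel threshold is an assumption to tune against the zoom level:

def remove_small_blobs(mask, min_area=20):

    # Label 8-connected white regions and keep only the large ones
    n, labels, stats, _ = cv2.connectedComponentsWithStats(mask, connectivity=8)

    cleaned = np.zeros_like(mask)

    for i in range(1, n):  # label 0 is the background
        if stats[i, cv2.CC_STAT_AREA] >= min_area:
            cleaned[labels == i] = 255

    return cleaned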

# Extract the corresponding area pixels from the coverage image
area = cv2.bitwise_and(src4m, src4m, mask=opening)

# Check the extracted area
cv2_imshow(area)

# Blend with the base map
dst = cv2.addWeighted(srcmap, 0.5, area, 0.5, 0)

cv2_imshow(dst)

# Save the blended result
cv2.imwrite("dst.png", dst)
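
To put a rough number on the change, count the white pixels in the opened mask and convert them with the web-mercator ground resolution (about 156543.03 * cos(lat) / 2^zoom metres per pixel, assuming the 256 px tiles StaticMap renders by default). A sketch; treat the result as an estimate only:

import math

def mask_area_km2(mask, lat, zoom):

    # Ground resolution of one pixel at this latitude and zoom level
    m_per_px = 156543.03392 * math.cos(math.radians(lat)) / (2 ** zoom)

    return cv2.countNonZero(mask) * (m_per_px ** 2) / 1e6

print(f"approx. {mask_area_km2(opening, lat, zoom):.2f} km2 of newly planned area")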

Converting the Rakuten coverage area map to an image

import cv2
import numpy as np
from PIL import Image
from staticmap import StaticMap


def get_map(
    url, lat=33.84167, lng=132.76611, width=2000, height=2000, zoom=12, fn="map.png"
):

    smap = StaticMap(width, height, url_template=url)

    img = smap.render(zoom=zoom, center=[lng, lat])

    img.save(fn)


kokudo = "https://cyberjapandata.gsi.go.jp/xyz/pale/{z}/{x}/{y}.png"

rakuten = "https://gateway-api.global.rakuten.com/dsd/geoserver/4g4m/mno_coverage_map/gwc/service/gmaps?LAYERS=mno_coverage_map:all_map&FORMAT=image/png&TRANSPARENT=TRUE&x={x}&y={y}&zoom={z}"

get_map(kokudo, width=4000, height=4000)
get_map(rakuten, width=4000, height=4000, fn="area.png")

# Make the coverage overlay transparent

src = cv2.imread("area.png")

# Mask the white (background) pixels
mask = np.all(src == [255, 255, 255], axis=-1)

# Convert to BGRA to get an alpha channel
dst = cv2.cvtColor(src, cv2.COLOR_BGR2BGRA)

# White becomes fully transparent
dst[mask, 3] = 0

# Everything else becomes semi-transparent
dst[~mask, 3] = 180

# Save the overlay
cv2.imwrite("dst.png", dst)

im_map = Image.open("map.png")
im_area = Image.open("dst.png")

# alpha_composite needs both layers in RGBA; make the base map fully opaque
im_map.putalpha(255)

# im_area.putalpha(160)

im_raku = Image.alpha_composite(im_map, im_area)

im_raku.save("rakuten.png")
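
The same composite can be done without the PIL round trip by using the alpha channel of dst.png as a per-pixel blend weight in NumPy; a minimal sketch (the output file name is my own):

base = cv2.imread("map.png")                        # BGR base map
over = cv2.imread("dst.png", cv2.IMREAD_UNCHANGED)  # BGRA overlay

# Per-pixel weight in [0, 1] taken from the alpha channel
alpha = over[:, :, 3:4].astype(np.float32) / 255.0

# out = overlay * alpha + base * (1 - alpha)
blended = over[:, :, :3].astype(np.float32) * alpha + base.astype(np.float32) * (1 - alpha)

cv2.imwrite("rakuten_cv.png", blended.astype(np.uint8))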

Geocoding the location text in Ehime Prefecture's suspicious-person reports with pygeonlp

geonlp.ex.nii.ac.jp

Environment setup

!apt install libmecab-dev mecab-ipadic-utf8 libboost-all-dev
!apt install libgdal-dev

!pip install pygeonlp
!pip install gdal
!pip install jageocoder

!python -m jageocoder install-dictionary

Geocoding

import pandas as pd
import pygeonlp.api as api


def get_latlon(s):

    result = [None, None]

    # Guard against rows where the ◆ split left no 場所 text
    if isinstance(s, str) and s:

        # geoparse returns one entry per token; use the first token's geometry
        parsed = api.geoparse(s)

        if parsed:
            geometry = parsed[0].get("geometry")

            if geometry:
                # GeoJSON order: [lon, lat]
                result = geometry.get("coordinates", [None, None])

    return pd.Series(result)

# Address / place-name dictionary

api.setup_basic_database(db_dir="mydic/")

api.init(db_dir="mydic")
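
api.geoparse returns one entry per token, and get_latlon above takes the first token's geometry; pygeonlp reports coordinates in GeoJSON [lon, lat] order, which is why the columns are assigned as ["lon", "lat"] further down. A quick sanity check after init (the place name is just an example):

# Inspect what geoparse returns for a sample place string
print(api.geoparse("松山市")[0].get("geometry"))
# e.g. {'type': 'Point', 'coordinates': [lon, lat]} for a geo-word, or None otherwise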

# Suspicious-person reports

df0 = (
    pd.read_html(
        "http://www.police.pref.ehime.jp/fushinsha.htm", match="概 要", header=0
    )[0]
    .fillna("")
    .astype(str)
)

df0["概 要"] = df0["概 要"].str.normalize("NFKC")

df1 = df0["概 要"].str.split("◆", expand=True)

df1.rename(
    columns={0: "管轄署", 1: "種別", 2: "日時", 3: "場所", 4: "状況", 5: "特徴"}, inplace=True
)

df1.replace("^(種別|日時|場所|状況|特徴):", "", regex=True, inplace=True)

for col in df1.select_dtypes(include=object).columns:
    df1[col] = df1[col].str.strip()

df1["管轄署"] = df1["管轄署"].str.strip("()")

df1

df1[["lon", "lat"]] = df1["場所"].apply(get_latlon)

df1

# Rows with no coordinates
df1[df1.isnull().any(axis=1)]

# Drop rows with no coordinates
df2 = df1.dropna(subset=["lat", "lon"])

Map

import folium
from folium.plugins import MarkerCluster

map = folium.Map(
    location=[34.06604300, 132.99765800],
    tiles="https://cyberjapandata.gsi.go.jp/xyz/pale/{z}/{x}/{y}.png",
    attr='&copy; <a href="https://maps.gsi.go.jp/development/ichiran.html">国土地理院</a>',
    zoom_start=10,
)

marker_cluster = MarkerCluster()

for i, r in df2.iterrows():

    folium.Marker(
        location=[r.lat, r.lon],
        popup=folium.Popup(
            f'<p>{r["管轄署"]}</p><p>{r["種別"]}</p><p>{r["日時"]}</p><p>{r["場所"]}</p><p>{r["状況"]}</p><p>{r["特徴"]}</p>',
            max_width=300,
        ),
    ).add_to(marker_cluster)

marker_cluster.add_to(map)

map

map.save("ehime.html")