玉川ダムと蒼社川の水位

2021/04/18現在利用できません

import datetime

import requests
import twitter
from bs4 import BeautifulSoup


# 文字を小数点に変換、変換できない場合は0.0
def moji_float(x):
    """Convert a scraped cell value to float; return 0.0 on failure.

    Table cells may be empty strings or non-numeric placeholders, so any
    unconvertible input maps to 0.0 rather than raising.
    """
    try:
        return float(x)
    except (TypeError, ValueError):
        # Narrowed from a bare ``except:`` — these are the only exceptions
        # float() raises for bad input; anything else should propagate.
        return 0.0


# 空文字の場合、前の値で補間、次の値で補間
def ffill_bfill(data):
    """Fill falsy cells of a 2-D list: forward-fill, then backward-fill.

    Mutates ``data`` in place and also returns it.

    NOTE(review): ``not data[y][x]`` treats 0.0 (and 0) as missing, not just
    the empty string — presumably intentional for this feed, but confirm.
    """
    # Forward fill: copy each missing cell from the row above.
    for y in range(1, len(data)):
        for x in range(len(data[y])):
            if not data[y][x]:
                data[y][x] = data[y - 1][x]

    # Backward fill: copy each still-missing cell from the row below.
    # (Idiomatic explicit reversed range instead of range(...)[::-1].)
    for y in range(len(data) - 2, -1, -1):
        for x in range(len(data[y])):
            if not data[y][x]:
                data[y][x] = data[y + 1][x]

    return data


# 現在と過去を比較し記号に変換
def moji_sign(now, before):
    """Compare the current value with the previous one and return a trend
    arrow: up, down, or flat."""
    if now == before:
        return '➡'
    return '↗' if now > before else '↘'


def scraping(url, tag):
    """Fetch *url* and parse the nested observation table selected by the
    CSS selector *tag*.

    Returns a tuple ``(latest_row, signs)``: the most recent data row
    (two leading label strings followed by floats) and a list of trend
    symbols comparing it with the row before it.

    NOTE(review): when the HTTP status is not OK this falls through and
    implicitly returns ``None``, so callers that unpack the result will
    raise ``TypeError`` — consider raising explicitly instead.
    """

    headers = {
        'User-Agent':
        'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko'
    }

    r = requests.get(url, headers=headers)

    if r.status_code == requests.codes.ok:

        soup = BeautifulSoup(r.content, 'html5lib')

        data = []

        for trs in soup.select(tag):

            # Extract rows => cells => rows inside each cell => inner cells.
            table = [[[td.get_text(strip=True) for td in tr.select('td')]
                      for tr in tds.select('tr')]
                     for tds in trs.select('td > table > tbody')]

            # Transpose so each item is one logical record, flatten it, and
            # convert everything after the two label columns to float.
            for i in map(list, zip(*table)):
                temp = sum(i, [])
                data.append(temp[0:2] + list(map(moji_float, temp[2:])))

        # Interpolate blanks, then derive trend symbols from the last two rows.
        res_data = ffill_bfill(data)
        res_sign = list(map(moji_sign, res_data[-1], res_data[-2]))

        return (res_data[-1], res_sign)


if __name__ == '__main__':

    # Shift the current time back 8 minutes, then round down to a
    # 10-minute boundary (the site publishes readings every 10 minutes).
    dt_now = datetime.datetime.now() - datetime.timedelta(minutes=8)
    dt_now -= datetime.timedelta(minutes=(dt_now.minute % 10))

    # Tamagawa dam observation URL
    dam_url = 'http://183.176.244.72/cgi/170_USER_010_01.cgi?GID=170_USER_010&UI=U777&SI=00000&MNU=1&LO=88&BTY=IE6X&NDT=1&SK=0000000&DT={}&GRP=USR004&TPG=1&PG=1&KTM=3'.format(
        dt_now.strftime('%Y%m%d%H%M'))

    # Scrape the dam data
    dam_now, dam_sign = scraping(dam_url,
                                 'body > table:nth-of-type(7) > tbody > tr')

    # Souja river observation URL
    river_url = 'http://183.176.244.72/cgi/050_HQ_030_01.cgi?GID=050_HQ_030&UI=U777&SI=00000&LO=88&SRO=1&KKB=101100&DSP=11110&SKZ=111&NDT=1&MNU=1&BTY=IE6X&SSC=0&RBC=100&DT={}&GRP=USR020&TPG=1&PG=1&KTM=3'.format(
        dt_now.strftime('%Y%m%d%H%M'))

    # Scrape the river data
    river_now, river_sign = scraping(
        river_url, 'body > table:nth-of-type(9) > tbody > tr')

    # Dam alert threshold (flood-control operation start level)
    if dam_now[2] >= 158.0:
        dam_alert = '防災操作開始水位'
    else:
        dam_alert = ''

    # Katayama gauge alert thresholds (highest match wins)
    if river_now[2] >= 2.85:
        katayama_alert = 'はん濫危険水位'
    elif river_now[2] >= 2.60:
        katayama_alert = '避難判断水位'
    elif river_now[2] >= 2.40:
        katayama_alert = 'はん濫注意水位'
    elif river_now[2] >= 2.10:
        katayama_alert = '水防団待機水位'
    else:
        katayama_alert = ''

    # Kouya gauge alert thresholds
    if river_now[3] >= 4.00:
        kouya_alert = 'はん濫注意水位'
    elif river_now[3] >= 3.50:
        kouya_alert = '水防団待機水位'
    else:
        kouya_alert = ''

    # Timestamp line of the tweet
    twit_date = '{}現在'.format(dt_now.strftime('%Y/%m/%d %H:%M'))

    # Tamagawa dam summary
    twit_dam = '【玉川ダム】\n貯水位:{0[2]:.2f} {1[2]} {2}\n全流入量:{0[3]:.2f} {1[3]}\n全放流量:{0[4]:.2f} {1[4]}\n貯水量:{0[5]} {1[5]}'.format(
        dam_now, dam_sign, dam_alert)

    # Souja river summary
    twit_river = '【蒼社川】\n片山:{0[2]:.2f}m {1[2]} {2}\n高野:{0[3]:.2f}m {1[3]} {3}\n中通:{0[4]:.2f}m {1[4]}'.format(
        river_now, river_sign, katayama_alert, kouya_alert)

    # Join the tweet parts, dropping any leading/trailing whitespace
    twit = '\n\n'.join([
        twit_date,
        twit_dam,
        twit_river,
    ]).strip()

    # print(len(twit))
    # print(twit)

    # python-twitter client; credentials are intentionally blanked out here.
    api = twitter.Api(
        consumer_key='',
        consumer_secret='',
        access_token_key='',
        access_token_secret='')

    # NOTE(review): the camera image is passed as a remote URL, which may be
    # served from cache — the revised script later in this file fetches the
    # image itself with no-cache headers for this reason.
    status = api.PostUpdate(twit, media='http://www.pref.ehime.jp/kasen/Jpeg/Cam006/00_big.jpg')

キャッシュ対策版:ライブカメラ画像の取得時に Cache-Control / Pragma の no-cache ヘッダーを付与し、画像を自前でアップロードするよう改良したスクリプト

import datetime
import json

import requests
from bs4 import BeautifulSoup
from requests_oauthlib import OAuth1Session

# Monitoring mode: when True, the water-level threshold and time-of-day
# checks below are bypassed and the script posts on every run.
kanshi = True


# 文字を小数点に変換、変換できない場合は0.0
def moji_float(x):
    """Convert a scraped cell value to float; return 0.0 on failure.

    Table cells may be empty strings or non-numeric placeholders, so any
    unconvertible input maps to 0.0 rather than raising.
    """
    try:
        return float(x)
    except (TypeError, ValueError):
        # Narrowed from a bare ``except:`` — these are the only exceptions
        # float() raises for bad input; anything else should propagate.
        return 0.0


# 空文字の場合、前の値で補間、次の値で補間
def ffill_bfill(data):
    """Interpolate falsy cells of a 2-D list in place: forward-fill from the
    row above, then backward-fill from the row below.  Returns ``data``."""
    n_rows = len(data)

    # Forward pass: inherit the previous row's value.
    for row in range(1, n_rows):
        for col, cell in enumerate(data[row]):
            if not cell:
                data[row][col] = data[row - 1][col]

    # Backward pass: inherit the next row's value.
    for row in reversed(range(n_rows - 1)):
        for col, cell in enumerate(data[row]):
            if not cell:
                data[row][col] = data[row + 1][col]

    return data


# 現在と過去を比較し記号に変換
def moji_sign(now, before):
    """Compare the current value with the previous one: ' ↗' if rising,
    ' ↘' if falling, and an empty string when unchanged."""
    if now == before:
        return ''
    return ' ↗' if now > before else ' ↘'


def scraping(url, tag):
    """Fetch *url* and parse the nested observation table selected by the
    CSS selector *tag*.

    Returns a tuple ``(latest_row, signs)``: the most recent data row
    (two leading label strings followed by floats) and a list of trend
    symbols comparing it with the row before it.

    NOTE(review): when the HTTP status is not OK this falls through and
    implicitly returns ``None``, so callers that unpack the result will
    raise ``TypeError`` — consider raising explicitly instead.
    """

    headers = {
        'User-Agent':
        'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko'
    }

    r = requests.get(url, headers=headers)

    if r.status_code == requests.codes.ok:

        soup = BeautifulSoup(r.content, 'html5lib')

        data = []

        for trs in soup.select(tag):

            # Extract rows => cells => rows inside each cell => inner cells.
            table = [[[td.get_text(strip=True) for td in tr.select('td')]
                      for tr in tds.select('tr')]
                     for tds in trs.select('td > table > tbody')]

            # Transpose so each item is one logical record, flatten it, and
            # convert everything after the two label columns to float.
            for i in map(list, zip(*table)):
                temp = sum(i, [])
                data.append(temp[0:2] + list(map(moji_float, temp[2:])))

        # Interpolate blanks, then derive trend symbols from the last two rows.
        res_data = ffill_bfill(data)
        res_sign = list(map(moji_sign, res_data[-1], res_data[-2]))

        return (res_data[-1], res_sign)


if __name__ == '__main__':

    # Shift the current time back 5 minutes, then round down to a
    # 10-minute boundary (the site publishes readings every 10 minutes).
    dt_now = datetime.datetime.now() - datetime.timedelta(minutes=5)
    dt_now -= datetime.timedelta(minutes=(dt_now.minute % 10))

    # Souja river observation URL
    river_url = 'http://183.176.244.72/cgi/050_HQ_030_01.cgi?GID=050_HQ_030&UI=U777&SI=00000&LO=88&SRO=1&KKB=101100&DSP=11110&SKZ=111&NDT=1&MNU=1&BTY=IE6X&SSC=0&RBC=100&DT={}&GRP=USR020&TPG=1&PG=1&KTM=3'.format(
        dt_now.strftime('%Y%m%d%H%M'))

    # Scrape the river data
    river_now, river_sign = scraping(
        river_url, 'body > table:nth-of-type(9) > tbody > tr')

    # Kouya level above 3.3 m, or monitoring mode forces a post
    if river_now[3] > 3.30 or kanshi:

        # Katayama above 2.5 m, or post on the hour / half hour
        if dt_now.minute == 0 or dt_now.minute == 30 or river_now[
                2] > 2.50 or kanshi:

            # Tamagawa dam observation URL
            dam_url = 'http://183.176.244.72/cgi/170_USER_010_01.cgi?GID=170_USER_010&UI=U777&SI=00000&MNU=1&LO=88&BTY=IE6X&NDT=1&SK=0000000&DT={}&GRP=USR004&TPG=1&PG=1&KTM=3'.format(
                dt_now.strftime('%Y%m%d%H%M'))

            # Scrape the dam data
            dam_now, dam_sign = scraping(
                dam_url, 'body > table:nth-of-type(7) > tbody > tr')

            # Dam alert threshold (flood-control operation start level)
            if dam_now[2] >= 158.0:
                dam_alert = ' 防災操作開始水位'
            else:
                dam_alert = ''

            # Katayama gauge alert thresholds (highest match wins)
            if river_now[2] >= 2.85:
                katayama_alert = ' はん濫危険水位'
            elif river_now[2] >= 2.60:
                katayama_alert = ' 避難判断水位'
            elif river_now[2] >= 2.40:
                katayama_alert = ' はん濫注意水位'
            elif river_now[2] >= 2.10:
                katayama_alert = ' 水防団待機水位'
            else:
                katayama_alert = ''

            # Kouya gauge alert thresholds
            if river_now[3] >= 4.00:
                kouya_alert = ' はん濫注意水位'
            elif river_now[3] >= 3.50:
                kouya_alert = ' 水防団待機水位'
            else:
                kouya_alert = ''

            # Timestamp line of the tweet
            twit_date = '{}現在'.format(dt_now.strftime('%Y/%m/%d %H:%M'))

            # Tamagawa dam summary
            twit_dam = '【玉川ダム】\n貯水位:{0[2]:.2f}{1[2]}{2}\n全流入量:{0[3]:.2f}{1[3]}\n全放流量:{0[4]:.2f}{1[4]}\n貯水量:{0[5]}{1[5]}'.format(
                dam_now, dam_sign, dam_alert)

            # Souja river summary
            # NOTE(review): this version formats index 5 for 中通 where the
            # older script above used index 4 — confirm the column mapping.
            twit_river = '【蒼社川】\n片山:{0[2]:.2f}m{1[2]}{2}\n高野:{0[3]:.2f}m{1[3]}{3}\n中通:{0[5]:.2f}m{1[4]}'.format(
                river_now, river_sign, katayama_alert, kouya_alert)

            # Join the tweet parts, dropping any leading/trailing whitespace
            twit = '\n\n'.join([
                twit_date, twit_dam, twit_river,
                'http://i.river.go.jp/_-p01-_/p/xmn0501010/?mtm=10&swd=&prf=3801&twn=3801202'
            ]).strip()

            # OAuth1 credentials; intentionally blanked out in this copy.
            CK = ''
            CS = ''
            AT = ''
            ATS = ''

            twitter = OAuth1Session(CK, CS, AT, ATS)

            # no-cache headers so a fresh camera image is fetched every run
            headers = {
                'User-Agent':
                'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko',
                'Cache-Control':
                'no-cache',
                'Pragma':
                'no-cache'
            }

            url_image = 'http://www.pref.ehime.jp/kasen/Jpeg/Cam006/00_big.jpg'
            r = requests.get(url_image, headers=headers)

            # Upload the camera image to Twitter
            files = {"media": r.content}
            url_media = 'https://upload.twitter.com/1.1/media/upload.json'
            req_media = twitter.post(url_media, files=files)

            if req_media.status_code != 200:
                # Bug fix: the original passed the response text as a second
                # print() argument instead of %-formatting it into the message.
                print('画像アップデート失敗: %s' % req_media.text)
                exit()

            # Attach the uploaded image and post the status exactly once.
            # Bug fix: the original called twitter.post() on the update
            # endpoint twice in a row, attempting a duplicate tweet.
            media_id = json.loads(req_media.text)['media_id']
            params = {'status': twit, "media_ids": [media_id]}
            url_text = 'https://api.twitter.com/1.1/statuses/update.json'
            res = twitter.post(url_text, params=params)
crontab -e で以下を登録する(毎時8分から10分間隔で実行):

8,18,28,38,48,58 * * * * python3 /home/imabari/workspace/dam-river_twit.py