import tempfile
from urllib.parse import urljoin
import requests
import twitter
from bs4 import BeautifulSoup
# Scrape the Ehime Prefecture animal welfare center's "stray animals" page and
# post one tweet (with the animal's photo attached) per table row found.
url = 'http://www.pref.ehime.jp/h25123/4415/mayoi.html'

# Build the Twitter client once, up front — the original re-created it for
# every single row inside the innermost loop.
api = twitter.Api(
    consumer_key='',
    consumer_secret='',
    access_token_key='',
    access_token_secret='')

# timeout keeps the script from hanging forever on a stalled connection.
r = requests.get(url, timeout=30)
if r.status_code == requests.codes.ok:
    soup = BeautifulSoup(r.content, 'html.parser')
    for table in soup.select('#tmp_contents > table.datatable'):
        # The caption heading names the species; use it to pick the hashtag.
        caption = table.select_one('caption > h3').get_text(strip=True)
        if '犬' in caption:
            tribe = '犬'
        elif '猫' in caption:
            tribe = '猫'
        else:
            tribe = ''
        # Skip the first <tr> (header row); each remaining row is one animal
        # whose cells are: place found, kind, coat color, sex, build, notes.
        for row in table.select('tbody > tr')[1:]:
            dogcat = [cell.get_text(strip=True) for cell in row.select('td')]
            # Image src is relative; resolve it against the page URL.
            img_url = urljoin(url, row.select_one('img')['src'])
            res = requests.get(img_url, timeout=30)
            if res.status_code != requests.codes.ok:
                continue  # no photo — skip this animal rather than crash
            with tempfile.TemporaryFile() as fp:
                fp.write(res.content)
                # Rewind so the upload reads the image from the start.
                # BUG FIX: the original called fp.read() after seeking,
                # leaving the file pointer at EOF, so media=fp uploaded
                # an empty stream.
                fp.seek(0)
                twit = '愛媛県動物愛護センター {0}\n\n拾得・捕獲場所:{1[0]}\n種類:{1[1]}\n毛色:{1[2]}\n性別:{1[3]}\n体格:{1[4]}\n備考:{1[5]}\n\n#愛媛県 #迷い{2}\n\n{3}'.format(
                    caption, dogcat, tribe, url)
                status = api.PostUpdate(twit, media=fp)