# Speed up web scraping with asyncio.

import asyncio
import aiohttp
import requests
from bs4 import BeautifulSoup


async def scraping(url):
    """Fetch *url* asynchronously and parse the response body.

    The HTML is parsed with BeautifulSoup (html5lib backend), but no data
    is extracted yet — this is a template and the returned list is always
    empty until extraction logic is added.

    Args:
        url: Address to fetch over HTTP(S).

    Returns:
        list: Scraped items (currently always empty).
    """
    scraped = []
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as response:
            body = await response.text()
            # Parsed tree is intentionally kept for future extraction code.
            soup = BeautifulSoup(body, 'html5lib')
    return scraped


if __name__ == "__main__":

    urls = []

    loop = asyncio.get_event_loop()
    done, pending = loop.run_until_complete(
        asyncio.wait([scraping(url) for url in urls]))
    result = [d.result() for d in done]