Asyncio Mini Project (Concurrent URL checker)
Goal
Build a script that:
- checks many URLs
- runs requests concurrently
- returns status codes
- uses timeouts and a concurrency limit
Implementation
url_checker.py
import asyncio
import aiohttp
async def fetch(session: aiohttp.ClientSession, sem: asyncio.Semaphore, url: str):
    """Fetch *url* through *session*, bounded by the semaphore *sem*.

    Returns ``(url, status_code)`` on success, or ``(url, error_text)``
    if the request fails for any reason (timeout, DNS failure, ...).
    """
    async with sem:  # cap the number of in-flight requests
        try:
            async with session.get(url) as resp:
                # Drain the body so the connection can be returned to the pool.
                await resp.read()
                return url, resp.status
        except Exception as e:
            # str(e) is empty for some exceptions (e.g. asyncio.TimeoutError
            # raised on ClientTimeout expiry), which would print as a blank
            # result line — fall back to the exception class name.
            return url, str(e) or type(e).__name__
async def main():
    """Check a fixed list of URLs concurrently and print one status per line."""
    targets = [
        "https://api.github.com",
        "https://httpbin.org/status/404",
        "https://httpbin.org/delay/2",
        "https://example.com",
    ]
    limiter = asyncio.Semaphore(5)
    client_timeout = aiohttp.ClientTimeout(total=5)
    async with aiohttp.ClientSession(timeout=client_timeout) as session:
        tasks = [fetch(session, limiter, target) for target in targets]
        results = await asyncio.gather(*tasks)
    for target, outcome in results:
        print(target, "->", outcome)
# Script entry point: run the event loop only when executed directly.
if __name__ == "__main__":
    asyncio.run(main())
import asyncio
import aiohttp
async def fetch(session: aiohttp.ClientSession, sem: asyncio.Semaphore, url: str):
    """Request *url* and report its HTTP status.

    The semaphore bounds concurrency; any exception is converted to its
    string form so the caller always receives a ``(url, result)`` pair.
    """
    async with sem:
        try:
            async with session.get(url) as response:
                await response.read()  # consume the body before closing
                return url, response.status
        except Exception as exc:
            return url, str(exc)
async def main():
    """Entry coroutine: fan out the URL checks, then print each result."""
    urls = [
        "https://api.github.com",
        "https://httpbin.org/status/404",
        "https://httpbin.org/delay/2",
        "https://example.com",
    ]
    semaphore = asyncio.Semaphore(5)  # at most 5 requests in flight
    session_timeout = aiohttp.ClientTimeout(total=5)  # 5 s total per request
    async with aiohttp.ClientSession(timeout=session_timeout) as session:
        coros = (fetch(session, semaphore, url) for url in urls)
        outcomes = await asyncio.gather(*coros)
    for checked_url, status in outcomes:
        print(checked_url, "->", status)
# Script entry point: run the event loop only when executed directly.
if __name__ == "__main__":
    asyncio.run(main())

Improvements
- read URLs from a file
- write results to CSV
- add retries with exponential backoff
🧪 Try It Yourself
Exercise 1 — Your First Coroutine
Exercise 2 — await asyncio.sleep
Exercise 3 — Gather Two Coroutines
If this helped you, consider buying me a coffee ☕
Buy me a coffee

Was this page helpful?
Let us know how we did
