
Replace dependency requests with httpx #12742


Closed · wants to merge 1 commit
1 change: 0 additions & 1 deletion .pre-commit-config.yaml
@@ -55,7 +55,6 @@ repos:
           - --ignore-missing-imports
           - --install-types # See mirrors-mypy README.md
           - --non-interactive
-        additional_dependencies: [types-requests]
 
   - repo: https://github.com/pre-commit/mirrors-prettier
     rev: "v4.0.0-alpha.8"
12 changes: 10 additions & 2 deletions machine_learning/linear_regression.py
@@ -8,16 +8,24 @@
 Rating). We try to best fit a line through dataset and estimate the parameters.
 """
 
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "httpx",
+#     "numpy",
+# ]
+# ///
+
+import httpx
 import numpy as np
-import requests
 
 
 def collect_dataset():
     """Collect dataset of CSGO
     The dataset contains ADR vs Rating of a Player
     :return : dataset obtained from the link, as matrix
     """
-    response = requests.get(
+    response = httpx.get(
         "https://raw.githubusercontent.com/yashLadha/The_Math_of_Intelligence/"
         "master/Week1/ADRvsRating.csv",
         timeout=10,
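The `# /// script` comment blocks added throughout this PR are PEP 723 inline script metadata, so a PEP 723-aware runner can resolve each file's dependencies on demand (for example, `uv run machine_learning/linear_regression.py`). For a plain GET like this one, httpx is close to a drop-in replacement for requests and keeps the familiar `.text`, `.content`, and `.json()` accessors. A minimal sketch of the migrated fetch; the explicit `raise_for_status()` call is an illustrative addition, not part of this diff:

```python
import httpx
import numpy as np


def collect_dataset() -> np.matrix:
    """Fetch the ADR-vs-Rating CSV and return its rows as a matrix."""
    response = httpx.get(
        "https://raw.githubusercontent.com/yashLadha/The_Math_of_Intelligence/"
        "master/Week1/ADRvsRating.csv",
        timeout=10,
    )
    response.raise_for_status()  # raises httpx.HTTPStatusError on 4xx/5xx
    # Skip the CSV header row, then split each remaining line into columns
    data = [line.split(",") for line in response.text.splitlines()[1:]]
    return np.matrix(data)
```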
2 changes: 1 addition & 1 deletion requirements.txt
@@ -1,5 +1,6 @@
 beautifulsoup4
 fake-useragent
+httpx
 imageio
 keras
 lxml
@@ -8,7 +9,6 @@
 numpy
 opencv-python
 pandas
 pillow
-requests
 rich
 scikit-learn
 sphinx-pyproject
6 changes: 3 additions & 3 deletions scripts/validate_solutions.py
@@ -3,8 +3,8 @@
 # /// script
 # requires-python = ">=3.13"
 # dependencies = [
+#     "httpx",
 #     "pytest",
-#     "requests",
 # ]
 # ///
 
@@ -15,8 +15,8 @@
 import pathlib
 from types import ModuleType
 
+import httpx
 import pytest
-import requests
 
 PROJECT_EULER_DIR_PATH = pathlib.Path.cwd().joinpath("project_euler")
 PROJECT_EULER_ANSWERS_PATH = pathlib.Path.cwd().joinpath(
@@ -66,7 +66,7 @@ def added_solution_file_path() -> list[pathlib.Path]:
         "Accept": "application/vnd.github.v3+json",
         "Authorization": "token " + os.environ["GITHUB_TOKEN"],
     }
-    files = requests.get(get_files_url(), headers=headers, timeout=10).json()
+    files = httpx.get(get_files_url(), headers=headers, timeout=10).json()
     for file in files:
         filepath = pathlib.Path.cwd().joinpath(file["filename"])
         if (
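One subtlety reviewers may care about: `timeout=10` survives the swap unchanged, but its meaning shifts slightly. In requests the value bounds the connect and read phases; in httpx a bare number applies separately to connect, read, write, and pool acquisition. Where finer control matters, httpx accepts an explicit `Timeout` object; a small sketch (the split values here are illustrative, not part of this PR):

```python
import httpx

# 10 s for read/write/pool operations, but only 5 s to establish the connection
timeout = httpx.Timeout(10.0, connect=5.0)
response = httpx.get("https://api.github.com", timeout=timeout)
print(response.status_code)
```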
13 changes: 10 additions & 3 deletions web_programming/co2_emission.py
@@ -2,22 +2,29 @@
 Get CO2 emission data from the UK CarbonIntensity API
 """
 
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "httpx",
+# ]
+# ///
+
 from datetime import date
 
-import requests
+import httpx
 
 BASE_URL = "https://api.carbonintensity.org.uk/intensity"
 
 
 # Emission in the last half hour
 def fetch_last_half_hour() -> str:
-    last_half_hour = requests.get(BASE_URL, timeout=10).json()["data"][0]
+    last_half_hour = httpx.get(BASE_URL, timeout=10).json()["data"][0]
     return last_half_hour["intensity"]["actual"]
 
 
 # Emissions in a specific date range
 def fetch_from_to(start, end) -> list:
-    return requests.get(f"{BASE_URL}/{start}/{end}", timeout=10).json()["data"]
+    return httpx.get(f"{BASE_URL}/{start}/{end}", timeout=10).json()["data"]
 
 
 if __name__ == "__main__":
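For modules like this one that hit the same host repeatedly, httpx's `Client` is the analogue of `requests.Session` and reuses the underlying connection. The PR keeps the one-shot `httpx.get` form; a pooled variant would look like the sketch below (`fetch_both` is a hypothetical helper, not part of the module):

```python
import httpx

BASE_URL = "https://api.carbonintensity.org.uk/intensity"


def fetch_both(start: str, end: str) -> tuple[str, list]:
    # A single Client reuses one connection across both calls,
    # much like a requests.Session would
    with httpx.Client(timeout=10) as client:
        latest = client.get(BASE_URL).json()["data"][0]["intensity"]["actual"]
        window = client.get(f"{BASE_URL}/{start}/{end}").json()["data"]
    return latest, window
```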
12 changes: 10 additions & 2 deletions web_programming/covid_stats_via_xpath.py
@@ -4,9 +4,17 @@
 more convenient to use in Python web projects (e.g. Django or Flask-based)
 """
 
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "httpx",
+#     "lxml",
+# ]
+# ///
+
 from typing import NamedTuple
 
-import requests
+import httpx
 from lxml import html
 
 
@@ -19,7 +27,7 @@ class CovidData(NamedTuple):
 def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> CovidData:
     xpath_str = '//div[@class = "maincounter-number"]/span/text()'
     return CovidData(
-        *html.fromstring(requests.get(url, timeout=10).content).xpath(xpath_str)
+        *html.fromstring(httpx.get(url, timeout=10).content).xpath(xpath_str)
     )
 
 
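`Response.content` is raw bytes in httpx just as in requests, so handing it to `lxml.html.fromstring` needs no further change. A minimal sketch of the unchanged call chain, under the same XPath assumption the module already makes:

```python
import httpx
from lxml import html

response = httpx.get("https://www.worldometers.info/coronavirus/", timeout=10)
tree = html.fromstring(response.content)  # .content is bytes, as with requests
counters = tree.xpath('//div[@class = "maincounter-number"]/span/text()')
```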
18 changes: 16 additions & 2 deletions web_programming/crawl_google_results.py
@@ -1,14 +1,28 @@
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "beautifulsoup4",
+#     "fake-useragent",
+#     "httpx",
+# ]
+# ///
+
 import sys
 import webbrowser
 
-import requests
+import httpx
 from bs4 import BeautifulSoup
 from fake_useragent import UserAgent
 
 if __name__ == "__main__":
     print("Googling.....")
     url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
-    res = requests.get(url, headers={"UserAgent": UserAgent().random}, timeout=10)
+    res = httpx.get(
+        url,
+        headers={"UserAgent": UserAgent().random},
+        timeout=10,
+        follow_redirects=True,
+    )
     # res.raise_for_status()
     with open("project1a.html", "wb") as out_file:  # only for knowing the class
         for data in res.iter_content(10000):
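Two behavioral points show up in this hunk. First, `follow_redirects=True` is required because httpx, unlike requests, does not follow redirects by default: a bare `httpx.get` hands back the 3xx response itself. Second, the unchanged loop below the call still uses `res.iter_content(10000)`, which is a requests method; httpx's `Response` streams through `iter_bytes` instead, so the loop needs the matching rename. A sketch of both points (note the canonical header name is `User-Agent`, which the original `"UserAgent"` key does not match):

```python
import httpx
from fake_useragent import UserAgent

res = httpx.get("http://github.com")  # no redirect following by default
print(res.status_code, res.headers.get("location"))  # e.g. 301 plus the target URL

res = httpx.get(
    "https://www.google.com/search?q=python",
    headers={"User-Agent": UserAgent().random},
    timeout=10,
    follow_redirects=True,
)
with open("project1a.html", "wb") as out_file:
    for data in res.iter_bytes(10000):  # httpx's name for requests' iter_content
        out_file.write(data)
```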
12 changes: 10 additions & 2 deletions web_programming/crawl_google_scholar_citation.py
@@ -3,7 +3,15 @@
 using title and year of publication, and volume and pages of journal.
 """
 
-import requests
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "beautifulsoup4",
+#     "httpx",
+# ]
+# ///
+
+import httpx
 from bs4 import BeautifulSoup
 
 
@@ -12,7 +20,7 @@ def get_citation(base_url: str, params: dict) -> str:
     Return the citation number.
     """
     soup = BeautifulSoup(
-        requests.get(base_url, params=params, timeout=10).content, "html.parser"
+        httpx.get(base_url, params=params, timeout=10).content, "html.parser"
     )
     div = soup.find("div", attrs={"class": "gs_ri"})
     anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
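`params=` passes through unchanged as well: httpx URL-encodes a dict of query parameters just as requests does. A quick sketch (the parameter names and values are illustrative only):

```python
import httpx

params = {"title": "Some Paper", "publication_year": "2020"}
response = httpx.get("https://scholar.google.com/scholar", params=params, timeout=10)
print(response.url)  # the encoded query string appears on the final URL
```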
11 changes: 9 additions & 2 deletions web_programming/currency_converter.py
@@ -3,9 +3,16 @@
 https://www.amdoren.com
 """
 
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "httpx",
+# ]
+# ///
+
 import os
 
-import requests
+import httpx
 
 URL_BASE = "https://www.amdoren.com/api/currency.php"
 
@@ -176,7 +183,7 @@ def convert_currency(
     params = locals()
     # from is a reserved keyword
     params["from"] = params.pop("from_")
-    res = requests.get(URL_BASE, params=params, timeout=10).json()
+    res = httpx.get(URL_BASE, params=params, timeout=10).json()
     return str(res["amount"]) if res["error"] == 0 else res["error_message"]
 
 
14 changes: 11 additions & 3 deletions web_programming/current_stock_price.py
@@ -1,4 +1,12 @@
-import requests
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "beautifulsoup4",
+#     "httpx",
+# ]
+# ///
+
+import httpx
 from bs4 import BeautifulSoup
 
 """
@@ -20,8 +28,8 @@ def stock_price(symbol: str = "AAPL") -> str:
     True
     """
     url = f"https://finance.yahoo.com/quote/{symbol}?p={symbol}"
-    yahoo_finance_source = requests.get(
-        url, headers={"USER-AGENT": "Mozilla/5.0"}, timeout=10
+    yahoo_finance_source = httpx.get(
+        url, headers={"USER-AGENT": "Mozilla/5.0"}, timeout=10, follow_redirects=True
     ).text
     soup = BeautifulSoup(yahoo_finance_source, "html.parser")
 
13 changes: 10 additions & 3 deletions web_programming/current_weather.py
@@ -1,4 +1,11 @@
-import requests
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "httpx",
+# ]
+# ///
+
+import httpx
 
 # Put your API key(s) here
 OPENWEATHERMAP_API_KEY = ""
@@ -19,13 +26,13 @@ def current_weather(location: str) -> list[dict]:
     weather_data = []
     if OPENWEATHERMAP_API_KEY:
         params_openweathermap = {"q": location, "appid": OPENWEATHERMAP_API_KEY}
-        response_openweathermap = requests.get(
+        response_openweathermap = httpx.get(
             OPENWEATHERMAP_URL_BASE, params=params_openweathermap, timeout=10
         )
         weather_data.append({"OpenWeatherMap": response_openweathermap.json()})
     if WEATHERSTACK_API_KEY:
         params_weatherstack = {"query": location, "access_key": WEATHERSTACK_API_KEY}
-        response_weatherstack = requests.get(
+        response_weatherstack = httpx.get(
             WEATHERSTACK_URL_BASE, params=params_weatherstack, timeout=10
         )
         weather_data.append({"Weatherstack": response_weatherstack.json()})
12 changes: 10 additions & 2 deletions web_programming/daily_horoscope.py
@@ -1,4 +1,12 @@
-import requests
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "beautifulsoup4",
+#     "httpx",
+# ]
+# ///
+
+import httpx
 from bs4 import BeautifulSoup
 
 
@@ -7,7 +15,7 @@ def horoscope(zodiac_sign: int, day: str) -> str:
         "https://www.horoscope.com/us/horoscopes/general/"
         f"horoscope-general-daily-{day}.aspx?sign={zodiac_sign}"
     )
-    soup = BeautifulSoup(requests.get(url, timeout=10).content, "html.parser")
+    soup = BeautifulSoup(httpx.get(url, timeout=10).content, "html.parser")
     return soup.find("div", class_="main-horoscope").p.text
 
 
12 changes: 10 additions & 2 deletions web_programming/download_images_from_google_query.py
@@ -1,10 +1,18 @@
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "beautifulsoup4",
+#     "httpx",
+# ]
+# ///
+
 import json
 import os
 import re
 import sys
 import urllib.request
 
-import requests
+import httpx
 from bs4 import BeautifulSoup
 
 headers = {
@@ -39,7 +47,7 @@ def download_images_from_google_query(query: str = "dhaka", max_images: int = 5)
         "ijn": "0",
     }
 
-    html = requests.get(
+    html = httpx.get(
         "https://www.google.com/search", params=params, headers=headers, timeout=10
     )
     soup = BeautifulSoup(html.text, "html.parser")
21 changes: 17 additions & 4 deletions web_programming/emails_from_url.py
@@ -1,5 +1,12 @@
 """Get the site emails from URL."""
 
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "httpx",
+# ]
+# ///
+
 from __future__ import annotations
 
 __author__ = "Muhammad Umer Farooq"
@@ -13,7 +20,7 @@
 from html.parser import HTMLParser
 from urllib import parse
 
-import requests
+import httpx
 
 
 class Parser(HTMLParser):
@@ -72,7 +79,7 @@ def emails_from_url(url: str = "https://github.com") -> list[str]:
 
     try:
         # Open URL
-        r = requests.get(url, timeout=10)
+        r = httpx.get(url, timeout=10, follow_redirects=True)
 
         # pass the raw HTML to the parser to get links
         parser.feed(r.text)
@@ -81,9 +88,15 @@ def emails_from_url(url: str = "https://github.com") -> list[str]:
         valid_emails = set()
         for link in parser.urls:
             # open URL.
-            # read = requests.get(link)
+            # Check if the link is already absolute
+            if not link.startswith("http://") and not link.startswith("https://"):
+                # Prepend protocol only if link starts with domain, normalize otherwise
+                if link.startswith(domain):
+                    link = f"https://{link}"
+                else:
+                    link = parse.urljoin(f"https://{domain}", link)
             try:
-                read = requests.get(link, timeout=10)
+                read = httpx.get(link, timeout=10, follow_redirects=True)
                 # Get the valid email.
                 emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                 # If not in list then append it.
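Beyond the library swap, this hunk adds link normalization the old code lacked: scheme-less links were previously fetched as-is, so relative paths like `/about` failed outright. The new branch prefixes a scheme onto links that already start with the domain and resolves everything else against the base URL with `parse.urljoin`. A quick illustration of the three cases (example links are hypothetical):

```python
from urllib.parse import urljoin

domain = "github.com"

for link in ("github.com/features", "/pricing", "https://github.com/login"):
    if not link.startswith("http://") and not link.startswith("https://"):
        if link.startswith(domain):
            link = f"https://{link}"  # bare domain-prefixed link: just add a scheme
        else:
            link = urljoin(f"https://{domain}", link)  # relative path: resolve it
    print(link)
# https://github.com/features
# https://github.com/pricing
# https://github.com/login
```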