"""
This is an example web scraper for goat.com.
To run this scraper set env variable $SCRAPFLY_KEY with your scrapfly API key:
$ export $SCRAPFLY_KEY="your key from https://scrapfly.io/dashboard"
"""
import os
import json
import math
from datetime import datetime
from uuid import uuid4
from typing import Dict, List
from pathlib import Path
from urllib.parse import quote, urlencode

from loguru import logger as log
from scrapfly import ScrapeConfig, ScrapflyClient, ScrapeApiResponse

SCRAPFLY = ScrapflyClient(key=os.environ["SCRAPFLY_KEY"])
BASE_CONFIG = {
    # bypass goat.com web scraping blocking (Cloudflare)
    "asp": True,
    # set the proxy country to US
    "country": "US",
}

output = Path(__file__).parent / "results"
output.mkdir(exist_ok=True)


def find_hidden_data(result: ScrapeApiResponse) -> dict:
    """extract hidden NEXT_DATA from page html"""
    data = result.selector.css("script#__NEXT_DATA__::text").get()
    data = json.loads(data)
    return data


async def scrape_products(urls: List[str]) -> List[Dict]:
    """scrape goat.com product pages for product data"""
    to_scrape = [ScrapeConfig(url, **BASE_CONFIG) for url in urls]
    products = []
    async for response in SCRAPFLY.concurrent_scrape(to_scrape):
        data = find_hidden_data(response)
        product = data["props"]["pageProps"]["productTemplate"]
        if data["props"]["pageProps"]["offers"]:
            product["offers"] = data["props"]["pageProps"]["offers"]["offerData"]
        else:
            product["offers"] = None
        products.append(product)
    log.success(f"scraped {len(products)} product listings from product pages")
    return products


async def scrape_search(query: str, max_pages: int = 10) -> List[Dict]:
    """scrape goat.com search pages for product listings"""

    def make_page_url(page: int = 1):
        # params is a list of tuples (not a dict) so the repeated fmt_options keys are all preserved in the query string
        params = [
            ("c", "ciojs-client-2.29.12"),  # hardcoded API client version
            ("key", "key_XT7bjdbvjgECO5d8"),  # API key which is hardcoded in the client
            ("i", str(uuid4())),  # unique id for each request, generated by UUID4
            ("s", "2"),
            ("page", page),
            ("num_results_per_page", "24"),
            ("sort_by", "relevance"),
            ("sort_order", "descending"),
            ("fmt_options[hidden_fields]", "gp_lowest_price_cents_3"),
            ("fmt_options[hidden_fields]", "gp_instant_ship_lowest_price_cents_3"),
            ("fmt_options[hidden_facets]", "gp_lowest_price_cents_3"),
            ("fmt_options[hidden_facets]", "gp_instant_ship_lowest_price_cents_3"),
            ("_dt", int(datetime.utcnow().timestamp() * 1000)),  # current timestamp in milliseconds
        ]
        return f"https://ac.cnstrc.com/search/{quote(query)}?{urlencode(params)}"

    url_first_page = make_page_url(page=1)
    log.info(f"scraping product search with query {query}")
    result_first_page = await SCRAPFLY.async_scrape(ScrapeConfig(url=url_first_page, **BASE_CONFIG))
    first_page = json.loads(result_first_page.content)["response"]
    results = [result["data"] for result in first_page["results"]]
    # find the total page count
    total_pages = math.ceil(first_page["total_num_results"] / 24)
    if max_pages and max_pages < total_pages:
        total_pages = max_pages
    # scrape the remaining pages concurrently
    log.info(f"scraping search pagination ({total_pages - 1} more pages)")
    to_scrape = [ScrapeConfig(make_page_url(page=page), **BASE_CONFIG) for page in range(2, total_pages + 1)]
    async for result in SCRAPFLY.concurrent_scrape(to_scrape):
        data = json.loads(result.content)
        items = [result["data"] for result in data["response"]["results"]]
        results.extend(items)
    log.success(f"scraped {len(results)} product listings from search")
    return results
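

# ---------------------------------------------------------------------------
# Example usage: a minimal sketch (not part of the original module) showing how
# the two scrapers above could be invoked with asyncio and how results could be
# written to the ./results/ directory. The search query and the product URL are
# hypothetical placeholders - substitute your own values.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import asyncio

    async def run():
        # scrape a couple of search pages and save the listings
        search_results = await scrape_search("nike dunk", max_pages=2)
        output.joinpath("search.json").write_text(json.dumps(search_results, indent=2))

        # scrape an individual product page (placeholder URL) and save the product data
        products = await scrape_products(["https://www.goat.com/sneakers/example-product-slug"])
        output.joinpath("products.json").write_text(json.dumps(products, indent=2))

    asyncio.run(run())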