chore: add initial scrapy code

2022-06-22 20:39:31 +01:00
parent 5d0c7b19fa
commit e49fa7a346
11 changed files with 1612 additions and 0 deletions


@@ -0,0 +1,2 @@
#!/usr/bin/env bash
# Run from the scrapy project directory: activate the virtualenv, then crawl,
# overwriting the JSON output (-O) on each run.
source .venv/bin/activate
scrapy crawl CountrydownloaderSpider -O ../../data/scrapy/raw_country_data/countries.json

File diff suppressed because it is too large


@@ -0,0 +1,18 @@
[tool.poetry]
name = "wikipedia_country_scraper"
version = "0.1.0"
description = ""
authors = ["Daniel Tomlinson <dtomlinson@panaetius.co.uk>"]

[tool.poetry.dependencies]
python = "^3.8"
Scrapy = "^2.6.1"
scrapy-user-agents = "^0.1.1"
ipython = "^8.4.0"
Pillow = "^9.1.1"

[tool.poetry.dev-dependencies]

[build-system]
requires = ["poetry-core>=1.0.0"]
build-backend = "poetry.core.masonry.api"


@@ -0,0 +1,12 @@
# Automatically created by: scrapy startproject
#
# For more information about the [deploy] section see:
# https://scrapyd.readthedocs.io/en/latest/deploy.html

[settings]
default = wikipedia_country_scraper.settings
shell = ipython

[deploy]
#url = http://localhost:6800/
project = wikipedia_country_scraper


@@ -0,0 +1,18 @@
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html

import scrapy


class WikipediaCountryScraperItem(scrapy.Item):
    country_url = scrapy.Field()
    short_country_name = scrapy.Field()
    country = scrapy.Field()
    flag_description = scrapy.Field()
    anthem_native_title = scrapy.Field()
    anthem_english_title = scrapy.Field()
    # file_urls / files are the input/output fields used by the files pipeline.
    file_urls = scrapy.Field()
    files = scrapy.Field()
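
For orientation, a minimal sketch of one populated item (field values are hypothetical; the files field is filled in later by the files pipeline):

from itemadapter import ItemAdapter

item = WikipediaCountryScraperItem(
    country_url="https://en.wikipedia.org/wiki/Example",  # hypothetical values
    short_country_name="Example",
    flag_description="<p>The flag of Example ...</p>",
    anthem_native_title="Example Anthem",
    anthem_english_title="Example Anthem",
    file_urls=["https://upload.wikimedia.org/x/Flag_of_Example.svg"],
)
print(ItemAdapter(item).asdict())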


@@ -0,0 +1,103 @@
# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html

from scrapy import signals

# useful for handling different item types with a single interface
from itemadapter import is_item, ItemAdapter


class WikipediaCountryScraperSpiderMiddleware:
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.
        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.
        # Must return an iterable of Request, or item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.
        # Should return either None or an iterable of Request or item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn't have a response associated.
        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)


class WikipediaCountryScraperDownloaderMiddleware:
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Called for each request that goes through the downloader
        # middleware.
        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        return None

    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.
        # Must either:
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.
        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)


@@ -0,0 +1,28 @@
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html

import re

# useful for handling different item types with a single interface
from itemadapter import ItemAdapter

from scrapy.pipelines.files import FilesPipeline


class WikipediaCountryScraperPipeline:
    def process_item(self, item, spider):
        return item


class WikipediaCountryScraperFilesPipeline(FilesPipeline):
    def file_path(self, request, response=None, info=None, *, item=None):
        # Anthem URLs that carry a "File:" component are checked first: the
        # generic last-segment pattern below matches every URL and would
        # otherwise make this branch unreachable.
        anthem_filename = re.search(r"(?P<filename>(?<=File:)[^$]*)", request.url)
        if anthem_filename is not None and anthem_filename["filename"].endswith(".mp3"):
            return f"files/{anthem_filename['filename']}"
        # Fall back to the last path segment of the URL (flag images and
        # transcoded audio files).
        flag_filename = re.search(r"([^/]*)$", request.url)
        if isinstance(flag_filename, re.Match):
            return f"files/{flag_filename[1]}"


@@ -0,0 +1,93 @@
import pathlib

# Scrapy settings for wikipedia_country_scraper project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = "wikipedia_country_scraper"

SPIDER_MODULES = ["wikipedia_country_scraper.spiders"]
NEWSPIDER_MODULE = "wikipedia_country_scraper.spiders"

# Crawl responsibly by identifying yourself (and your website) on the user-agent
# USER_AGENT = 'wikipedia_country_scraper (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = True

# Configure maximum concurrent requests performed by Scrapy (default: 16)
# CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
# CONCURRENT_REQUESTS_PER_DOMAIN = 16
# CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
# COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
# TELNETCONSOLE_ENABLED = False

# Override the default request headers:
# DEFAULT_REQUEST_HEADERS = {
#     'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#     'Accept-Language': 'en',
# }

# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
#     'wikipedia_country_scraper.middlewares.WikipediaCountryScraperSpiderMiddleware': 543,
# }

# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
    "scrapy.downloadermiddlewares.useragent.UserAgentMiddleware": None,
    "scrapy_user_agents.middlewares.RandomUserAgentMiddleware": 400,
}

# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
# EXTENSIONS = {
#     'scrapy.extensions.telnet.TelnetConsole': None,
# }

# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    "wikipedia_country_scraper.pipelines.WikipediaCountryScraperFilesPipeline": 300,
    # "scrapy.pipelines.files.FilesPipeline": 1
}

FILES_STORE = str(pathlib.Path(__file__).resolve().parents[3] / "data" / "scrapy" / "raw_country_data")

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
# AUTOTHROTTLE_ENABLED = True
# The initial download delay
# AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
# AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
# AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
# AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED = True
# HTTPCACHE_EXPIRATION_SECS = 0
# HTTPCACHE_DIR = 'httpcache'
# HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
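
For clarity on FILES_STORE: parents[3] climbs from settings.py past the package and project directories to the directory that contains data/, matching the ../../data path used by the crawl script. A minimal sketch, assuming a hypothetical checkout layout:

import pathlib

# Hypothetical location of this settings module, for illustration only.
settings_file = pathlib.Path(
    "/repo/scraping/wikipedia_country_scraper/wikipedia_country_scraper/settings.py"
)
# parents[0] = package dir, parents[1] = scrapy project dir, parents[3] = /repo
print(settings_file.parents[3] / "data" / "scrapy" / "raw_country_data")
# -> /repo/data/scrapy/raw_country_data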


@@ -0,0 +1,4 @@
# This package will contain the spiders of your Scrapy project
#
# Please refer to the documentation for information on how to create and manage
# your spiders.


@@ -0,0 +1,122 @@
from __future__ import annotations

import re

import scrapy
from scrapy.http import TextResponse

from wikipedia_country_scraper.items import WikipediaCountryScraperItem


class CountrydownloaderSpider(scrapy.Spider):
    name = "CountrydownloaderSpider"

    def start_requests(self):
        return [
            scrapy.Request(
                url="https://en.wikipedia.org/wiki/List_of_sovereign_states", callback=self.extract_country_urls
            )
        ]

    def extract_country_urls(self, response: TextResponse):
        # Bolded country links inside the sortable wikitable.
        country_urls_xpath = response.xpath(
            "//table[contains(@class, 'sortable') and contains(@class, 'wikitable')]/tbody/tr[not(contains(@style, 'background'))]/td[1 and contains(@style, 'vertical-align:top;')]/b/a/@href"
        ).getall()
        # for url in country_urls_xpath:
        for url in country_urls_xpath[:3]:  # limited to the first three countries while developing
            regex_match = re.search(r"\/wiki\/(?P<short_country_name>[^$]*)", url)
            yield scrapy.Request(
                url=f"https://en.wikipedia.org{url}",
                callback=self.extract_country_information,
                cb_kwargs={
                    "country_item": {
                        "country_url": f"https://en.wikipedia.org{url}",
                        "short_country_name": regex_match["short_country_name"]
                        if isinstance(regex_match, re.Match)
                        else None,
                    }
                },
            )

    def extract_country_information(self, response: TextResponse, country_item: dict):
        country_information_xpath = response.xpath("//table[contains(@class, 'infobox')]/tbody/tr").getall()
        flag_image_url = response.xpath(
            "//table[contains(@class, 'infobox')]/tbody/tr[2]/td/div/div[1]/div[1]/a/@href"
        ).get()
        flag_description_url = response.xpath(
            "//table[contains(@class, 'infobox')]/tbody/tr[2]/td/div/div[1]/div[2]/a/@href"
        ).get()
        anthem_file_url = response.xpath(
            "//table[contains(@class, 'infobox')]/tbody/tr/td[contains(@class, 'anthem')]//source[@data-title='MP3']/@src"
        ).get()
        anthem_native_title = response.xpath(
            "//table[contains(@class, 'infobox')]/tbody/tr/td[contains(@class, 'anthem')]/a/@title"
        ).get()
        anthem_english_title = response.xpath(
            "//table[contains(@class, 'infobox')]/tbody/tr/td[contains(@class, 'anthem')]/a/text()"
        ).get()
        country_item = {
            **country_item,
            "country": country_information_xpath,
            "anthem": {"anthem_native_title": anthem_native_title, "anthem_english_title": anthem_english_title},
        }
        yield scrapy.Request(
            url=f"https://en.wikipedia.org{flag_description_url}",
            callback=self.extract_flag_description,
            cb_kwargs={
                "country_item": country_item,
                "urls": {
                    "flag_image_url": f"https://en.wikipedia.org{flag_image_url}",
                    "anthem_file_url": f"https:{anthem_file_url}",
                },
            },
        )

    def extract_flag_description(self, response: TextResponse, country_item: dict, urls: dict):
        # First non-empty paragraph of the flag's description page.
        flag_description_xpath = response.xpath(
            "//div[contains(@id, 'mw-content-text')]/div/p[not(contains(@class, 'mw-empty-elt'))]"
        ).get()
        country_item = {**country_item, "flag_description": flag_description_xpath}
        yield scrapy.Request(
            url=urls["flag_image_url"],
            callback=self.extract_flag_images,
            cb_kwargs={
                "country_item": country_item,
                "urls": urls,
            },
        )

    def extract_flag_images(self, response: TextResponse, country_item: dict, urls: dict):
        flag_image_xpath = response.xpath("//div[contains(@class, 'fullImageLink')]/a/@href").get()
        country_item = {**country_item, "flag_image_url": f"https:{flag_image_xpath}"}
        country_scrapy_item = WikipediaCountryScraperItem()
        country_scrapy_item["country_url"] = country_item["country_url"]
        country_scrapy_item["short_country_name"] = country_item["short_country_name"]
        # country_scrapy_item["country"] = country_item["country"]
        country_scrapy_item["flag_description"] = country_item["flag_description"]
        country_scrapy_item["anthem_native_title"] = country_item["anthem"]["anthem_native_title"]
        country_scrapy_item["anthem_english_title"] = country_item["anthem"]["anthem_english_title"]
        # file_urls is consumed by the files pipeline, which downloads both
        # the flag image and the anthem MP3.
        country_scrapy_item["file_urls"] = [country_item["flag_image_url"], urls["anthem_file_url"]]
        yield country_scrapy_item

    # def extract_anthem_file(self, response: TextResponse, country_item: dict, urls: dict):
    #     anthem_file_xpath = response.xpath(
    #         "//table[contains(@class, 'infobox')]/tbody/tr/td[contains(@class, 'anthem')]//span[contains(@class, 'audio')]/a/@href"
    #     ).get()
    #     country_scrapy_item = WikipediaCountryScraperItem()
    #     country_scrapy_item["country_url"] = country_item["country_url"]
    #     country_scrapy_item["short_country_name"] = country_item["short_country_name"]
    #     country_scrapy_item["country"] = country_item["country"]
    #     country_scrapy_item["flag_description"] = country_item["flag_description"]
    #     country_scrapy_item["file_urls"] = [country_item["flag_image_url"], f"https:{anthem_file_xpath}"]