first commit
commit acea803d8f
.gitignore
@@ -0,0 +1,4 @@
dbs/*
venv/*
__pycache__/
*.pyc
@@ -0,0 +1 @@
scrapy crawl basespider -a start_url=http://ctc.ac.cn/
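Once the project is deployed to a scrapyd server (see scrapy.cfg below), the same crawl can be scheduled over scrapyd's HTTP API; a minimal sketch, assuming scrapyd is reachable on localhost:6800 (extra -d parameters such as start_url are forwarded to the spider as arguments, like -a above):

curl http://localhost:6800/schedule.json -d project=zcspider -d spider=basespider -d start_url=http://ctc.ac.cn/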
@@ -0,0 +1,3 @@
scrapy==2.8.0
scrapyd==1.4.1
scrapyd-client==1.2.3
scrapy.cfg
@@ -0,0 +1,14 @@
# Automatically created by: scrapy startproject
#
# For more information about the [deploy] section see:
# https://scrapyd.readthedocs.io/en/latest/deploy.html

[settings]
default = zcspider.settings

[scrapyd]
bind_address = 0.0.0.0

[deploy zc1]
# url = http://localhost:6800/
project = zcspider
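With the [deploy zc1] target defined above, the project can be pushed to a scrapyd server via scrapyd-client; a sketch, assuming the url line is first uncommented (or pointed at the real server):

scrapyd-deploy zc1 -p zcspider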
zcspider/items.py
@@ -0,0 +1,12 @@
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html

import scrapy


class ZcspiderItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    pass
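The spider in this commit yields plain dicts with domain, url and text keys, so ZcspiderItem stays empty; if typed items were wanted, a hypothetical definition matching the fields the pipeline reads could look like this (not part of this commit):

class PageItem(scrapy.Item):
    # the three fields consumed by ZcspiderPipeline.process_item
    domain = scrapy.Field()
    url = scrapy.Field()
    text = scrapy.Field()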
zcspider/middlewares.py
@@ -0,0 +1,103 @@
# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html

from scrapy import signals

# useful for handling different item types with a single interface
from itemadapter import is_item, ItemAdapter


class ZcspiderSpiderMiddleware:
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.

        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.

        # Must return an iterable of Request, or item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.

        # Should return either None or an iterable of Request or item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn't have a response associated.

        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info("Spider opened: %s" % spider.name)


class ZcspiderDownloaderMiddleware:
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Called for each request that goes through the downloader
        # middleware.

        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        return None

    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.

        # Must either:
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.

        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_opened(self, spider):
        spider.logger.info("Spider opened: %s" % spider.name)
zcspider/pipelines.py
@@ -0,0 +1,45 @@
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
from scrapy.exceptions import IgnoreRequest
import psycopg2

class ZcspiderPipeline:

    def open_spider(self, spider):
        print('Initializing database connection')
        self.conn = psycopg2.connect(host="49.232.14.174", database="zcspider", user="postgres", password="zcDsj2021")
        self.cur = self.conn.cursor()
        # clear previously stored pages for this domain before re-crawling
        self.cur.execute("delete from content where domain = %s", (spider.domain, ))
        # rows = self.cur.fetchall()
        # spider.visited_urls_last = [i[0] for i in rows] if len(rows) > 1 else []

    # def process_request(self, request, spider):
    #     print('Check whether the current URL has already been stored')
    #     self.cur.execute("SELECT url from content where url = %s", (request.url,))
    #     row = self.cur.fetchone() is not None
    #     if row:
    #         raise IgnoreRequest(f"Duplicate URL found: {request.url}")
    #     return request

    def process_item(self, item, spider):
        try:
            self.cur.execute("INSERT INTO content (domain, url, text) VALUES(%s, %s, %s)",
                (item['domain'], item['url'], item['text']))
            self.conn.commit()
        except Exception:
            self.conn.rollback()
            raise
        return item

    # Finished; close the connection
    def close_spider(self, spider):
        # close the cursor
        self.cur.close()
        # close the connection
        self.conn.close()
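The pipeline assumes a content table with domain, url and text columns already exists in the zcspider database; a one-off setup sketch using the same connection parameters (the column types are assumptions, only the names are implied by the INSERT above):

import psycopg2

conn = psycopg2.connect(host="49.232.14.174", database="zcspider", user="postgres", password="zcDsj2021")
cur = conn.cursor()
# column names taken from the pipeline's INSERT/DELETE statements; types are a guess
cur.execute("CREATE TABLE IF NOT EXISTS content (domain text, url text, text text)")
conn.commit()
cur.close()
conn.close()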
zcspider/settings.py
@@ -0,0 +1,97 @@
# Scrapy settings for zcspider project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://docs.scrapy.org/en/latest/topics/settings.html
#     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://docs.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = "zcspider"

SPIDER_MODULES = ["zcspider.spiders"]
NEWSPIDER_MODULE = "zcspider.spiders"


# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = "zcspider (+http://www.yourdomain.com)"

# Obey robots.txt rules
ROBOTSTXT_OBEY = True

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
#    "Accept-Language": "en",
#}

# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    "zcspider.middlewares.ZcspiderSpiderMiddleware": 543,
#}

# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    "zcspider.middlewares.ZcspiderDownloaderMiddleware": 543,
#}

# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    "scrapy.extensions.telnet.TelnetConsole": None,
#}

# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
#    "zcspider.pipelines.ZcspiderPipeline": 300,
#}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = "httpcache"
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = "scrapy.extensions.httpcache.FilesystemCacheStorage"

# Set settings whose default value is deprecated to a future-proof value
REQUEST_FINGERPRINTER_IMPLEMENTATION = "2.7"
TWISTED_REACTOR = "twisted.internet.asyncioreactor.AsyncioSelectorReactor"
FEED_EXPORT_ENCODING = "utf-8"

ITEM_PIPELINES = {
    'zcspider.pipelines.ZcspiderPipeline': 300,
}
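ROBOTSTXT_OBEY is enabled but no delay or throttling is configured; if the target site needs gentler crawling, the commented options above can be switched on, for example (illustrative values, not part of this commit):

DOWNLOAD_DELAY = 1
AUTOTHROTTLE_ENABLED = True
AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0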
zcspider/spiders/__init__.py
@@ -0,0 +1,4 @@
# This package will contain the spiders of your Scrapy project
#
# Please refer to the documentation for information on how to create and manage
# your spiders.
@@ -0,0 +1,33 @@
import scrapy
from urllib.parse import urlparse
import html2text


class BaseSpider(scrapy.Spider):
    name = "basespider"
    start_urls = ["http://ctc.ac.cn/"]
    visited_urls = set()  # class-level set, shared across instances of this spider

    def __init__(self, start_url: str, name=None, **kwargs):
        super().__init__(name, **kwargs)
        self.domain = urlparse(start_url).netloc
        self.start_urls = [start_url]
        # file extensions to skip when following links
        self.ext = ('.png', '.jpg', '.pdf', '.doc', '.docx', '.xls', '.xlsx', '.ppt', '.pptx')

    def parse(self, response):
        self.visited_urls.add(response.url)
        h = html2text.HTML2Text()
        h.ignore_links = True  # drop all links from the extracted text
        # extract plain text; response.text honors the page's declared encoding
        text = h.handle(response.text)

        yield {
            'domain': self.domain,
            'url': response.url,
            'text': text,
        }

        for link in response.css("a::attr(href)").getall():
            if link not in self.visited_urls:
                if link.startswith("/") or urlparse(link).netloc == self.domain:
                    if not link.endswith(self.ext):
                        yield scrapy.Request(response.urljoin(link), callback=self.parse)
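Besides the scrapy crawl command above, the spider can be driven from a small script; a sketch, assuming it runs from the project root so zcspider.settings is found, and that the spider module is importable at the path shown (the filename is not visible in this diff):

from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

from zcspider.spiders.basespider import BaseSpider  # assumed module path

process = CrawlerProcess(get_project_settings())
# keyword arguments are forwarded to BaseSpider.__init__, like -a on the command line
process.crawl(BaseSpider, start_url="http://ctc.ac.cn/")
process.start()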