scrapy doesn't download images - web-scraping

I am trying to download images from different URLs via Scrapy. I'm new to Python and Scrapy, so maybe I'm missing something obvious. This is my first post on Stack Overflow; help would be really appreciated!
Here are my different files:
items.py
from scrapy.item import Item, Field

class ImagesTestItem(Item):
    image_urls = Field()
    image_names = Field()
    images = Field()
    pass
settings.py:
from scrapy import log
log.msg("This is a warning", level=log.WARNING)
log.msg("This is a error", level=log.ERROR)
BOT_NAME = 'images_test'
SPIDER_MODULES = ['images_test.spiders']
NEWSPIDER_MODULE = 'images_test.spiders'
ITEM_PIPELINES = {'images_test.pipelines.images_test': 1}
IMAGES_STORE = '/Users/Coralie/Documents/scrapy/images_test/images'
DOWNLOAD_DELAY = 5
STATS_CLASS = True
spider:
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.contrib.loader import XPathItemLoader
from scrapy.selector import HtmlXPathSelector
from scrapy.item import Item, Field
from scrapy.utils.response import get_base_url
import logging
from scrapy.log import ScrapyFileLogObserver

logfile = open('testlog.log', 'w')
log_observer = ScrapyFileLogObserver(logfile, level=logging.DEBUG)
log_observer.start()

class images_test(CrawlSpider):
    name = "images_test"
    allowed_domains = ['veranstaltungszentrum.bbaw.de']
    start_urls = ['http://veranstaltungszentrum.bbaw.de/en/photo_gallery/leib0%d_g.jpg' % i for i in xrange(9)]

    def parse_item(self, response):
        hxs = HtmlXPathSelector(response)
        items = []
        sites = hxs.select()
        number = 0
        for site in sites:
            xpath = '//img/@src'
            image_urls = hxs.select('//img/@src').extract()
            item['image_urls'] = ["http://veranstaltungszentrum.bbaw.de/en/photo_gallery/leib0x_g.jpg" + x for x in image_urls]
            items.append(item)
            number = number + 1
        return item
        print item['image_urls']
pipelines.py
from scrapy.contrib.pipeline.images import ImagesPipeline
from scrapy.exceptions import DropItem
from scrapy.http import Request
from PIL import Image
from scrapy import log

log.msg("This is a warning", level=log.WARNING)
log.msg("This is a error", level=log.ERROR)
scrapy.log.ERROR

class images_test(ImagesPipeline):
    def get_media_requests(self, item, info):
        for image_url in item['image_urls']:
            yield Request(image_url)

    def item_completed(self, results, item, info):
        image_paths = [x['path'] for ok, x in results if ok]
        if not image_paths:
            raise DropItem("Item contains no images")
        item['image_paths'] = image_paths
        return item
The log says the following:
/Library/Python/2.7/site-packages/Scrapy-0.20.2-py2.7.egg/scrapy/settings/deprecated.py:26: ScrapyDeprecationWarning: You are using the following settings which are deprecated or obsolete (ask scrapy-users@googlegroups.com for alternatives):
STATS_ENABLED: no longer supported (change STATS_CLASS instead)
warnings.warn(msg, ScrapyDeprecationWarning)
2014-01-03 11:36:48+0100 [scrapy] INFO: Scrapy 0.20.2 started (bot: images_test)
2014-01-03 11:36:48+0100 [scrapy] DEBUG: Optional features available: ssl, http11
2014-01-03 11:36:48+0100 [scrapy] DEBUG: Overridden settings: {'NEWSPIDER_MODULE': 'images_test.spiders', 'SPIDER_MODULES': ['images_test.spiders'], 'DOWNLOAD_DELAY': 5, 'BOT_NAME': 'images_test'}
2014-01-03 11:36:48+0100 [scrapy] DEBUG: Enabled extensions: LogStats, TelnetConsole, CloseSpider, WebService, CoreStats, SpiderState
2014-01-03 11:36:49+0100 [scrapy] DEBUG: Enabled downloader middlewares: HttpAuthMiddleware, DownloadTimeoutMiddleware, UserAgentMiddleware, RetryMiddleware, DefaultHeadersMiddleware, MetaRefreshMiddleware, HttpCompressionMiddleware, RedirectMiddleware, CookiesMiddleware, ChunkedTransferMiddleware, DownloaderStats
2014-01-03 11:36:49+0100 [scrapy] DEBUG: Enabled spider middlewares: HttpErrorMiddleware, OffsiteMiddleware, RefererMiddleware, UrlLengthMiddleware, DepthMiddleware
2014-01-03 11:36:49+0100 [scrapy] WARNING: This is a warning
2014-01-03 11:36:49+0100 [scrapy] ERROR: This is a error
2014-01-03 11:36:49+0100 [scrapy] DEBUG: Enabled item pipelines: images_test
2014-01-03 11:36:49+0100 [images_test] INFO: Spider opened
2014-01-03 11:36:49+0100 [images_test] INFO: Crawled 0 pages (at 0 pages/min), scraped 0 items (at 0 items/min)
2014-01-03 11:36:49+0100 [scrapy] DEBUG: Telnet console listening on 0.0.0.0:6023
2014-01-03 11:36:49+0100 [scrapy] DEBUG: Web service listening on 0.0.0.0:6080
2014-01-03 11:36:49+0100 [images_test] DEBUG: Crawled (404) <GET http://veranstaltungszentrum.bbaw.de/en/photo_gallery/leib00_g.jpg> (referer: None)
2014-01-03 11:36:55+0100 [images_test] DEBUG: Crawled (200) <GET http://veranstaltungszentrum.bbaw.de/en/photo_gallery/leib01_g.jpg> (referer: None)
2014-01-03 11:36:59+0100 [images_test] DEBUG: Crawled (200) <GET http://veranstaltungszentrum.bbaw.de/en/photo_gallery/leib02_g.jpg> (referer: None)
2014-01-03 11:37:05+0100 [images_test] DEBUG: Crawled (200) <GET http://veranstaltungszentrum.bbaw.de/en/photo_gallery/leib03_g.jpg> (referer: None)
2014-01-03 11:37:10+0100 [images_test] DEBUG: Crawled (200) <GET http://veranstaltungszentrum.bbaw.de/en/photo_gallery/leib04_g.jpg> (referer: None)
2014-01-03 11:37:16+0100 [images_test] DEBUG: Crawled (200) <GET http://veranstaltungszentrum.bbaw.de/en/photo_gallery/leib05_g.jpg> (referer: None)
2014-01-03 11:37:22+0100 [images_test] DEBUG: Crawled (200) <GET http://veranstaltungszentrum.bbaw.de/en/photo_gallery/leib06_g.jpg> (referer: None)
2014-01-03 11:37:29+0100 [images_test] DEBUG: Crawled (200) <GET http://veranstaltungszentrum.bbaw.de/en/photo_gallery/leib07_g.jpg> (referer: None)
2014-01-03 11:37:36+0100 [images_test] DEBUG: Crawled (200) <GET http://veranstaltungszentrum.bbaw.de/en/photo_gallery/leib08_g.jpg> (referer: None)
2014-01-03 11:37:36+0100 [images_test] INFO: Closing spider (finished)
2014-01-03 11:37:36+0100 [images_test] INFO: Dumping Scrapy stats:
{'downloader/request_bytes': 2376,
'downloader/request_count': 9,
'downloader/request_method_count/GET': 9,
'downloader/response_bytes': 343660,
'downloader/response_count': 9,
'downloader/response_status_count/200': 8,
'downloader/response_status_count/404': 1,
'finish_reason': 'finished',
'finish_time': datetime.datetime(2014, 1, 3, 10, 37, 36, 166139),
'log_count/DEBUG': 15,
'log_count/ERROR': 1,
'log_count/INFO': 3,
'log_count/WARNING': 1,
'response_received_count': 9,
'scheduler/dequeued': 9,
'scheduler/dequeued/memory': 9,
'scheduler/enqueued': 9,
'scheduler/enqueued/memory': 9,
'start_time': datetime.datetime(2014, 1, 3, 10, 36, 49, 37947)}
2014-01-03 11:37:36+0100 [images_test] INFO: Spider closed (finished)
How come the images are not getting saved? Even my print item['image_urls'] statement is not being executed.
Thank you

Consider changing your spider code to the following:
from urlparse import urljoin

start_urls = ['http://veranstaltungszentrum.bbaw.de/en/photo_gallery']

def parse(self, response):
    sel = HtmlXPathSelector(response)
    item = ImagesTestItem()
    url = 'http://veranstaltungszentrum.bbaw.de'
    item['image_urls'] = [urljoin(url, x)
                          for x in sel.select('//img/@src').extract()]
    return item
HtmlXPathSelector can only parse HTML documents; it seems you fed it images from your start_urls.
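If you want to keep feeding the image URLs directly as start_urls instead, a minimal sketch (assuming the item class and the ImagesPipeline settings stay as posted) is to skip the HTML parsing entirely and hand the crawled URL on to the pipeline:
def parse(self, response):
    # the response body here is a JPEG, so there is nothing to select with XPath;
    # just pass the URL of the image that was crawled on to the images pipeline
    item = ImagesTestItem()
    item['image_urls'] = [response.url]
    return item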

You can try it out without pipelines:
import urllib.request

def parse(self, response):
    # extract your image url
    imageurl = response.xpath("//img/@src").get()
    imagename = imageurl.split("/")[-1]
    req = urllib.request.Request(imageurl, headers={'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.100 Safari/537.36'})
    resource = urllib.request.urlopen(req)
    output = open("foldername/" + imagename, "wb")
    output.write(resource.read())
    output.close()
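If you would rather stay inside Scrapy's downloader (so DOWNLOAD_DELAY and the configured middlewares still apply), a rough sketch, assuming a writable images/ directory, is to request the image yourself and write response.body in a callback:
import os
from scrapy.http import Request

def parse(self, response):
    # follow the first image on the page and save it in a separate callback
    image_url = response.urljoin(response.xpath("//img/@src").get())
    yield Request(image_url, callback=self.save_image)

def save_image(self, response):
    # response.body holds the raw image bytes that Scrapy downloaded
    os.makedirs("images", exist_ok=True)
    filename = response.url.split("/")[-1]
    with open(os.path.join("images", filename), "wb") as f:
        f.write(response.body)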

Related

I am using scrapy to scrape data from Yelp. I cannot see any error but data is not getting scraped from the StartURLs mentioned in the spider

Code for items.py and the other files is given below, and the logs are included at the end. I am not getting any error, but according to the logs Scrapy has not scraped any pages.
```
import scrapy

class YelpItem(scrapy.Item):
    # define the fields for your item here like:
    name = scrapy.Field()
    name_url = scrapy.Field()
    rating = scrapy.Field()
    date = scrapy.Field()
    review_text = scrapy.Field()
    user_pic = scrapy.Field()
    city = scrapy.Field()
    is_true = scrapy.Field()
```
code for settings.py
import pathlib

BOT_NAME = 'yelp-scrapy-dev'
SPIDER_MODULES = ['yelp-scrapy-dev.spiders']
NEWSPIDER_MODULE = 'yelp-scrapy-dev.spiders'
{
    pathlib.Path('output1.csv'): {
        'format': 'csv',
    },
}
ROBOTSTXT_OBEY = False
code for pipelines.py
class YelpPipeline:
    def open_spider(self, spider):
        self.file = open('output1.csv', 'w')

    def close_spider(self, spider):
        self.file.close()

    def process_item(self, item, spider):
        return item
code for middlewares.py
from scrapy import signals

# useful for handling different item types with a single interface
from itemadapter import is_item, ItemAdapter


class YelpSpiderMiddleware:
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.
        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.
        # Must return an iterable of Request, or item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.
        # Should return either None or an iterable of Request or item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn't have a response associated.
        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)


class YelpDownloaderMiddleware:
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Called for each request that goes through the downloader
        # middleware.
        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        return None

    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.
        # Must either;
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.
        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
Code for the city spider. The spider collects the reviews from the specified URLs.
import scrapy
from ..items import YelpItem

# currently will grab the first 100 reviews from the first 10 businesses from start url
class CitySpider(scrapy.Spider):
    name = 'city'
    start_urls = [
        'https://www.yelp.com/search?find_desc=&find_loc=Seattle%2C+WA',
        'https://www.yelp.com/search?find_desc=&find_loc=SanFrancisco%2C+CA',
        'https://www.yelp.com/search?find_desc=&find_loc=NewYork%2C+NY',
        'https://www.yelp.com/search?find_desc=&find_loc=Dallas%2C+TX',
        'https://www.yelp.com/search?find_desc=&find_loc=Atlanta%2C+GA',
    ]

    # gets the first 10 businesses from the start url
    def parse(self, response):
        business_pages = response.css('.text-weight--bold__373c0__1elNz a')
        yield from response.follow_all(business_pages, self.parse_business)

    # extracts the first 100 reviews from the yelp-scrapy-dev business
    def parse_business(self, response):
        items = YelpItem()
        all_reviews = response.css('.sidebarActionsHoverTarget__373c0__2kfhE')
        address = response.request.url.split('?')
        src = address[0].split('/')
        biz = src[-1].split('-')
        loc = biz[-1] if not biz[-1].isdigit() else biz[-2]
        if loc == 'seattle':
            city = 'Seattle, WA'
        elif loc == 'dallas':
            city = 'Dallas, TX'
        elif loc == 'francisco':
            city = 'San Francisco, CA'
        elif loc == 'york':
            city = 'New York, NY'
        elif loc == 'atlanta':
            city = 'Atlanta, GA'
        else:
            city = 'outofrange'
        for review in all_reviews:
            name = review.css('.link-size--inherit__373c0__1VFlE::text').extract_first()
            name_url = review.css('.link-size--inherit__373c0__1VFlE::attr(href)').extract_first().split('=')
            rating = review.css('.overflow--hidden__373c0__2y4YK::attr(aria-label)').extract()
            date = review.css('.arrange-unit-fill__373c0__3Sfw1 .text-color--mid__373c0__jCeOG::text').extract()
            review_text = review.css('.raw__373c0__3rKqk::text').extract()
            user_pic = review.css('.gutter-1__373c0__2l5bx .photo-box-img__373c0__35y5v::attr(src)').extract()
            if city != 'outofrange':
                # making sure data is stored as a str
                items['name'] = name
                items['name_url'] = name_url[1]
                items['rating'] = rating[0]
                items['date'] = date[0]
                items['review_text'] = review_text[0]
                items['user_pic'] = user_pic[0] != 'https://s3-media0.fl.yelpcdn.com/assets/srv0/yelp_styleguide/514f6997a318/assets/img/default_avatars/user_60_square.png'
                items['city'] = city
                items['is_true'] = True
                yield items
        source = response.request.url
        # prevent duplicate secondary pages from being recrawled
        if '?start=' not in source:
            # gets 20th-100th reviews, pages are every 20 reviews
            for i in range(1, 5):
                next_page = source + '?start=' + str(i*20)
                yield response.follow(next_page, callback=self.parse_business)
And below are the log lines.
(venv) C:\Users\somar\yelp-scrapy\yelp>scrapy crawl city
2020-10-09 22:34:53 [scrapy.utils.log] INFO: Scrapy 2.1.0 started (bot: yelp-scrapy-dev)
2020-10-09 22:34:53 [scrapy.utils.log] INFO: Versions: lxml 4.5.2.0, libxml2 2.9.5, cssselect 1.1.0, parsel 1.6.0, w3lib 1.22.0, Twisted 20.3.0, Python 3.7
.6 (default, Jan 8 2020, 20:23:39) [MSC v.1916 64 bit (AMD64)], pyOpenSSL 19.1.0 (OpenSSL 1.1.1h 22 Sep 2020), cryptography 3.1.1, Platform Windows-10-10
.0.18362-SP0
2020-10-09 22:34:53 [scrapy.utils.log] DEBUG: Using reactor: twisted.internet.selectreactor.SelectReactor
2020-10-09 22:34:53 [scrapy.crawler] INFO: Overridden settings:
{'BOT_NAME': 'yelp-scrapy-dev',
'NEWSPIDER_MODULE': 'yelp-scrapy-dev.spiders',
'SPIDER_MODULES': ['yelp-scrapy-dev.spiders']}
2020-10-09 22:34:53 [scrapy.extensions.telnet] INFO: Telnet Password: 1f95c571b9245c42
2020-10-09 22:34:53 [scrapy.middleware] INFO: Enabled extensions:
['scrapy.extensions.corestats.CoreStats',
'scrapy.extensions.telnet.TelnetConsole',
'scrapy.extensions.logstats.LogStats']
2020-10-09 22:34:54 [scrapy.middleware] INFO: Enabled downloader middlewares:
['scrapy.downloadermiddlewares.httpauth.HttpAuthMiddleware',
'scrapy.downloadermiddlewares.downloadtimeout.DownloadTimeoutMiddleware',
'scrapy.downloadermiddlewares.defaultheaders.DefaultHeadersMiddleware',
'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware',
'scrapy.downloadermiddlewares.retry.RetryMiddleware',
'scrapy.downloadermiddlewares.redirect.MetaRefreshMiddleware',
'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware',
'scrapy.downloadermiddlewares.redirect.RedirectMiddleware',
'scrapy.downloadermiddlewares.cookies.CookiesMiddleware',
'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware',
'scrapy.downloadermiddlewares.stats.DownloaderStats']
2020-10-09 22:34:54 [scrapy.middleware] INFO: Enabled spider middlewares:
['scrapy.spidermiddlewares.httperror.HttpErrorMiddleware',
'scrapy.spidermiddlewares.offsite.OffsiteMiddleware',
'scrapy.spidermiddlewares.referer.RefererMiddleware',
'scrapy.spidermiddlewares.urllength.UrlLengthMiddleware',
'scrapy.spidermiddlewares.depth.DepthMiddleware']
2020-10-09 22:34:54 [scrapy.middleware] INFO: Enabled item pipelines:
[]
2020-10-09 22:34:54 [scrapy.core.engine] INFO: Spider opened
2020-10-09 22:34:54 [scrapy.extensions.logstats] INFO: Crawled 0 pages (at 0 pages/min), scraped 0 items (at 0 items/min)
2020-10-09 22:34:54 [scrapy.extensions.telnet] INFO: Telnet console listening on 127.0.0.1:6023
2020-10-09 22:34:55 [scrapy.core.engine] DEBUG: Crawled (200) <GET https://www.yelp.com/search?find_desc=&find_loc=Dallas%2C+TX> (referer: None)
2020-10-09 22:34:55 [scrapy.core.engine] DEBUG: Crawled (200) <GET https://www.yelp.com/search?find_desc=&find_loc=Atlanta%2C+GA> (referer: None)
2020-10-09 22:34:55 [scrapy.core.engine] DEBUG: Crawled (200) <GET https://www.yelp.com/search?find_desc=&find_loc=Seattle%2C+WA> (referer: None)
2020-10-09 22:34:56 [scrapy.core.engine] DEBUG: Crawled (200) <GET https://www.yelp.com/search?find_desc=&find_loc=NewYork%2C+NY> (referer: None)
2020-10-09 22:34:56 [scrapy.core.engine] DEBUG: Crawled (200) <GET https://www.yelp.com/search?find_desc=&find_loc=SanFrancisco%2C+CA> (referer: None)
2020-10-09 22:34:56 [scrapy.core.engine] INFO: Closing spider (finished)
2020-10-09 22:34:56 [scrapy.statscollectors] INFO: Dumping Scrapy stats:
{'downloader/request_bytes': 1264,
'downloader/request_count': 5,
'downloader/request_method_count/GET': 5,
'downloader/response_bytes': 278234,
'downloader/response_count': 5,
'downloader/response_status_count/200': 5,
'elapsed_time_seconds': 2.159687,
'finish_reason': 'finished',
'finish_time': datetime.datetime(2020, 10, 10, 5, 34, 56, 173193),
'log_count/DEBUG': 5,
'log_count/INFO': 10,
'response_received_count': 5,
'scheduler/dequeued': 5,
'scheduler/dequeued/memory': 5,
'scheduler/enqueued': 5,
'scheduler/enqueued/memory': 5,
'start_time': datetime.datetime(2020, 10, 10, 5, 34, 54, 13506)}
2020-10-09 22:34:56 [scrapy.core.engine] INFO: Spider closed (finished)

Scrapy runs but doesn't crawl site - Scrapy Shell response in loop

I've tried to set up Scrapy to crawl a database of technical norms and standards.
What is the problem:
I wrote a scraper and got a 200 response, but no results - it scraped 0 pages:
2020-09-06 12:42:00 [scrapy.utils.log] INFO: Scrapy 1.5.1 started (bot: stack)
2020-09-06 12:42:00 [scrapy.utils.log] INFO: Versions: lxml 4.2.5.0, libxml2 2.9.5, cssselect 1.0.3, parsel 1.5.1, w3lib 1.19.0, Twisted 18.9.0, Python 3.7.2 (tags/v3.7.2:9a3ffc0492, Dec 23 2018, 22:20:52) [MSC v.1916 32 bit (Intel)], pyOpenSSL 18.0.0 (OpenSSL 1.1.0j 20 Nov 2018), cryptography 2.4.2, Platform Windows-10-10.0.18362-SP0
2020-09-06 12:42:00 [scrapy.crawler] INFO: Overridden settings: {'BOT_NAME': 'stack', 'NEWSPIDER_MODULE': 'stack.spiders', 'SPIDER_MODULES': ['stack.spiders'], 'USER_AGENT': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'}
2020-09-06 12:42:00 [scrapy.middleware] INFO: Enabled extensions:
['scrapy.extensions.corestats.CoreStats',
'scrapy.extensions.telnet.TelnetConsole',
'scrapy.extensions.logstats.LogStats']
2020-09-06 12:42:01 [scrapy.middleware] INFO: Enabled downloader middlewares:
['scrapy.downloadermiddlewares.httpauth.HttpAuthMiddleware',
'scrapy.downloadermiddlewares.downloadtimeout.DownloadTimeoutMiddleware',
'scrapy.downloadermiddlewares.defaultheaders.DefaultHeadersMiddleware',
'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware',
'scrapy.downloadermiddlewares.retry.RetryMiddleware',
'scrapy.downloadermiddlewares.redirect.MetaRefreshMiddleware',
'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware',
'scrapy.downloadermiddlewares.redirect.RedirectMiddleware',
'scrapy.downloadermiddlewares.cookies.CookiesMiddleware',
'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware',
'scrapy.downloadermiddlewares.stats.DownloaderStats']
2020-09-06 12:42:01 [scrapy.middleware] INFO: Enabled spider middlewares:
['scrapy.spidermiddlewares.httperror.HttpErrorMiddleware',
'scrapy.spidermiddlewares.offsite.OffsiteMiddleware',
'scrapy.spidermiddlewares.referer.RefererMiddleware',
'scrapy.spidermiddlewares.urllength.UrlLengthMiddleware',
'scrapy.spidermiddlewares.depth.DepthMiddleware']
2020-09-06 12:42:01 [scrapy.middleware] INFO: Enabled item pipelines:
[]
2020-09-06 12:42:01 [scrapy.core.engine] INFO: Spider opened
2020-09-06 12:42:01 [scrapy.extensions.logstats] INFO: Crawled 0 pages (at 0 pages/min), scraped 0 items (at 0 items/min)
2020-09-06 12:42:01 [scrapy.extensions.telnet] DEBUG: Telnet console listening on 127.0.0.1:6023
2020-09-06 12:42:01 [scrapy.core.engine] DEBUG: Crawled (200) <GET https://www.beuth.de/de/regelwerke/vdi/vdi-richtlinien-entwuerfe> (referer: None)
2020-09-06 12:42:01 [scrapy.core.engine] INFO: Closing spider (finished)
2020-09-06 12:42:01 [scrapy.statscollectors] INFO: Dumping Scrapy stats:
{'downloader/request_bytes': 341,
'downloader/request_count': 1,
'downloader/request_method_count/GET': 1,
'downloader/response_bytes': 6149,
'downloader/response_count': 1,
'downloader/response_status_count/200': 1,
'finish_reason': 'finished',
'finish_time': datetime.datetime(2020, 9, 6, 10, 42, 1, 684021),
'log_count/DEBUG': 2,
'log_count/INFO': 7,
'response_received_count': 1,
'scheduler/dequeued': 1,
'scheduler/dequeued/memory': 1,
'scheduler/enqueued': 1,
'scheduler/enqueued/memory': 1,
'start_time': datetime.datetime(2020, 9, 6, 10, 42, 1, 140686)}
2020-09-06 12:42:01 [scrapy.core.engine] INFO: Spider closed (finished)
Here are my items out of the items.py file which I want to scrape:
from scrapy.item import Item, Field

class StackItem(Item):
    title = Field()
    url = Field()
    date = Field()
    price = Field()
    subtitle = Field()
    description = Field()
This is my crawler code:
from scrapy import Spider
from scrapy.selector import Selector
from stack.items import StackItem

class StackSpider(Spider):
    name = "stack"
    allowed_domains = ["www.beuth.de"]
    start_urls = [
        "https://www.beuth.de/de/regelwerke/vdi/vdi-richtlinien-entwuerfe",
    ]

    def parse(self, response):
        elements = Selector(response).xpath('//div[@class="bwr-card__inner"]')
        for element in elements:
            item = StackItem()
            item['title'] = element.xpath('a[@class="bwr-link__label"]/text()').extract()[0]
            item['url'] = element.xpath('a[@class="bwr-card__title-link"]/@href').extract()[0]
            item['date'] = element.xpath('div[@class="bwr-type__item bwr-type__item--light"]/text()').extract()[0]
            item['price'] = element.xpath('div[@class="bwr-buybox__price-emph]/text()').extract()[0]
            item['subtitle'] = element.xpath('div[@class="bwr-card__subtitle bwr-data-dlink"]/text()').extract()[0]
            item['description'] = element.xpath('div[@class="bwr-card__text bwr-rte bwr-data-dlink"]/text()').extract()[0]
            yield item
What I tried to solve the problem:
I tried the Scrapy shell. After I set a defined User-Agent, the website didn't block me:
In [2]: from scrapy import Request
...: req = Request("https://www.beuth.de/de/regelwerke/vdi/vdi-richtlinien-entwuerfe",
headers={"USER-AGENT" : "Mozi
...: lla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 OPR/45.0.
...: 2552.888"})
...: fetch(req)
2020-09-06 12:48:19 [scrapy.core.engine] DEBUG: Crawled (200) <GET https://www.beuth.de/de/regelwerke/vdi/vdi-richtlinien-entwuerfe>
(referer: None)
After that I tested the selectors (see the items list above), but unfortunately I just get [] whatever XPath I use:
In [3]: response.xpath("//div[@class='bwr-buybox__price']/a/text").getall()
Out[3]: []
When I try view(response) I just get a browser window with an infinite loading loop.
I tried to analyse the outcome, but without an error I wasn't sure where to start fixing.
I defined a User-Agent in settings.py because otherwise I got an error message about blocking (I think the website doesn't allow crawls).
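The relevant line in settings.py is roughly the following (reusing the user agent string from the shell session above):
# settings.py -- override Scrapy's default user agent so the site does not block the crawl
USER_AGENT = ('Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 '
              '(KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 OPR/45.0.2552.888')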
Summary:
I want to scrape the items above from the list, but I am not sure whether there is just a problem with my selectors, because testing in the shell results in [] every time.

scrapy not working on imdb keywords pages

Here is how I intend this code to work:
I have a keyword, say "gadgets". I search titles on the advanced IMDb search page. I want the code to go to each title page, then go to the keywords page of each title, and then download the title and all the keywords.
The code structure looks good to me, but it is really not working.
Please suggest whether it needs to be rewritten or whether it can be corrected with some advice.
Here is my spider:
import scrapy

class KwordsSpider(scrapy.Spider):
    name = 'ImdbSpider'
    allowed_domains = ['imdb.com']
    start_urls = [
        'https://www.imdb.com/search/title/?keywords=gadgets'
    ]

    def parse(self, response):
        titleLinks = response.xpath('//*[@class="lister-item-content"]')
        for link in titleLinks:
            title_url = 'https://www.imdb.com' + link.xpath('.//h3/a/@href').extract_first()
            yield scrapy.Request(title_url, callback=self.parse_title)
        next_page_url = 'https://www.imdb.com' + response.xpath('//div[@class="article"]/div[@class="desc"]/a[@href]').extract_first()
        if next_page_url is not None:
            next_page_url = response.urljoin(next_page_url)
            yield scrapy.Request(next_page_url, callback=self.parse)

    def parse_title(self, response):
        keywords_url = 'https://www.imdb.com' + response.xpath('//nobr/a[@href]').extract_first()
        yield scrapy.Request(keywords_url, callback=self.parse_keys)

    # looking at the keywords page
    def parse_keys(self, response):
        title = response.xpath('//h3/a/text()').extract_first()
        keys = response.xpath('//div[@class="sodatext"]/a/text()').extract()
        print('my print' + title)
        yield {
            'title': title,
            'Keywords': keys,
        }
Following are a few PowerShell log lines:
2020-05-02 08:33:40 [scrapy.middleware] INFO: Enabled item pipelines:
[]
2020-05-02 08:33:40 [scrapy.core.engine] INFO: Spider opened
2020-05-02 08:33:40 [scrapy.extensions.logstats] INFO: Crawled 0 pages (at 0 pages/min), scraped 0 items (at 0 items/min)
2020-05-02 08:33:40 [scrapy.extensions.telnet] INFO: Telnet console listening on 127.0.0.1:6023
2020-05-02 08:33:43 [scrapy.core.engine] DEBUG: Crawled (200) <GET https://www.imdb.com/search/title/?keywords=gadgets> (referer: None)
2020-05-02 08:33:43 [scrapy.spidermiddlewares.offsite] DEBUG: Filtered offsite request to 'www.imdb.com<a href="': <GET https://www.imdb.com<a href="/search/title/?keywords=gadgets&start=51%22%20class=%22lister-page-next%20next-page%22%3ENext%20%C2%BB%3C/a%3E>
2020-05-02 08:33:46 [scrapy.core.engine] DEBUG: Crawled (200) <GET https://www.imdb.com/title/tt3896198/> (referer: https://www.imdb.com/search/title/?keywords=gadgets)
2020-05-02 08:34:11 [scrapy.core.engine] DEBUG: Crawled (200) <GET https://www.imdb.com/title/tt0369171/> (referer: https://www.imdb.com/search/title/?keywords=gadgets)
2020-05-02 08:34:11 [scrapy.core.engine] DEBUG: Crawled (200) <GET https://www.imdb.com/title/tt1149317/> (referer: https://www.imdb.com/search/title/?keywords=gadgets)
2020-05-02 08:34:11 [scrapy.core.engine] INFO: Closing spider (finished)
A few XPaths in your script were wrong. I've fixed them; it should work now.
import scrapy

class KwordsSpider(scrapy.Spider):
    name = 'ImdbSpider'
    start_urls = [
        'https://www.imdb.com/search/title/?keywords=gadgets'
    ]

    def parse(self, response):
        titleLinks = response.xpath('//*[@class="lister-item-content"]')
        for link in titleLinks:
            title_url = response.urljoin(link.xpath('.//h3/a/@href').get())
            yield scrapy.Request(title_url, callback=self.parse_title)
        next_page_url = response.xpath('//div[@class="article"]/div[@class="desc"]/a/@href').get()
        if next_page_url:
            next_page_url = response.urljoin(next_page_url)
            yield scrapy.Request(next_page_url, callback=self.parse)

    def parse_title(self, response):
        keywords_url = response.urljoin(response.xpath('//nobr/a/@href').get())
        yield scrapy.Request(keywords_url, callback=self.parse_keys)

    def parse_keys(self, response):
        title = response.xpath('//h3/a/text()').get()
        keys = response.xpath('//div[@class="sodatext"]/a/text()').getall()
        yield {
            'title': title,
            'Keywords': keys,
        }
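To run it and dump the yielded dictionaries, the standard feed export option should be enough (the spider name here is ImdbSpider), e.g.:
scrapy crawl ImdbSpider -o keywords.json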

Scrapy 0 pages crawled but no visible issue?

I used Portia to create a spider and then downloaded it as a Scrapy project. The spider runs fine, but the log says Crawled 0 pages (at 0 pages/min) and nothing gets saved. However, it also shows all the pages crawled with a 200 response, then shows the bytes of data at the end.
Spider Code
from __future__ import absolute_import
from scrapy import Request
from scrapy.linkextractors import LinkExtractor
from scrapy.loader import ItemLoader
from scrapy.loader.processors import Identity
from scrapy.spiders import Rule

from ..utils.spiders import BasePortiaSpider
from ..utils.starturls import FeedGenerator, FragmentGenerator
from ..utils.processors import Item, Field, Text, Number, Price, Date, Url, Image, Regex
from ..items import PortiaItem, AllProductsBooksToScrapeSandboxItem


class BooksToscrape(BasePortiaSpider):
    name = "books.toscrape.com"
    allowed_domains = ['books.toscrape.com']
    start_urls = [{'fragments': [{'valid': True,
                                  'type': 'fixed',
                                  'value': 'http://books.toscrape.com/catalogue/page-'},
                                 {'valid': True,
                                  'type': 'range',
                                  'value': '1-50'},
                                 {'valid': True,
                                  'type': 'fixed',
                                  'value': '.html'}],
                   'type': 'generated',
                   'url': 'http://books.toscrape.com/catalogue/page-[1-50].html'}]
    rules = [
        Rule(
            LinkExtractor(
                allow=(),
                deny=('.*')
            ),
            callback='parse_item',
            follow=True
        )
    ]
    items = [
        [
            Item(
                AllProductsBooksToScrapeSandboxItem, None, '.product_pod', [
                    Field('title', 'h3 > a::attr(title)', []),
                    Field('price', '.product_price > .price_color *::text', [])])]]
Pipeline Code
I added open_spider and close_spider functions to write the items to JSON lines upon crawling, and I think this part works because the .jl file gets created.
# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import json


class TesterPipeline(object):
    def open_spider(self, spider):
        self.file = open('items.jl', 'w')

    def close_spider(self, spider):
        self.file.close()

    def process_item(self, item, spider):
        line = json.dumps(dict(item)) + "\n"
        self.file.write(line)
        return item
Settings Code
I enabled the pipeline in the settings too, for the pipeline to work.
# -*- coding: utf-8 -*-
# Scrapy settings for Tester project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'Tester'
SPIDER_MODULES = ['Tester.spiders']
NEWSPIDER_MODULE = 'Tester.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'Tester (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'Tester.middlewares.TesterSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'Tester.middlewares.TesterDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'Tester.pipelines.TesterPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
When I run the spider, the following log is created:
(scrape) C:\Users\da74\Desktop\tester>scrapy crawl books.toscrape.com
2018-07-24 12:18:15 [scrapy.utils.log] INFO: Scrapy 1.5.0 started (bot: Tester)
2018-07-24 12:18:15 [scrapy.utils.log] INFO: Versions: lxml 4.2.2.0, libxml2 2.9.8, cssselect 1.0.3, parsel 1.4.0, w3lib 1.19.0, Twisted 17.5.0, Python 3.6.6 |Anaconda, Inc.| (default, Jun 28 2018, 11:27:44) [MSC v.1900 64 bit (AMD64)], pyOpenSSL 18.0.0 (OpenSSL 1.0.2o 27 Mar 2018), cryptography 2.2.2, Platform Windows-10-10.0.17134-SP0
2018-07-24 12:18:15 [scrapy.crawler] INFO: Overridden settings: {'BOT_NAME': 'Tester', 'NEWSPIDER_MODULE': 'Tester.spiders', 'ROBOTSTXT_OBEY': True, 'SPIDER_MODULES': ['Tester.spiders']}
2018-07-24 12:18:15 [scrapy.middleware] INFO: Enabled extensions:
['scrapy.extensions.corestats.CoreStats',
'scrapy.extensions.telnet.TelnetConsole',
'scrapy.extensions.logstats.LogStats']
2018-07-24 12:18:16 [scrapy.middleware] INFO: Enabled downloader middlewares:
['scrapy.downloadermiddlewares.robotstxt.RobotsTxtMiddleware',
'scrapy.downloadermiddlewares.httpauth.HttpAuthMiddleware',
'scrapy.downloadermiddlewares.downloadtimeout.DownloadTimeoutMiddleware',
'scrapy.downloadermiddlewares.defaultheaders.DefaultHeadersMiddleware',
'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware',
'scrapy.downloadermiddlewares.retry.RetryMiddleware',
'scrapy.downloadermiddlewares.redirect.MetaRefreshMiddleware',
'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware',
'scrapy.downloadermiddlewares.redirect.RedirectMiddleware',
'scrapy.downloadermiddlewares.cookies.CookiesMiddleware',
'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware',
'scrapy.downloadermiddlewares.stats.DownloaderStats']
2018-07-24 12:18:16 [scrapy.middleware] INFO: Enabled spider middlewares:
['scrapy.spidermiddlewares.httperror.HttpErrorMiddleware',
'scrapy.spidermiddlewares.offsite.OffsiteMiddleware',
'scrapy.spidermiddlewares.referer.RefererMiddleware',
'scrapy.spidermiddlewares.urllength.UrlLengthMiddleware',
'scrapy.spidermiddlewares.depth.DepthMiddleware']
2018-07-24 12:18:16 [scrapy.middleware] INFO: Enabled item pipelines:
['Tester.pipelines.TesterPipeline']
2018-07-24 12:18:16 [scrapy.core.engine] INFO: Spider opened
2018-07-24 12:18:16 [scrapy.extensions.logstats] INFO: Crawled 0 pages (at 0 pages/min), scraped 0 items (at 0 items/min)
2018-07-24 12:18:16 [scrapy.extensions.telnet] DEBUG: Telnet console listening on 127.0.0.1:6023
2018-07-24 12:18:16 [scrapy.core.engine] DEBUG: Crawled (404) <GET http://books.toscrape.com/robots.txt> (referer: None)
2018-07-24 12:18:16 [scrapy.core.engine] DEBUG: Crawled (200) <GET http://books.toscrape.com/catalogue/page-1.html> (referer: None)
2018-07-24 12:18:17 [scrapy.core.engine] DEBUG: Crawled (200) <GET http://books.toscrape.com/catalogue/page-2.html> (referer: None)
2018-07-24 12:18:17 [scrapy.core.engine] DEBUG: Crawled (200) <GET http://books.toscrape.com/catalogue/page-7.html> (referer: None)
2018-07-24 12:18:17 [scrapy.core.engine] DEBUG: Crawled (200) <GET http://books.toscrape.com/catalogue/page-4.html> (referer: None)
2018-07-24 12:18:17 [scrapy.core.engine] DEBUG: Crawled (200) <GET http://books.toscrape.com/catalogue/page-3.html> (referer: None)
2018-07-24 12:18:17 [scrapy.core.engine] DEBUG: Crawled (200) <GET http://books.toscrape.com/catalogue/page-9.html> (referer: None)
2018-07-24 12:18:17 [scrapy.core.engine] DEBUG: Crawled (200) <GET http://books.toscrape.com/catalogue/page-5.html> (referer: None)
2018-07-24 12:18:17 [scrapy.core.engine] DEBUG: Crawled (200) <GET http://books.toscrape.com/catalogue/page-8.html> (referer: None)
2018-07-24 12:18:17 [scrapy.core.engine] DEBUG: Crawled (200) <GET http://books.toscrape.com/catalogue/page-6.html> (referer: None)
2018-07-24 12:18:17 [scrapy.core.engine] DEBUG: Crawled (200) <GET http://books.toscrape.com/catalogue/page-10.html> (referer: None)
2018-07-24 12:18:17 [scrapy.core.engine] DEBUG: Crawled (200) <GET http://books.toscrape.com/catalogue/page-12.html> (referer: None)
2018-07-24 12:18:17 [scrapy.core.engine] DEBUG: Crawled (200) <GET http://books.toscrape.com/catalogue/page-11.html> (referer: None)
2018-07-24 12:18:17 [scrapy.core.engine] DEBUG: Crawled (200) <GET http://books.toscrape.com/catalogue/page-14.html> (referer: None)
2018-07-24 12:18:17 [scrapy.core.engine] DEBUG: Crawled (200) <GET http://books.toscrape.com/catalogue/page-15.html> (referer: None)
2018-07-24 12:18:17 [scrapy.core.engine] DEBUG: Crawled (200) <GET http://books.toscrape.com/catalogue/page-16.html> (referer: None)
2018-07-24 12:18:17 [scrapy.core.engine] DEBUG: Crawled (200) <GET http://books.toscrape.com/catalogue/page-17.html> (referer: None)
2018-07-24 12:18:17 [scrapy.core.engine] DEBUG: Crawled (200) <GET http://books.toscrape.com/catalogue/page-13.html> (referer: None)
2018-07-24 12:18:17 [scrapy.core.engine] DEBUG: Crawled (200) <GET http://books.toscrape.com/catalogue/page-18.html> (referer: None)
2018-07-24 12:18:17 [scrapy.core.engine] DEBUG: Crawled (200) <GET http://books.toscrape.com/catalogue/page-19.html> (referer: None)
2018-07-24 12:18:17 [scrapy.core.engine] DEBUG: Crawled (200) <GET http://books.toscrape.com/catalogue/page-21.html> (referer: None)
2018-07-24 12:18:17 [scrapy.core.engine] DEBUG: Crawled (200) <GET http://books.toscrape.com/catalogue/page-20.html> (referer: None)
2018-07-24 12:18:17 [scrapy.core.engine] DEBUG: Crawled (200) <GET http://books.toscrape.com/catalogue/page-22.html> (referer: None)
2018-07-24 12:18:17 [scrapy.core.engine] DEBUG: Crawled (200) <GET http://books.toscrape.com/catalogue/page-23.html> (referer: None)
2018-07-24 12:18:17 [scrapy.core.engine] DEBUG: Crawled (200) <GET http://books.toscrape.com/catalogue/page-25.html> (referer: None)
2018-07-24 12:18:17 [scrapy.core.engine] DEBUG: Crawled (200) <GET http://books.toscrape.com/catalogue/page-24.html> (referer: None)
2018-07-24 12:18:17 [scrapy.core.engine] DEBUG: Crawled (200) <GET http://books.toscrape.com/catalogue/page-26.html> (referer: None)
2018-07-24 12:18:17 [scrapy.core.engine] DEBUG: Crawled (200) <GET http://books.toscrape.com/catalogue/page-27.html> (referer: None)
2018-07-24 12:18:17 [scrapy.core.engine] DEBUG: Crawled (200) <GET http://books.toscrape.com/catalogue/page-32.html> (referer: None)
2018-07-24 12:18:17 [scrapy.core.engine] DEBUG: Crawled (200) <GET http://books.toscrape.com/catalogue/page-29.html> (referer: None)
2018-07-24 12:18:17 [scrapy.core.engine] DEBUG: Crawled (200) <GET http://books.toscrape.com/catalogue/page-30.html> (referer: None)
2018-07-24 12:18:17 [scrapy.core.engine] DEBUG: Crawled (200) <GET http://books.toscrape.com/catalogue/page-33.html> (referer: None)
2018-07-24 12:18:17 [scrapy.core.engine] DEBUG: Crawled (200) <GET http://books.toscrape.com/catalogue/page-28.html> (referer: None)
2018-07-24 12:18:17 [scrapy.core.engine] DEBUG: Crawled (200) <GET http://books.toscrape.com/catalogue/page-31.html> (referer: None)
2018-07-24 12:18:18 [scrapy.core.engine] DEBUG: Crawled (200) <GET http://books.toscrape.com/catalogue/page-34.html> (referer: None)
2018-07-24 12:18:18 [scrapy.core.engine] DEBUG: Crawled (200) <GET http://books.toscrape.com/catalogue/page-35.html> (referer: None)
2018-07-24 12:18:18 [scrapy.core.engine] DEBUG: Crawled (200) <GET http://books.toscrape.com/catalogue/page-36.html> (referer: None)
2018-07-24 12:18:18 [scrapy.core.engine] DEBUG: Crawled (200) <GET http://books.toscrape.com/catalogue/page-39.html> (referer: None)
2018-07-24 12:18:18 [scrapy.core.engine] DEBUG: Crawled (200) <GET http://books.toscrape.com/catalogue/page-40.html> (referer: None)
2018-07-24 12:18:18 [scrapy.core.engine] DEBUG: Crawled (200) <GET http://books.toscrape.com/catalogue/page-38.html> (referer: None)
2018-07-24 12:18:18 [scrapy.core.engine] DEBUG: Crawled (200) <GET http://books.toscrape.com/catalogue/page-41.html> (referer: None)
2018-07-24 12:18:18 [scrapy.core.engine] DEBUG: Crawled (200) <GET http://books.toscrape.com/catalogue/page-37.html> (referer: None)
2018-07-24 12:18:18 [scrapy.core.engine] DEBUG: Crawled (200) <GET http://books.toscrape.com/catalogue/page-42.html> (referer: None)
2018-07-24 12:18:18 [scrapy.core.engine] DEBUG: Crawled (200) <GET http://books.toscrape.com/catalogue/page-43.html> (referer: None)
2018-07-24 12:18:18 [scrapy.core.engine] DEBUG: Crawled (200) <GET http://books.toscrape.com/catalogue/page-44.html> (referer: None)
2018-07-24 12:18:18 [scrapy.core.engine] DEBUG: Crawled (200) <GET http://books.toscrape.com/catalogue/page-47.html> (referer: None)
2018-07-24 12:18:18 [scrapy.core.engine] DEBUG: Crawled (200) <GET http://books.toscrape.com/catalogue/page-45.html> (referer: None)
2018-07-24 12:18:18 [scrapy.core.engine] DEBUG: Crawled (200) <GET http://books.toscrape.com/catalogue/page-46.html> (referer: None)
2018-07-24 12:18:18 [scrapy.core.engine] DEBUG: Crawled (200) <GET http://books.toscrape.com/catalogue/page-48.html> (referer: None)
2018-07-24 12:18:18 [scrapy.core.engine] DEBUG: Crawled (200) <GET http://books.toscrape.com/catalogue/page-49.html> (referer: None)
2018-07-24 12:18:18 [scrapy.core.engine] DEBUG: Crawled (200) <GET http://books.toscrape.com/catalogue/page-50.html> (referer: None)
2018-07-24 12:18:18 [scrapy.core.engine] INFO: Closing spider (finished)
2018-07-24 12:18:18 [scrapy.statscollectors] INFO: Dumping Scrapy stats:
{'downloader/request_bytes': 12168,
'downloader/request_count': 51,
'downloader/request_method_count/GET': 51,
'downloader/response_bytes': 299913,
'downloader/response_count': 51,
'downloader/response_status_count/200': 50,
'downloader/response_status_count/404': 1,
'finish_reason': 'finished',
'finish_time': datetime.datetime(2018, 7, 24, 4, 18, 18, 598891),
'log_count/DEBUG': 52,
'log_count/INFO': 7,
'response_received_count': 51,
'scheduler/dequeued': 50,
'scheduler/dequeued/memory': 50,
'scheduler/enqueued': 50,
'scheduler/enqueued/memory': 50,
'start_time': datetime.datetime(2018, 7, 24, 4, 18, 16, 208142)}
2018-07-24 12:18:18 [scrapy.core.engine] INFO: Spider closed (finished)
I don't understand why it isn't gathering items. It first says 0 items crawled and then shows a 200 success response for the pages.
If anyone has any idea what to try to make it crawl, that would be helpful.
Thank you

Q: scrapy-redis didn't scrape any page, just finished in a second

My spider scrapes no pages and finishes in less than a second, but throws no errors.
I've checked the code and compared it with another similar project which I ran successfully a few weeks ago, but I still can't figure out what the problem might be.
I'm using Scrapy 1.0.1 and scrapy-redis 0.6.
These are the logs:
2015-07-21 11:33:20 [scrapy] INFO: Scrapy 1.0.1 started (bot: demo)
2015-07-21 11:33:20 [scrapy] INFO: Optional features available: ssl, http11
2015-07-21 11:33:20 [scrapy] INFO: Overridden settings: {'NEWSPIDER_MODULE': 'demo.spiders', 'LOG_LEVEL': 'INFO', 'SPIDER_MODULES': ['demo.spiders'], 'RETRY_HTTP_CODES': [500, 502, 503, 504, 400, 408, 404, 302, 403], 'BOT_NAME': 'demo', 'SCHEDULER': 'scrapy_redis.scheduler.Scheduler', 'DEFAULT_ITEM_CLASS': 'demo.items.DemoItem', 'REDIRECT_ENABLED': False}
2015-07-21 11:33:20 [scrapy] INFO: Enabled extensions: CloseSpider, TelnetConsole, LogStats, CoreStats, SpiderState
2015-07-21 11:33:20 [scrapy] INFO: Enabled downloader middlewares: CustomUserAgentMiddleware, CustomHttpProxyMiddleware, HttpAuthMiddleware, DownloadTimeoutMiddleware, RetryMiddleware, DefaultHeadersMiddleware, MetaRefreshMiddleware, HttpCompressionMiddleware, CookiesMiddleware, ChunkedTransferMiddleware, DownloaderStats
2015-07-21 11:33:20 [scrapy] INFO: Enabled spider middlewares: HttpErrorMiddleware, OffsiteMiddleware, RefererMiddleware, UrlLengthMiddleware, DepthMiddleware
2015-07-21 11:33:20 [scrapy] INFO: Enabled item pipelines: RedisPipeline, DemoPipeline
2015-07-21 11:33:20 [scrapy] INFO: Spider opened
2015-07-21 11:33:20 [scrapy] INFO: Crawled 0 pages (at 0 pages/min), scraped 0 items (at 0 items/min)
2015-07-21 11:33:20 [scrapy] INFO: Closing spider (finished)
2015-07-21 11:33:20 [scrapy] INFO: Dumping Scrapy stats:
{'finish_reason': 'finished',
'finish_time': datetime.datetime(2015, 7, 21, 3, 33, 20, 301371),
'log_count/INFO': 7,
'start_time': datetime.datetime(2015, 7, 21, 3, 33, 20, 296941)}
2015-07-21 11:33:20 [scrapy] INFO: Spider closed (finished)
This is the spider.py:
# -*- coding: utf-8 -*-
import scrapy
from demo.items import DemoItem
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from scrapy_redis.spiders import RedisMixin
from pip._vendor.requests.models import Request


class DemoCrawler(RedisMixin, CrawlSpider):
    name = "demo"
    redis_key = "demoCrawler:start_urls"

    rules = (
        Rule(LinkExtractor(allow='/shop/\d+?/$', restrict_xpaths=u"//ul/li/div[@class='txt']/div[@class='tit']/a"), callback='parse_demo'),
        Rule(LinkExtractor(restrict_xpaths=u"//div[@class='shop-wrap']/div[@class='page']/a[@class='next']"), follow=True)
    )

    def parse_demo(self, response):
        item = DemoItem()
        temp = response.xpath(u"//div[@id='basic-info']/div[@class='action']/a/@href").re("\d.+\d")
        item['id'] = temp[0] if temp else ''
        temp = response.xpath(u"//div[@class='page-header']/div[@class='container']/a[@class='city J-city']/text()").extract()
        item['city'] = temp[0] if temp else ''
        temp = response.xpath(u"//div[@class='breadcrumb']/span/text()").extract()
        item['name'] = temp[0] if temp else ''
        temp = response.xpath(u"//div[@class='main']/div[@id='sales']/text()").extract()
        item['deals'] = temp[0] if temp else ''
        temp = response.xpath(u"//div[@class='main-nav']/div[@class='container']/a[1]/text()").extract()
        item['category'] = temp[0] if temp else ''
        temp = response.xpath(u"//div[@class='main']/div[@id='basic-info']/div[@class='expand-info address']/a/span/text()").extract()
        item['region'] = temp[0] if temp else ''
        temp = response.xpath(u"//div[@class='main']/div[@id='basic-info']/div[@class='expand-info address']/span/text()").extract()
        item['address'] = temp[0] if temp else ''
        yield item
To start the spider, I type two commands in the shell:
redis-cli lpush demoCrawler:start_urls url
scrapy crawl demo
url is the specific URL I'm scraping, for example http://google.com
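For context, the overridden settings in the log above correspond to roughly the following scrapy-redis related entries in settings.py (reconstructed from the log, not copied from the real file; the pipeline module paths and priorities are assumptions):
# reconstructed from the "Overridden settings" log line above
SCHEDULER = 'scrapy_redis.scheduler.Scheduler'
DEFAULT_ITEM_CLASS = 'demo.items.DemoItem'
REDIRECT_ENABLED = False
RETRY_HTTP_CODES = [500, 502, 503, 504, 400, 408, 404, 302, 403]
LOG_LEVEL = 'INFO'

# the log only names RedisPipeline and DemoPipeline; exact paths and priorities are assumptions
ITEM_PIPELINES = {
    'scrapy_redis.pipelines.RedisPipeline': 400,
    'demo.pipelines.DemoPipeline': 300,
}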
