python爬取换页_爬虫爬不进下一页了,怎么办
该楼层疑似违规已被系统折叠 隐藏此楼查看此楼
该楼层疑似违规已被系统折叠 隐藏此楼查看此楼
#-*- coding: UTF-8 -*-
import scrapy
from hoho.items import HohoItem
import re
from scrapy.selector import Selector
import sys
reload(sys)
sys.setdefaultencoding( "UTF-8" )
class tongSpider(scrapy.Spider):
    """Crawl the classical-Chinese prose listing on shicifuns.com.

    Yields one ``HohoItem`` per poem entry and follows the "next page"
    link until no further page is found.
    """

    name = 'guwen'
    start_urls = ['http://www.shicifuns.com/v2/wenyan/list']

    def parse(self, response):
        """Parse one listing page: emit an item per poem, then queue the next page.

        :param response: scrapy ``Response`` for a listing page.
        :yields: ``HohoItem`` instances and a ``scrapy.Request`` for the next page.
        """
        papers = response.xpath('//div[@class="css_content"]/div/div[@class="css_body_left"]/div[@class="every_day"]/ul')
        for paper in papers:
            for p in paper.xpath('li'):
                # extract() returns a (possibly empty) list; the original code
                # indexed [0] unconditionally, so any <li> missing a field
                # raised IndexError and aborted the whole page's parse.
                name = p.xpath('a/div/div[@class="poem_title"]/span/text()').extract_first()
                url = p.xpath('a/@href').extract_first()
                if not (name and url):
                    continue  # skip malformed entries instead of crashing
                content = (p.xpath('a/div/div[@class="poem_content"]/text()').extract_first() or '').strip("\r\n ")
                # Query the dynasty spans once (the original ran the identical
                # XPath twice for indices 0 and 1) and guard both indices.
                info = p.xpath('a/div/div[@class="poem_info"]/span[@class="dynasty"]/text()').extract()
                author = info[0] if len(info) > 0 else ''
                pinfen = info[1] if len(info) > 1 else ''
                yield HohoItem(
                    name=name,
                    # urljoin handles both relative and absolute hrefs; the
                    # original's string concatenation broke on absolute URLs.
                    url=response.urljoin(url),
                    content=content,
                    author=author,
                    pinfen=pinfen,
                )
        # BUG FIX for the reported "spider can't reach the next page": the
        # original selector required the exact class string 'next page focus'.
        # An exact multi-class match silently fails whenever the tokens appear
        # in a different order or the class set changes between pages.
        # NOTE(review): matching any <a> whose class contains 'next' is the
        # usual robust form — confirm against the live page markup.
        next_page = response.xpath(
            "//div[@class='css_content']/div/div[@class='css_body_left']"
            "/div[@class='pagination']/ul/li/a[contains(@class,'next')]/@href"
        ).extract_first()
        if next_page:
            yield scrapy.Request(url=response.urljoin(next_page), callback=self.parse)
开放原子开发者工作坊旨在鼓励更多人参与开源活动,与志同道合的开发者们相互交流开发经验、分享开发心得、获取前沿技术趋势。工作坊有多种形式的开发者活动,如meetup、训练营等,主打技术交流,干货满满,真诚地邀请各位开发者共同参与!
更多推荐
所有评论(0)