Monday, 4 June 2018

scrapy crawl a set of links that might contain next pages

I want to:

  1. Extract the links from a certain page
  2. For each link, scrape some contents of that link, plus the contents of that link's 'next pages' (see the sketch after this list)
  3. Export everything as a JSON file (not important, I think, as far as my problem goes)
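
Before getting to my real (Selenium-backed) spider, this is the shape of what I'm after in plain Scrapy; a minimal sketch, where 'a.item', 'div.post' and 'a.next' are placeholder selectors, not the real ones:

import scrapy

class SketchSpider(scrapy.Spider):
    name = 'sketch'
    start_urls = ['http://example.com/list']

    def parse(self, response):
        # 1. extract the links from a certain page
        for href in response.css('a.item::attr(href)').extract():
            yield scrapy.Request(url=response.urljoin(href), callback=self.parse_next)

    def parse_next(self, response):
        # 2. some contents of that link ...
        for sel in response.css('div.post'):
            yield {'contents': sel.css('::text').extract_first()}
        # ... plus the contents of its 'next pages', by re-scheduling this callback
        next_href = response.css('a.next::attr(href)').extract_first()
        if next_href:
            yield scrapy.Request(url=response.urljoin(next_href), callback=self.parse_next)

Step 3 would then just be running "scrapy crawl sketch -o items.json" from the command line.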

Currently my spider looks like this:

class mySpider(scrapy.Spider):
    ...
    def parse(self, response):
        for url in someurls:
            yield scrapy.Request(url=url, callback=self.parse_next)

    def parse_next(self, response):
        for selector in someselectors:
            yield {'contents': ...,
                   ...}
        nextPage = obtainNextPage()
        if nextPage:
            yield scrapy.Request(url=nextPage, callback=self.parse_next)

The problem: for a set of links the spider processes, it only ever reaches the 'next page' of the last link in that set; I verified this by watching the browser through selenium + chromedriver. For example, given 10 links (No.1 to No.10), my spider only gets the next pages of the No.10 link. I don't know whether this is caused by some structural problem in my spider.
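
One cheap way to watch this happening in the logs (just a diagnostic sketch, not part of the spider; current_url is a standard Selenium property): compare the URL Scrapy scheduled the callback for against the URL the shared browser is actually on.

    def parse_next(self, response):
        # response.url: the page this callback was scheduled for
        # self.driver.current_url: the page the one shared browser shows right now
        self.log('callback for %s, driver on %s' % (response.url, self.driver.current_url))
        ...

Below is the full code: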

import scrapy
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import time


class BaiduSpider(scrapy.Spider):
    name = 'baidu'
    allowed_domains = ['baidu.com']
    start_urls = ['http://tieba.baidu.com']
    main_url = 'http://tieba.baidu.com/f?kw=%E5%B4%94%E6%B0%B8%E5%85%83&ie=utf-8'
    username = ""
    password = ""

    def __init__(self, username=username, password=password):
        super().__init__()  # let scrapy.Spider set itself up (Python 3 syntax)
        #options = webdriver.ChromeOptions()
        #options.add_argument('headless')
        #options.add_argument('window-size=1200x600')
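        # a single Chrome instance, shared by every callback of this spider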
        self.driver = webdriver.Chrome()  # chrome_options=options
        self.username = username
        self.password = password
    # checked
    def logIn(self):
        # open Baidu's login pop-up and submit username/password with Selenium
        elem = self.driver.find_element_by_css_selector('#com_userbar > ul > li.u_login > div > a')
        elem.click()
        WebDriverWait(self.driver, 10).until(EC.presence_of_element_located((By.CSS_SELECTOR, '#TANGRAM__PSP_10__footerULoginBtn')))
        elem = self.driver.find_element_by_css_selector('#TANGRAM__PSP_10__footerULoginBtn')
        elem.click()
        elem = self.driver.find_element_by_css_selector('#TANGRAM__PSP_10__userName')
        elem.send_keys(self.username)
        elem = self.driver.find_element_by_css_selector('#TANGRAM__PSP_10__password')
        elem.send_keys(self.password)
        self.driver.find_element_by_css_selector('#TANGRAM__PSP_10__submit').click()
    # basic checked
    def parse(self, response):
        self.driver.get(response.url)
        self.logIn()
        # wait for the verification code to be typed in by hand
        time.sleep(15)
        self.driver.get(self.main_url)
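        # note: [:2] keeps only the first two thread links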
        for url in self.driver.find_elements_by_css_selector('a.j_th_tit')[:2]:
            #new_url = response.urljoin(url)
            new_url = url.get_attribute("href")
            yield scrapy.Request(url=new_url, callback=self.parse_next)
    # checked
    def pageScroll(self, url):
        # scroll down in fixed steps so lazily loaded posts get rendered
        self.driver.get(url)
        SCROLL_PAUSE_TIME = 0.5
        SCROLL_LENGTH = 1200
        page_height = int(self.driver.execute_script("return document.body.scrollHeight"))
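        # note: scrollHeight is sampled once, before scrolling starts; posts that
        # lazy-load while scrolling can grow the page past this initial height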
        scrollPosition = 0
        while scrollPosition < page_height:
            scrollPosition = scrollPosition + SCROLL_LENGTH
            self.driver.execute_script("window.scrollTo(0, " + str(scrollPosition) + ");")
            time.sleep(SCROLL_PAUSE_TIME)
        time.sleep(1.2)

    def parse_next(self, response):
        self.log('I visited ' + response.url)
        self.pageScroll(response.url)

        for sel in self.driver.find_elements_by_css_selector('div.l_post.j_l_post.l_post_bright'):
            name = sel.find_element_by_css_selector('.d_name').text
            try:
                content = sel.find_element_by_css_selector('.j_d_post_content').text
            except NoSuchElementException:
                content = ''
            try:
                reply = sel.find_element_by_css_selector('ul.j_lzl_m_w').text
            except NoSuchElementException:
                reply = ''
            yield {'name': name, 'content': content, 'reply': reply}

        # follow to the next page ('下一页' means 'next page')
        try:
            next_sel = self.driver.find_element_by_link_text("下一页")
        except NoSuchElementException:
            return  # no next-page link on this thread
        next_url = next_sel.get_attribute('href')
        yield scrapy.Request(url=next_url, callback=self.parse_next)

Thanks for your help, and any suggestions regarding my code above are welcome.


