按照教程 http://www.gooseeker.com/doc/thread-2072-1-1.html 的案例完整做下来后报错了,截图如下。
lxml、selenium、PhantomJS 都已经安装,不知道问题出在哪里。gooseeker.py 和 douban.py 的代码如下,以供查看:
- #!/usr/bin/python
- # -*- coding: utf-8 -*-
- # 模块名: gooseeker
- # 类名: GsExtractor
- # Version: 2.1
- # 说明: html内容提取器
- # 功能: 使用xslt作为模板,快速提取HTML DOM中的内容。
- # released by 集搜客(http://www.gooseeker.com) on May 18, 2016
- # github: https://github.com/FullerHua/jisou/core/gooseeker.py
- import time
- from urllib import request
- from urllib.parse import quote
- from lxml import etree
class GsExtractor(object):
    """Extract content from an HTML DOM using an XSLT template.

    The template can be loaded from a file, a string, or fetched from the
    GooSeeker API; `extract` / `extractHTML` then apply it to a document.
    """

    def __init__(self):
        # BUG FIX: was `def _init_(self)` (single underscores), so Python
        # never called it as a constructor and `self.xslt` did not exist —
        # getXslt()/extract() raised AttributeError until a setter ran.
        self.xslt = ""

    def setXsltFromFile(self, xsltFilePath):
        """Load the XSLT template from the UTF-8 file at `xsltFilePath`."""
        # `with` guarantees the file is closed even if read() raises.
        with open(xsltFilePath, 'r', encoding='UTF-8') as f:
            self.xslt = f.read()

    def setXsltFromMem(self, xsltStr):
        """Set the XSLT template directly from a string."""
        self.xslt = xsltStr

    def setXsltFromAPI(self, APIKey, theme, middle=None, bname=None):
        """Fetch the XSLT template from the GooSeeker extractor API.

        APIKey -- app key from the GooSeeker member center.
        theme  -- rule name; `middle` and `bname` further narrow the rule.
        """
        apiurl = ("http://www.gooseeker.com/api/getextractor?key=" + APIKey
                  + "&theme=" + quote(theme))
        if middle:
            apiurl = apiurl + "&middle=" + quote(middle)
        if bname:
            apiurl = apiurl + "&bname=" + quote(bname)
        apiconn = request.urlopen(apiurl)
        try:
            # read() returns bytes; etree.XML() accepts bytes, so extract()
            # still works without decoding.
            self.xslt = apiconn.read()
        finally:
            # BUG FIX: the HTTP connection was never closed.
            apiconn.close()

    def getXslt(self):
        """Return the current XSLT template (str or bytes)."""
        return self.xslt

    def extract(self, html):
        """Apply the XSLT to an lxml DOM object; return the result tree."""
        xslt_root = etree.XML(self.xslt)
        transform = etree.XSLT(xslt_root)
        return transform(html)

    def extractHTML(self, html):
        """Parse HTML source text, then extract; return the result tree."""
        return self.extract(etree.HTML(html))
复制代码- # _*_coding:utf-8_*_
- # douban.py
- # 爬取豆瓣小组讨论话题
import time

from urllib import request

from lxml import etree
from selenium import webdriver

from gooseeker import GsExtractor
class PhantomSpider:
    """Fetch rendered pages through headless PhantomJS and save results."""

    def getContent(self, url):
        """Load `url` in PhantomJS and return the rendered page as an lxml DOM.

        Requires `import time` at module level (it was missing in the
        original douban.py, causing a NameError at `time.sleep`).
        """
        browser = webdriver.PhantomJS(
            executable_path='H:\\phantomjs-2.1.1-windows\\bin\\phantomjs.exe')
        try:
            browser.get(url)
            # Give the page's JavaScript time to render before scraping.
            time.sleep(3)
            html = browser.execute_script(
                "return document.documentElement.outerHTML")
        finally:
            # BUG FIX: the original never quit the browser, leaking one
            # PhantomJS process per call.
            browser.quit()
        return etree.HTML(html)

    def saveContent(self, filepath, content):
        """Write `content` to `filepath` as UTF-8 text."""
        # `with` closes the file even if write() raises.
        with open(filepath, 'w', encoding='UTF-8') as file_obj:
            file_obj.write(content)
# Configure the extractor with an XSLT scraping rule fetched from the
# GooSeeker API.
# First argument: the app key — apply for one at the GooSeeker member center.
# Second argument: the rule name, created with GooSeeker's graphical tool (MS).
doubanExtra = GsExtractor()
doubanExtra.setXsltFromAPI("ffd5273e213036d812ea298922e2627b", "豆瓣小组讨论话题")

url = "https://www.douban.com/group/haixiuzu/discussion?start="
totalpages = 5
doubanSpider = PhantomSpider()

print("爬取开始")
# Douban paginates in steps of 25; page numbers run 1 .. totalpages-1.
for pagenumber in range(1, totalpages):
    offset = (pagenumber - 1) * 25
    currenturl = url + str(offset)
    print("正在爬取", currenturl)
    dom = doubanSpider.getContent(currenturl)
    extracted = doubanExtra.extract(dom)
    # One output file per page: result1.xml, result2.xml, ...
    doubanSpider.saveContent("result" + str(pagenumber) + ".xml", str(extracted))
print("爬取结束")
复制代码
|
本帖子中包含更多资源
您需要 登录 才可以下载或查看,没有帐号?立即注册
x
|
|
|
|
共 3 个关于本帖的回复 最后回复于 2017-8-3 21:00