1.安装类库
pip install -i <镜像源地址> requests
pip install -i <镜像源地址> lxml
2.步骤
1.获取页面信息
- 通过csv文件读取参数变量
2.提取页面元素
3.保存抓取内容
3.脚本
import requests
from lxml import etree
import csv
# 获取页面信息
def get_page(url):
    """Fetch *url* and return the response body decoded as text.

    Bug fix: the original detected the charset via
    ``res.apparent_encoding`` but then ignored it and hard-coded
    ``"gbk"``. Using the detected encoding decodes non-GBK pages
    correctly too (GBK pages are still detected as such).
    """
    # Send the HTTP GET request
    res = requests.get(url)
    # Decode with the charset requests detected from the body
    res.encoding = res.apparent_encoding
    return res.text
# 提取页面元素
def get_element(content, path_str):
    """Extract the text of the first ``<p>`` for items 1-10.

    *path_str* is an XPath prefix ending in ``[`` (see test1.csv);
    the item index and ``]/p/text()`` are appended to locate each
    entry. Each returned string is ``"<index><text>\\n"``.
    """
    # Parse the HTML into a DOM tree once, then pull items 1..10.
    doc = etree.HTML(content)
    return [
        str(idx) + doc.xpath(path_str + str(idx) + ']/p/text()')[0] + '\n'
        for idx in range(1, 11)
    ]
# 保存抓取内容
def save_element(listcont):
    """Append the scraped item lines to ``spider.txt``.

    Bug fix: the original opened the file without a context manager,
    so an exception during ``write`` leaked the file handle. ``with``
    guarantees the file is closed on every path.

    Note: each element already ends in ``'\\n'``, so joining with
    ``'\\n'`` leaves a blank line between items — kept byte-for-byte
    for compatibility with existing spider.txt consumers.
    """
    with open("spider.txt", "a", encoding='utf-8') as out:
        out.write('\n'.join(listcont) + "\n")
def _scrape(url, path_str):
    """Fetch one page, extract its 10 items, append them to spider.txt."""
    print(url)
    print(path_str)
    content = get_page(url)
    listcont = get_element(content, path_str)
    print(listcont)
    save_element(listcont)


if __name__ == '__main__':
    # Bug fix: the CSV handle was never closed; read all rows inside
    # a context manager, then release the file before scraping.
    with open('test1.csv', 'r') as file_csv:
        rows = list(csv.reader(file_csv))

    # Each row is (url-or-url-prefix, xpath-prefix).
    # Bug fix: the original reused `i` both as the row counter and as
    # the inner page-loop variable, clobbering the counter; enumerate
    # plus a distinct `page` variable keeps the two independent.
    for row_num, row in enumerate(rows, start=1):
        path_str = row[1]
        if row_num == 1 or row_num == 2:
            # Rows 1-2: the URL is the complete first-page address.
            _scrape(row[0], path_str)
        else:
            # Later rows: the URL is a "...-page-" prefix; pages 2-8
            # are built by appending the page number and ".html".
            for page in range(3, 10):
                url = row[0] + str(page - 1) + '.html'
                _scrape(url, path_str)
附件:test1.csv
http://www.51testing.com/html/04/category-catid-104.html,/html/body/div[6]/div[1]/div[
http://www.51testing.com/html/16/category-catid-116.html,/html/body/div[4]/div/div[
http://www.51testing.com/html/04/category-catid-104-page-,/html/body/div[6]/div[1]/div[
http://www.51testing.com/html/16/category-catid-116-page-,/html/body/div[4]/div/div[
