November 10, 2022

A Python Web-Scraping Freelance Case

A quick walkthrough of a Python web-scraping freelance job worth 1K

  • Data to collect
  • Regular scraping
  • Multithreaded scraping
  • Scraping with the Scrapy framework
    • items.py
    • middlewares.py
    • pipelines.py
    • settings.py
Data to collect:

1、Job title

2、Salary

3、City

4、Education requirement

5、Work-experience requirement

6、Company name

7、Company benefits

8、Funding stage

9、Posting and résumé-feedback time

The target page is a simple static page, so there is nothing that needs much analysis. The whole job comes down to three steps (a bare-bones sketch follows the list):
1、Send a request that mimics a browser and fetch the page's HTML source

2、Parse the source and extract the fields we want

3、Save the extracted data as a CSV file (or in another format)
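
As a bare-bones sketch of those three steps (the URL and selector below are placeholders, not the actual target page):

import csv

import requests
import parsel

# step 1: request the page while pretending to be a browser
headers = {'User-Agent': 'Mozilla/5.0'}
response = requests.get('https://example.com/jobs', headers=headers)  # placeholder URL

# step 2: parse the HTML source and pull out the fields we care about
selector = parsel.Selector(response.text)
titles = selector.css('h3.title::text').getall()  # placeholder selector

# step 3: save the extracted data as a CSV file
with open('demo.csv', mode='w', encoding='utf-8', newline='') as f:
    writer = csv.writer(f)
    writer.writerow(['title'])
    for title in titles:
        writer.writerow([title])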

Regular scraping

import requests
import parsel
import csv

# open the CSV once, write the header row, and keep the handle for the loop below
f = open('data.csv', mode='a', encoding='utf-8', newline='')
csv_writer = csv.DictWriter(f, fieldnames=['标题', '薪资', '城市',
                                           '学历', '工作经验', '公司名字',
                                           '融资情况', '公司福利', '招聘时间',
                                           '简历反馈时间'
                                           ])
csv_writer.writeheader()


# crawl the first 10 pages of search results
for page in range(0, 10):
    url = 'https://www.liepin.com/zhaopin/'
    params = {
        'compkind': '',
        'dqs': '',
        'pubTime': '',
        'pageSize': '40',
        'salary': '',
        'compTag': '',
        'sortFlag': '',
        'degradeFlag': '0',
        'compIds': '',
        'subIndustry': '',
        'jobKind': '',
        'industries': '',
        'compscale': '',
        'key': 'python',
        'siTag': 'I-7rQ0e90mv8a37po7dV3Q~fA9rXquZc5IkJpXC-Ycixw',
        'd_sfrom': 'search_fp',
        'd_ckId': 'cd74f9fdbdb63c6d462bad39feddc7f1',
        'd_curPage': '2',
        'd_pageSize': '40',
        'd_headId': 'cd74f9fdbdb63c6d462bad39feddc7f1',
        'curPage': page,  # the only parameter that changes between requests
    }
    # a browser User-Agent so the request is not rejected outright
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36'}
    response = requests.get(url=url, params=params, headers=headers)
    # parse the HTML and walk the list of job cards
    selector = parsel.Selector(response.text)
    lis = selector.css('div.job-content div:nth-child(1) ul li')
    for li in lis:
        title = li.css('.job-info h3 a::text').get().strip()
        money = li.css('.condition span.text-warning::text').get()
        city = li.css('.condition .area::text').get()
        edu = li.css('.condition .edu::text').get()
        experience = li.css('.condition span:nth-child(4)::text').get()
        company = li.css('.company-name a::text').get()
        financing = li.css('.field-financing span::text').get()
        temptation_list = li.css('p.temptation.clearfix span::text').getall()
        temptation_str = '|'.join(temptation_list)
        release_time = li.css('p.time-info.clearfix time::text').get()
        feedback_time = li.css('p.time-info.clearfix span::text').get()
        dit = {
            '标题': title,
            '薪资': money,
            '城市': city,
            '学历': edu,
            '工作经验': experience,
            '公司名字': company,
            '融资情况': financing,
            '公司福利': temptation_str,
            '招聘时间': release_time,
            '简历反馈时间': feedback_time,
        }
        csv_writer.writerow(dit)
        print(dit)

f.close()
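
One caveat: a CSV written with encoding='utf-8' may show garbled Chinese headers when opened directly in Excel; encoding='utf-8-sig' writes a byte-order mark that Excel recognizes.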



Multithreaded scraping

import requests
import parsel
import csv
import threading

f = open('data_1.csv', mode='a', encoding='utf-8', newline='')
csv_writer = csv.DictWriter(f, fieldnames=['标题', '薪资', '城市',
                                           '学历', '工作经验', '公司名字',
                                           '融资情况', '公司福利', '招聘时间',
                                           '简历反馈时间'
                                           ])
csv_writer.writeheader()
# the DictWriter is shared by every thread, so writes must be serialized
lock = threading.Lock()


def get_response(html_url, p):
    # fetch one page of search results with a browser User-Agent
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36'}
    response = requests.get(url=html_url, params=p, headers=headers)
    return response


def get_parsing(html_data):
    # wrap the raw HTML in a parsel selector
    selector = parsel.Selector(html_data)
    return selector


def main(p):
    url = 'https://www.liepin.com/zhaopin/'
    html_data = get_response(url, p).text
    selector = get_parsing(html_data)
    lis = selector.css('div.job-content div:nth-child(1) ul li')
    for li in lis:
        title = li.css('.job-info h3 a::text').get().strip()
        money = li.css('.condition span.text-warning::text').get()
        city = li.css('.condition .area::text').get()
        edu = li.css('.condition .edu::text').get()
        experience = li.css('.condition span:nth-child(4)::text').get()
        company = li.css('.company-name a::text').get()
        financing = li.css('.field-financing span::text').get()
        temptation_list = li.css('p.temptation.clearfix span::text').getall()
        temptation_str = '|'.join(temptation_list)
        release_time = li.css('p.time-info.clearfix time::text').get()
        feedback_time = li.css('p.time-info.clearfix span::text').get()
        dit = {
            '标题': title,
            '薪资': money,
            '城市': city,
            '学历': edu,
            '工作经验': experience,
            '公司名字': company,
            '融资情况': financing,
            '公司福利': temptation_str,
            '招聘时间': release_time,
            '简历反馈时间': feedback_time,
        }
        # csv writers are not thread-safe; take the lock around each row
        with lock:
            csv_writer.writerow(dit)
        print(dit)


if __name__ == '__main__':
    threads = []
    for page in range(0, 10):
        params = {
            'compkind': '',
            'dqs': '',
            'pubTime': '',
            'pageSize': '40',
            'salary': '',
            'compTag': '',
            'sortFlag': '',
            'degradeFlag': '0',
            'compIds': '',
            'subIndustry': '',
            'jobKind': '',
            'industries': '',
            'compscale': '',
            'key': 'python',
            'siTag': 'I-7rQ0e90mv8a37po7dV3Q~fA9rXquZc5IkJpXC-Ycixw',
            'd_sfrom': 'search_fp',
            'd_ckId': 'cd74f9fdbdb63c6d462bad39feddc7f1',
            'd_curPage': '2',
            'd_pageSize': '40',
            'd_headId': 'cd74f9fdbdb63c6d462bad39feddc7f1',
            'curPage': page,
        }
        # one thread per results page
        main_thread = threading.Thread(target=main, args=(params,))
        main_thread.start()
        threads.append(main_thread)
    # wait for all pages to finish before closing the output file
    for t in threads:
        t.join()
    f.close()
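
An alternative sketch (not from the original code, and reusing main() from the script above) caps the number of concurrent requests with concurrent.futures.ThreadPoolExecutor; build_params is a hypothetical helper standing in for the full params dict:

from concurrent.futures import ThreadPoolExecutor

def build_params(page):
    # hypothetical helper: return the same params dict as above,
    # with 'curPage' set to the given page number
    return {'key': 'python', 'pageSize': '40', 'curPage': page}

# at most 5 pages are fetched at the same time; the with-block
# waits for every submitted task to finish before returning
with ThreadPoolExecutor(max_workers=5) as executor:
    executor.map(main, [build_params(page) for page in range(0, 10)])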

Scraping with the Scrapy framework

items.py

import scrapy


class LiepingwangItem(scrapy.Item):
    # one Field per column that the pipeline writes out to the CSV
    title = scrapy.Field()
    money = scrapy.Field()
    city = scrapy.Field()
    edu = scrapy.Field()
    experience = scrapy.Field()
    company = scrapy.Field()
    financing = scrapy.Field()
    temptation_str = scrapy.Field()
    release_time = scrapy.Field()
    feedback_time = scrapy.Field()

middlewares.py

class LiepingwangDownloaderMiddleware:
    def process_request(self, request, spider):
        # attach a browser User-Agent to every outgoing request
        request.headers.update(
            {
                'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36',
            }
        )
        # returning None lets the request continue through the download chain
        return None
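
If the fixed User-Agent ever gets blocked, one common variation (a sketch, not part of the original project) is a middleware that picks a random browser identity per request:

import random

USER_AGENTS = [
    'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36',
]


class RandomUserAgentMiddleware:
    def process_request(self, request, spider):
        # choose a different identity for each outgoing request
        request.headers['User-Agent'] = random.choice(USER_AGENTS)
        return None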

pipelines.py

import csv


class LiepingwangPipeline:
    def __init__(self):
        # open the output file once when the pipeline is created
        self.file = open('data_2.csv', mode='a', encoding='utf-8', newline='')
        self.csv_file = csv.DictWriter(self.file, fieldnames=['title', 'money', 'city', 'edu',
                                                              'experience', 'company', 'financing', 'temptation_str',
                                                              'release_time', 'feedback_time'
                                                              ])
        self.csv_file.writeheader()

    def process_item(self, item, spider):
        dit = dict(item)
        # strip the stray whitespace the selectors leave behind
        dit['financing'] = dit['financing'].strip()
        dit['title'] = dit['title'].strip()
        self.csv_file.writerow(dit)
        return item

    def close_spider(self, spider):
        # Scrapy calls this hook automatically when the crawl finishes
        self.file.close()
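
Note that close_spider (not a custom name) is the hook Scrapy invokes on shutdown; a pipeline can likewise define open_spider(self, spider) for setup work, which would be another reasonable place to open the output file.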

settings.py

# ignore robots.txt so the listing pages are not filtered out
ROBOTSTXT_OBEY = False
# enable the User-Agent middleware; the number is its priority slot
DOWNLOADER_MIDDLEWARES = {
   'liepingwang.middlewares.LiepingwangDownloaderMiddleware': 543,
}
# enable the CSV pipeline; lower numbers run earlier when several are active
ITEM_PIPELINES = {
   'liepingwang.pipelines.LiepingwangPipeline': 300,
}
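
For a single fixed User-Agent, setting Scrapy's built-in USER_AGENT option here in settings.py would achieve the same effect as the downloader middleware above.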


The spider itself (e.g. spiders/zpinfo.py):

import scrapy

from ..items import LiepingwangItem


class ZpinfoSpider(scrapy.Spider):
    name = 'zpinfo'
    allowed_domains = ['liepin.com']
    start_urls = ['https://www.liepin.com/zhaopin/?sfrom=click-pc_homepage-centre_searchbox-search_new&d_sfrom=search_fp&key=python']

    def parse(self, response):
        # each li is one job card in the search-results list
        lis = response.css('div.job-content div:nth-child(1) ul li')
        for li in lis:
            title = li.css('.job-info h3 a::text').get().strip()
            money = li.css('.condition span.text-warning::text').get()
            city = li.css('.condition .area::text').get()
            edu = li.css('.condition .edu::text').get()
            experience = li.css('.condition span:nth-child(4)::text').get()
            company = li.css('.company-name a::text').get()
            financing = li.css('.field-financing span::text').get()
            temptation_list = li.css('p.temptation.clearfix span::text').getall()
            temptation_str = '|'.join(temptation_list)
            release_time = li.css('p.time-info.clearfix time::text').get()
            feedback_time = li.css('p.time-info.clearfix span::text').get()
            yield LiepingwangItem(title=title, money=money, city=city, edu=edu, experience=experience, company=company,
                                  financing=financing, temptation_str=temptation_str, release_time=release_time,
                                  feedback_time=feedback_time)
        # follow the "next page" link; the recursion stops when it disappears
        href = response.css('div.job-content div:nth-child(1) a:nth-child(9)::attr(href)').get()
        if href:
            next_url = 'https://www.liepin.com' + href
            yield scrapy.Request(url=next_url, callback=self.parse)
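
To run it, execute scrapy crawl zpinfo from the project root, or drive it from a plain Python script. A minimal sketch, assuming the spider file is saved as spiders/zpinfo.py inside the liepingwang project:

from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

from liepingwang.spiders.zpinfo import ZpinfoSpider

# picks up settings.py (middleware, pipeline, ROBOTSTXT_OBEY) automatically
process = CrawlerProcess(get_project_settings())
process.crawl(ZpinfoSpider)
process.start()  # blocks until the crawl finishes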
