
Scraping job listings from Lagou and recruitment sites based on search keywords

Date: 2019-02-28 02:00:12


Code:

import random
import time

import requests

# Simple proxy pool; replace with live proxies before running.
ip_list = ['117.135.132.107', '121.8.98.196', '194.116.198.212']

# HTTP request headers. The Host/Origin/Referer domains were stripped in the
# scraped source; www.lagou.com is restored here since the script targets Lagou.
# The Cookie value is the (long-stale) session from the original post; replace
# it with your own. Note: 'X-Anit-Forge-*' is Lagou's own spelling.
headers = {
    'Accept': 'application/json, text/javascript, */*; q=0.01',
    'Accept-Encoding': 'gzip, deflate, br',
    'Accept-Language': 'zh-CN,zh;q=0.8',
    'Connection': 'keep-alive',
    # 'Content-Length' is omitted: requests computes it automatically, and the
    # hard-coded value ('25' in the original) can corrupt the request.
    'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
    'Cookie': 'user_trace_token=021402-9151732d-f216-11e6-acb5-525400f775ce; LGUID=021402-91517b06-f216-11e6-acb5-525400f775ce; JSESSIONID=ABAAABAAAGFABEF53B117A40684BFB6190FCDFF136B2AE8; _putrc=ECA3D429446342E9; login=true; unick=yz; showExpriedIndex=1; showExpriedCompanyHome=1; showExpriedMyPublish=1; hasDeliver=0; PRE_UTM=; PRE_HOST=; PRE_SITE=; PRE_LAND=https%3A%2F%%2F; TG-TRACK-CODE=index_navigation; Hm_lvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1494688520,1494690499,1496044502,1496048593; Hm_lpvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1496061497; _gid=GA1.2.2090691601.1496061497; _gat=1; _ga=GA1.2.1759377285.1487008943; LGSID=0529203716-8c254049-446b-11e7-947e-5254005c3644; LGRID=0529203828-b6fc4c8e-446b-11e7-ba7f-525400f775ce; SEARCH_ID=13c3482b5ddc4bb7bfda721bbe6d71c7; index_location_city=%E6%9D%AD%E5%B7%9E',
    'Host': 'www.lagou.com',
    'Origin': 'https://www.lagou.com',
    'Referer': 'https://www.lagou.com/jobs/list_Python?',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',
    'X-Anit-Forge-Code': '0',
    'X-Anit-Forge-Token': 'None',
    'X-Requested-With': 'XMLHttpRequest',
}


def get_json(url, page, lange_name):
    # Build the POST form data ('firts' in the original is a typo for 'first').
    FramData = {'first': 'true', 'pn': page, 'kd': lange_name}
    # POST through a randomly chosen proxy; a 200 response means success.
    # .json() decodes the response body into a dict.
    JsonDatas = requests.post(url, FramData, headers=headers,
                              proxies={'http': 'http://' + random.choice(ip_list)}).json()
    return JsonDatas


def parser_json(page, JsonDatas):
    # JsonDatas is a dict.
    # total = int(JsonDatas['content']['positionResult']['totalCount'])
    companyInfos = []
    # List of job postings on this page.
    companyInfo = JsonDatas['content']['positionResult']['result']
    print("Parsing job listings on page {0}".format(page))
    # Walk through every company on the page.
    for company in companyInfo:
        # Temporary list holding one company's fields.
        comInfo = []
        # City where the company is located (plus district when available).
        if company['district'] is not None:
            city = company['city'] + '-' + company['district']
        else:
            city = company['city']
        comInfo.append(city)
        # Position title.
        comInfo.append(company['positionName'])
        # Full company name with the short name in parentheses.
        companyFullName = company['companyFullName'] + '(' + company['companyShortName'] + ')'
        comInfo.append(companyFullName)
        # Required education.
        comInfo.append(company['education'])
        # Job type (full-time, internship, ...).
        comInfo.append(company['jobNature'])
        # Benefits; replace commas so they don't break the CSV columns.
        positionAdvantage = company['positionAdvantage'].replace(',', ';').replace(',', ';')
        comInfo.append(positionAdvantage)
        # Salary.
        comInfo.append(company['salary'])
        # Required work experience.
        comInfo.append(company['workYear'])
        # Posting time (the original named this 'time', shadowing the time module).
        comInfo.append(company['createTime'])
        # Add this company's record to the page's results.
        companyInfos.append(comInfo)
    print("Page {0} parsed".format(page))
    return companyInfos


def writeCSV(page, fw, companyInfos):
    for companyInfo in companyInfos:
        fw.write(",".join(companyInfo) + '\n')
    print("Page {0} written".format(page))


def main():
    path = 'F:'  # output directory
    start_page = 1
    end_page = 20  # default: crawls pages 1 through 19
    lange_name = input("Enter the job keyword to search for: ")
    city = input("Enter the city: ")
    # Create the output file ('\\' replaces the original's invalid '\l' escape).
    fw = open(path + '\\lagou_' + lange_name + '.csv', 'a+', encoding='utf-8')
    # Build the request URL (domain restored; see the note above headers).
    start_url = 'https://www.lagou.com/jobs/positionAjax.json?px=default&city='
    end_url = '&needAddtionalResult=false&isSchoolJob=0'
    url = start_url + city + end_url
    page = start_page
    row = ['City', 'Position', 'Company', 'Education', 'Job Type', 'Benefits',
           'Salary', 'Experience', 'Posted']
    fw.write(",".join(row) + '\n')
    while page < end_page:
        time.sleep(12)  # throttle requests to soften Lagou's anti-crawler checks
        print("Fetching page {0} of job data".format(page))
        # Fetch the JSON data for this page.
        JsonDatas = get_json(url, page, lange_name)
        # Parse the fetched data.
        companyInfos = parser_json(page, JsonDatas)
        # Write the results to the CSV file.
        writeCSV(page, fw, companyInfos)
        page = page + 1
    fw.close()
    print("All data written")


if __name__ == '__main__':
    main()
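One caveat with writeCSV above: it joins fields with bare commas, so any field that itself contains a comma breaks the column layout (only positionAdvantage is sanitized by hand). A minimal sketch of a safer variant using Python's standard csv module, which quotes such fields automatically (write_csv_safe is a hypothetical replacement, not part of the original script):

import csv

def write_csv_safe(page, fw, companyInfos):
    # csv.writer quotes any field containing commas, quotes, or newlines,
    # so no manual replace(',', ';') sanitization is needed.
    writer = csv.writer(fw)
    writer.writerows(companyInfos)
    print("Page {0} written".format(page))

# Usage: open the output file with newline='' so the csv module controls
# line endings, e.g. open('F:\\lagou_python.csv', 'a+', newline='', encoding='utf-8')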
