1 Star 1 Fork 3

tikazyq / spider-gsxt

加入 Gitee
与超过 1200万 开发者一起发现、参与优秀开源项目,私有仓库也完全免费 :)
免费加入
该仓库未声明开源许可证文件(LICENSE),使用请关注具体项目描述及其代码上游依赖。
克隆/下载
spider.py 17.22 KB
一键复制 编辑 原始数据 按行查看 历史
Yeqing Zhang 提交于 2017-12-08 16:56 . 添加
# -*-coding:utf-8 -*-
import os
import random
import time
from datetime import datetime, date
from selenium.webdriver import ActionChains, DesiredCapabilities
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from urllib.request import urlretrieve
from selenium import webdriver
from bs4 import BeautifulSoup
import PIL.Image as image
import re
from pyvirtualdisplay import Display
import utils
from db import Session, Company
class GsxtSpider:
    """Spider for the national enterprise credit site (www.gsxt.gov.cn).

    Workflow: open the search page, solve the Geetest slider captcha by
    downloading and reassembling the scrambled background images, locate the
    gap, simulate a human-like drag, then walk the result pages and persist
    each company's detail into the database via SQLAlchemy (``db.Session`` /
    ``db.Company``).
    """

    def __init__(self, keyword, use_chrome=False):
        """Create the spider.

        :param keyword: search keyword (company name) typed into the site.
        :param use_chrome: use Chrome when True, otherwise PhantomJS with
            custom headers.
        """
        self.url = 'http://www.gsxt.gov.cn/index.html'
        self.keyword = keyword
        # Width (px) of the slider's border; subtracted from the detected
        # gap offset before building the drag track.
        self.BORDER = 6
        self.mysql = Session()
        self.data = []
        self.use_chrome = use_chrome
        self.browser = None
        self.wait = None
        # Virtual X display so a real browser can run on a headless server.
        self.display = Display(size=(1280, 720))
        self.display.start()
        self.init_browser()

    def init_browser(self):
        """Open a fresh browser (Chrome or PhantomJS) and its WebDriverWait.

        Also called to replace a browser whose IP got blocked.
        """
        tic = datetime.now().timestamp()
        print('Opening the browser')
        if self.use_chrome:
            self.browser = webdriver.Chrome()
            self.wait = WebDriverWait(self.browser, 5)
            print('Browser opened')
            toc = datetime.now().timestamp()
            print('Time cost: %.1f sec' % (toc - tic))
            return
        # PhantomJS path: spoof browser-like request headers.
        desired_capabilities = DesiredCapabilities.PHANTOMJS.copy()
        headers = {
            'Accept': '*/*',
            'Accept-Language': 'en-US,en;q=0.8',
            'Cache-Control': 'max-age=0',
            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36',
            'Connection': 'keep-alive',
        }
        for key, value in headers.items():
            desired_capabilities['phantomjs.page.customHeaders.{}'.format(key)] = value
        # NOTE(review): this deliberately overwrites the User-Agent set in
        # the loop above; the Mac UA below is the one actually sent.
        desired_capabilities[
            'phantomjs.page.customHeaders.User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'
        self.browser = webdriver.PhantomJS(desired_capabilities=desired_capabilities)
        self.browser.set_window_size(1280, 773)
        self.wait = WebDriverWait(self.browser, 5)

    def open(self):
        """Open the search page and submit the query keyword.

        If the site reports the IP as blocked, the browser is recycled up to
        10 times to obtain a new session.
        """
        self.browser.get(self.url)
        err_num = 0
        while self.is_ip_blocked() and err_num < 10:
            print('IP is blocked, start retrying')
            self.browser.quit()
            self.init_browser()
            time.sleep(1)
            err_num += 1
        self.browser.set_page_load_timeout(10)
        self.browser.set_script_timeout(10)
        keyword = self.wait_for(By.ID, "keyword")
        keyword.send_keys(self.keyword)
        time.sleep(0.2)
        button = self.wait_for(By.ID, 'btn_query')
        button.click()
        time.sleep(1)

    def is_ip_blocked(self):
        """Return True when the page shows the site's anti-attack ban notice."""
        return '最近有可疑的攻击行为' in self.browser.page_source

    @staticmethod
    def _parse_style_url(style):
        """Extract the image URL from an inline CSS ``background-image`` style.

        The site serves the URL either quoted or unquoted, and as webp; the
        webp extension is swapped for jpg so PIL can open the download.
        """
        m = re.search(r'url\("(.*)"\);', style)
        if m is None:
            m = re.search(r'url\((.*)\);', style)
        return m.group(1).replace('webp', 'jpg')

    @staticmethod
    def _parse_locations(slices):
        """Parse the background-position x/y offset of each image slice."""
        locations = []
        for piece in slices:
            x, y = re.findall(r'background-position: (.*)px (.*)px;', piece.get('style'))[0]
            locations.append({'x': int(x), 'y': int(y)})
        return locations

    def get_images(self, bg_filename, fullbg_filename):
        """Download the two captcha images (with-gap and full background).

        :param bg_filename: path to save the with-gap image.
        :param fullbg_filename: path to save the full background image.
        :return: (bg_location_list, fullbg_location_list) — the slice
            positions needed to unscramble each image.
        """
        bg = []
        fullgb = []
        # The captcha widget is injected asynchronously; poll the DOM until
        # both slice sets appear.
        while bg == [] and fullgb == []:
            bf = BeautifulSoup(self.browser.page_source, 'lxml')
            bg = bf.find_all('div', class_='gt_cut_bg_slice')
            fullgb = bf.find_all('div', class_='gt_cut_fullbg_slice')
        bg_url = self._parse_style_url(bg[0].get('style'))
        fullgb_url = self._parse_style_url(fullgb[0].get('style'))
        bg_location_list = self._parse_locations(bg)
        fullbg_location_list = self._parse_locations(fullgb)
        urlretrieve(url=bg_url, filename=bg_filename)
        urlretrieve(url=fullgb_url, filename=fullbg_filename)
        return bg_location_list, fullbg_location_list

    def get_merge_image(self, filename, location_list):
        """Reassemble a scrambled captcha image and save it in place.

        Geetest serves the image as 10px-wide slices in two rows; slices with
        y == -58 belong to the upper half, y == 0 to the lower half.

        :param filename: path of the downloaded scrambled image.
        :param location_list: slice positions from :meth:`get_images`.
        :return: the merged 260x116 PIL image.
        """
        scrambled = image.open(filename)
        upper_slices = []
        lower_slices = []
        for location in location_list:
            x = abs(location['x'])
            if location['y'] == -58:
                upper_slices.append(scrambled.crop((x, 58, x + 10, 166)))
            if location['y'] == 0:
                lower_slices.append(scrambled.crop((x, 0, x + 10, 58)))
        merged = image.new('RGB', (260, 116))
        x_offset = 0
        for piece in upper_slices:
            merged.paste(piece, (x_offset, 0))
            x_offset += piece.size[0]
        x_offset = 0
        for piece in lower_slices:
            merged.paste(piece, (x_offset, 58))
            x_offset += piece.size[0]
        merged.save(filename)
        return merged

    def is_pixel_equal(self, img1, img2, x, y):
        """Return True when pixel (x, y) is nearly identical in both images.

        Each RGB channel must differ by less than the threshold.

        BUG FIX: the original wrote ``abs(pix1[0] - pix2[0] < threshold)``,
        i.e. ``abs()`` of a boolean — any negative difference compared as
        "equal" no matter how large. The parenthesis now wraps the
        subtraction, as intended.

        :param img1: first PIL image.
        :param img2: second PIL image.
        :param x: pixel x coordinate.
        :param y: pixel y coordinate.
        :return: bool.
        """
        pix1 = img1.load()[x, y]
        pix2 = img2.load()[x, y]
        threshold = 60
        return (abs(pix1[0] - pix2[0]) < threshold
                and abs(pix1[1] - pix2[1]) < threshold
                and abs(pix1[2] - pix2[2]) < threshold)

    def get_gap(self, img1, img2):
        """Return the x offset of the captcha gap.

        Scans columns left to right and returns the first x where the two
        images differ; falls back to the start position when no difference
        is found.

        :param img1: full background image (no gap).
        :param img2: background image with the gap cut out.
        :return: gap x offset in pixels.
        """
        left = 43  # skip the slider's own resting position on the far left
        for i in range(left, img1.size[0]):
            for j in range(img1.size[1]):
                if not self.is_pixel_equal(img1, img2, i, j):
                    return i
        return left

    def get_track(self, distance):
        """Build a human-like drag track covering *distance* pixels.

        Accelerates (a=2) for the first 4/5 of the distance, then
        decelerates (a=-3), sampling every t=0.2 "seconds" of simulated
        physics; each step's displacement is rounded to whole pixels.

        :param distance: total offset to travel.
        :return: list of int per-step x moves.
        """
        track = []
        current = 0          # displacement so far (unrounded)
        mid = distance * 4 / 5   # deceleration threshold
        t = 0.2              # sampling interval
        v = 0                # current velocity
        while current < distance:
            a = 2 if current < mid else -3
            v0 = v
            # v = v0 + a*t ; x = v0*t + a*t^2/2
            v = v0 + a * t
            move = v0 * t + 1 / 2 * a * t * t
            current += move
            track.append(round(move))
        return track

    def get_slider(self):
        """Poll until the slider knob element appears and return it."""
        while True:
            try:
                slider = self.browser.find_element_by_xpath("//div[@class='gt_slider_knob gt_show']")
                break
            except Exception:
                # Was a bare except: narrowed so Ctrl-C still interrupts.
                time.sleep(0.5)
        return slider

    def move_to_gap(self, slider, track):
        """Drag the slider along *track* to the gap and release it.

        Steps are consumed in random order to look less robotic (the total
        displacement is unchanged).

        :param slider: the slider WebElement.
        :param track: list of per-step x moves (consumed/emptied here).
        """
        ActionChains(self.browser).click_and_hold(slider).perform()
        while track:
            x = random.choice(track)
            ActionChains(self.browser).move_by_offset(xoffset=x, yoffset=0).perform()
            track.remove(x)
        time.sleep(0.5)
        ActionChains(self.browser).release().perform()

    def wait_for(self, by1, by2):
        """Wait for and return the element located by (by1, by2)."""
        return self.wait.until(EC.presence_of_element_located((by1, by2)))

    def get_ans(self):
        """Return the captcha verification result text shown by the widget."""
        element = self.wait_for(By.CLASS_NAME, "gt_info_text")
        # The original did a redundant encode('utf-8').decode() round-trip;
        # element.text is already str.
        return element.text

    def verify(self):
        """Solve the slider captcha, retrying until the site accepts it.

        Downloads and reassembles the captcha images, measures the gap,
        drags the slider, and inspects the widget's feedback text. After
        five consecutive failures the browser is recycled entirely.
        """
        print('Start verifying')
        tic = datetime.now().timestamp()
        flag = True
        self.open()
        tmp_dir = './tmp/'
        if not os.path.exists(tmp_dir):
            os.makedirs(tmp_dir)
        # Unique image names so parallel runs don't clobber each other.
        bg_filename = 'bg_%s.jpg' % utils.encrypt(str(datetime.now()))
        fullbg_filename = 'fullbg_%s.jpg' % utils.encrypt(str(datetime.now()))
        stats = {
            'error': 0,
            'success': 0,
            'total': 0
        }
        i = 0  # consecutive-failure counter
        while flag:
            bg_location_list, fullbg_location_list = self.get_images(tmp_dir + bg_filename, tmp_dir + fullbg_filename)
            bg_img = self.get_merge_image(tmp_dir + bg_filename, bg_location_list)
            fullbg_img = self.get_merge_image(tmp_dir + fullbg_filename, fullbg_location_list)
            gap = self.get_gap(fullbg_img, bg_img)
            track = self.get_track(gap - self.BORDER)
            slider = self.get_slider()
            self.move_to_gap(slider, track)
            time.sleep(0.8)
            ans = self.get_ans()
            stats['total'] += 1
            if '通过' in ans:
                print('Verification succeeded')
                stats['success'] += 1
                break
            elif '出现错误' in ans:
                stats['error'] += 1
                print('An error occurred. Retrying...')
            else:
                stats['error'] += 1
                print('Verification failed.')
            print(stats)
            time.sleep(3)
            self.open()
            if i >= 5:
                # Too many failures in a row: recycle the whole browser.
                i = 0
                print('Error times exceeded 5, quit browser and retry')
                time.sleep(random.randint(2, 5))
                self.browser.quit()
                self.init_browser()
                self.open()
                continue
            i += 1
        toc = datetime.now().timestamp()
        print('Verification time cost %.1f sec' % (toc - tic))
        time.sleep(5)

    def get_company_list(self):
        """Walk all result pages and collect each company's detail dict.

        Companies cached in the database less than 24h ago are served from
        the cache; otherwise the detail tab is opened and scraped.

        :return: list of company dicts.
        """
        tic = datetime.now().timestamp()
        data_dict = {}  # keyed by unified social credit code, dedupes pages
        while True:
            for i, elem in enumerate(self.browser.find_elements_by_class_name('search_list_item')):
                cc = elem.find_element_by_xpath('/html/body/div[5]/div[3]/a[%s]/div[2]/div[1]/span' % (i + 1)).text
                n = elem.find_element_by_xpath('/html/body/div[5]/div[3]/a[%s]/h1' % (i + 1)).text
                c = self.mysql.query(Company).filter_by(credit_code=cc).first()
                # Serve from DB cache when fresher than 24h (86400 s).
                if c and datetime.now().timestamp() < c.update_ts + 86400:
                    data_dict[cc] = c.to_dict()
                    continue
                # Open the detail page in a new tab and scrape it.
                ActionChains(self.browser).click(elem).perform()
                time.sleep(1)
                self.browser.switch_to_window(self.browser.window_handles[1])
                c = self.get_company_detail()
                if not c:
                    self.browser.close()
                    self.browser.switch_to_window(self.browser.window_handles[0])
                    continue
                data_dict[cc] = c
                time.sleep(0.1)
                self.browser.close()
                self.browser.switch_to_window(self.browser.window_handles[0])
                time.sleep(0.2)
            # Advance to the next result page, if any.
            try:
                next_button = self.browser.find_element_by_link_text('下一页')
            except Exception:
                print('Reached the last page')
                break
            print('Navigating to the next page')
            ActionChains(self.browser).click(next_button).perform()
            time.sleep(0.5)
        toc = datetime.now().timestamp()
        print('Fetching data time cost %.1f sec' % (toc - tic))
        return list(data_dict.values())

    def get_company_detail(self):
        """Scrape the currently-open company detail page and upsert it.

        Parses the dt/dd pairs of the primary-info section, normalizes the
        registered capital and establish date, and creates or updates the
        ``Company`` row.

        :return: the company as a dict, or None when no credit code found.
        """
        time.sleep(1)
        data_dict = {}
        dt_list = self.browser.find_elements_by_xpath('//*[@id="primaryInfo"]/div/div/dl/dt')
        dd_list = self.browser.find_elements_by_xpath('//*[@id="primaryInfo"]/div/div/dl/dd')
        for dt, dd in zip(dt_list, dd_list):
            data_dict[dt.text.strip().replace(':', '')] = dd.text.strip()
        # Registered capital, e.g. "500万元" -> 500.0.
        # BUG FIX: re.search returns None on no match, so calling .groups()
        # unconditionally raised AttributeError; guard on the match object.
        rc = data_dict.get('注册资本')
        if rc:
            m = re.search(r'(\d+(?:\.\d+)?)万', rc)
            rc = float(m.group(1)) if m else None
        # Establish date, e.g. "2004年12月08日" -> datetime.date.
        d = data_dict.get('成立日期') or data_dict.get('注册日期')
        if d:
            m = re.search(r'(\d+)年(\d+)月(\d+)日', d)
            d = date(int(m.group(1)), int(m.group(2)), int(m.group(3))) if m else None
        # Company name / credit code / representative / address, with the
        # alternative labels used by non-corporate registrations.
        n = data_dict.get('企业名称') or data_dict.get('名称')
        credit_code = data_dict.get('统一社会信用代码') or data_dict.get('注册号')
        rep = data_dict.get('法定代表人') or data_dict.get('负责人') or data_dict.get('经营者')
        loc = data_dict.get('住所') or data_dict.get('营业场所')
        if not credit_code:
            return None
        c = self.mysql.query(Company).filter_by(credit_code=credit_code).first()
        if not c:
            c = Company(
                register_capital=rc,
                representative=rep,
                establish_date=d,
                business_scope=data_dict.get('经营范围'),
                # Consistency fix: use n (with its '名称' fallback) like the
                # update branch below, instead of the raw '企业名称' lookup.
                company_name=n,
                company_address=loc,
                credit_code=credit_code,
                update_ts=datetime.now().timestamp()
            )
            self.mysql.add(c)
            self.mysql.commit()
        else:
            c.register_capital = rc
            c.representative = rep
            c.establish_date = d
            c.business_scope = data_dict.get('经营范围')
            c.company_name = n
            c.company_address = loc
            c.update_ts = datetime.now().timestamp()
            self.mysql.commit()
        print('Saved a company item (ID: %s)' % credit_code)
        return c.to_dict()

    def quit(self):
        """Close the browser, DB session and virtual display."""
        self.browser.quit()
        self.mysql.close()
        self.display.stop()
        print('Quiting browser')

    def run(self):
        """Full pipeline: solve captcha, scrape results, clean up.

        :return: the collected list of company dicts (also kept in
            ``self.data``).
        """
        tic = datetime.now().timestamp()
        self.verify()
        self.data = self.get_company_list()
        self.quit()
        toc = datetime.now().timestamp()
        print('Total time cost %.1f sec' % (toc - tic))
        return self.data
if __name__ == '__main__':
    # Demo run: search for "支付宝" using a real Chrome browser.
    crawler = GsxtSpider('支付宝', True)
    results = crawler.run()
    print(results)
Python
1
https://gitee.com/tikazyq/spider-gsxt.git
git@gitee.com:tikazyq/spider-gsxt.git
tikazyq
spider-gsxt
spider-gsxt
master

搜索帮助