import re
import urllib2
import cookielib
from time import clock
from collections import OrderedDict
from pyquery import PyQuery as pq


class ProxyScrapy(object):
    def __init__(self):
        self.proxy_robot = ProxyRobot()
        self.current_proxy = None
        self.cookie = cookielib.CookieJar()

    def __builder_proxy_cookie_opener(self):
        # Build an opener that carries the shared cookie jar and, when
        # PROXY_ENABLE is set, routes HTTP traffic through a random proxy.
        cookie_handler = urllib2.HTTPCookieProcessor(self.cookie)
        handlers = [cookie_handler]
        if PROXY_ENABLE:
            self.current_proxy = ip_port = self.proxy_robot.get_random_proxy()
            # Strip the leading "http://" so ProxyHandler receives a bare host:port.
            proxy_handler = urllib2.ProxyHandler({'http': ip_port[7:]})
            handlers.append(proxy_handler)
        opener = urllib2.build_opener(*handlers)
        urllib2.install_opener(opener)
        return opener

    def get_html_body(self, url):
        opener = self.__builder_proxy_cookie_opener()
        request = urllib2.Request(url)
        #request.add_header("Accept-Encoding", "gzip,deflate,sdch")
        #request.add_header("Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8")
        #request.add_header("Cache-Control", "no-cache")
        #request.add_header("Connection", "keep-alive")
        try:
            response = opener.open(request, timeout=2)
            http_code = response.getcode()
            if http_code == 200:
                if PROXY_ENABLE:
                    self.proxy_robot.handle_success_proxy(self.current_proxy)
                html = response.read()
                return html
            else:
                # Non-200 response: penalize the proxy and retry with a fresh one.
                if PROXY_ENABLE:
                    self.proxy_robot.handle_double_proxy(self.current_proxy)
                return self.get_html_body(url)
        except Exception as inst:
            print inst, self.current_proxy
            # Guard with PROXY_ENABLE, matching the non-exception branches.
            if PROXY_ENABLE:
                self.proxy_robot.handle_double_proxy(self.current_proxy)
            # Retry until the page is fetched; note this recursion is unbounded.
            return self.get_html_body(url)
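
# The code above references several module-level names that the snippet never
# defines: PROXY_ENABLE, ProxyRobot, curl, and url. The sketch below is a
# minimal stand-in so the module can be exercised end to end; the method names
# are taken from the calls above, but the bodies, the proxy address, and the
# URL template are assumptions, not part of the original code.

PROXY_ENABLE = False  # set True to route requests through the proxy pool


class ProxyRobot(object):
    """Placeholder proxy pool; the real implementation is not in the snippet."""

    def get_random_proxy(self):
        # Expected to return a string of the form "http://host:port".
        return 'http://127.0.0.1:8080'

    def handle_success_proxy(self, proxy):
        pass  # e.g. raise the proxy's score

    def handle_double_proxy(self, proxy):
        pass  # e.g. lower the proxy's score or drop it


# Hypothetical search-URL template: first placeholder is the keyword slug,
# second is the page number.
url = 'http://example.com/search/%s/page/%s'

# Shared scraper instance used by search_keywords_rank() below.
curl = ProxyScrapy()
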
def search_keywords_rank(keyword_company_name, keywords):
    def get_context(url):
        start = clock()
        # `curl` is expected to be a module-level ProxyScrapy instance.
        html = curl.get_html_body(url)
        finish = clock()
        print url, (finish - start)
        d = pq(html)
        items = d("#J-items-content .ls-item")
        items_c = len(items)
        print items_c
        # A full result page appears to hold 38 items; fewer suggests a
        # partial or blocked response, so fetch the same page again.
        if items_c < 38:
            return get_context(url)
        return items, items_c
    result = OrderedDict()
    for keyword in keywords:
        for page_index in range(1, 9):
            # `url` is expected to be a module-level search-URL template with two
            # placeholders: the keyword (whitespace collapsed to "_") and the page number.
            u = url % (re.sub(r'\s+', '_', keyword.strip()), page_index)
            items, items_c = get_context(u)
            b = False  # found flag for this keyword
            for item_index in range(0, items_c):
                e = items.eq(item_index).find('.title a')
                p_title = e.text()
                p_url = e.attr('href')
                e = items.eq(item_index).find('.cright h3 .dot-product')
                company_name = e.text()
                company_url = e.attr('href')
                if keyword_company_name in company_url:
                    # 38 items per page; pages after the first appear to add 5 extra slots.
                    total_index = (page_index - 1) * 38 + item_index + 1 + (0 if page_index == 1 else 5)
                    print 'page %s, index %s, total index %s' % (page_index, item_index + 1, total_index)
                    b = True
                    if keyword not in result:
                        result[keyword] = (p_title, p_url, page_index, item_index + 1, total_index, u)
                    break
            if b:
                break
    return result
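
# Example driver under the assumptions sketched above (PROXY_ENABLE, curl,
# url); the company slug and keywords here are made-up placeholders.
if __name__ == '__main__':
    ranks = search_keywords_rank('example-company',
                                 ['example keyword one', 'example keyword two'])
    for kw, (title, link, page, index, total, page_url) in ranks.items():
        print '%s -> page %s, position %s (overall %s)' % (kw, page, index, total)
        print '    %s  %s' % (title, link)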