import urllib2
enable_proxy = True
proxy_handler = urllib2.ProxyHandler({"http" : 'http://some-proxy.com:8080'})
null_proxy_handler = urllib2.ProxyHandler({})
if enable_proxy:
    opener = urllib2.build_opener(proxy_handler)
else:
    opener = urllib2.build_opener(null_proxy_handler)
urllib2.install_opener(opener)
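Note that install_opener() replaces the module-wide default opener, so every later urllib2.urlopen() call is affected. If the proxy should apply to a single request only, the built opener can be used directly instead of being installed; a minimal sketch:

import urllib2

proxy_opener = urllib2.build_opener(
    urllib2.ProxyHandler({'http': 'http://some-proxy.com:8080'}))
# Only this call goes through the proxy; urllib2.urlopen() keeps its defaults.
response = proxy_opener.open('http://www.google.com')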
import urllib2
import socket

socket.setdefaulttimeout(10)  # time out after 10 seconds
urllib2.socket.setdefaulttimeout(10)  # another way to do the same
import urllib2
response = urllib2.urlopen('http://www.google.com', timeout=10)
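If the deadline passes during connection setup, the failure typically surfaces as urllib2.URLError wrapping the underlying socket.timeout; a minimal sketch of catching it:

import urllib2

try:
    response = urllib2.urlopen('http://www.google.com', timeout=10)
except urllib2.URLError, e:
    # e.reason is a socket.timeout instance when the connection timed out
    print 'request failed:', e.reason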
import urllib2
request = urllib2.Request(uri)
request.add_header('User-Agent', 'fake-client')
response = urllib2.urlopen(request)
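The same headers can also be supplied as a dict when constructing the Request, which is equivalent to calling add_header() for each entry:

import urllib2

request = urllib2.Request(uri, headers={'User-Agent': 'fake-client'})
response = urllib2.urlopen(request)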
import urllib2
response = urllib2.urlopen('http://www.google.cn')
redirected = response.geturl() != 'http://www.google.cn'  # True if urlopen followed a redirect
import urllib2
class RedirectHandler(urllib2.HTTPRedirectHandler):
    def http_error_301(self, req, fp, code, msg, headers):
        pass
    def http_error_302(self, req, fp, code, msg, headers):
        pass
opener = urllib2.build_opener(RedirectHandler)
opener.open('http://www.google.cn')
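With both handlers overridden to do nothing, a 301/302 response is no longer followed; it falls through to the default error handler and surfaces as urllib2.HTTPError, which the caller can inspect:

import urllib2

opener = urllib2.build_opener(RedirectHandler)
try:
    opener.open('http://www.google.cn')
except urllib2.HTTPError, e:
    # The redirect was not followed, so its status code lands here.
    print e.code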
import urllib2
import cookielib
cookie = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookie))
response = opener.open('http://www.google.com')
for item in cookie:
    if item.name == 'some_cookie_item_name':
        print item.value
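cookielib also provides file-backed jars for cookies that need to survive between runs; a minimal sketch using MozillaCookieJar (the filename here is just an example):

import urllib2
import cookielib

cookie = cookielib.MozillaCookieJar('cookies.txt')  # example filename
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookie))
opener.open('http://www.google.com')
# Persist the received cookies, including session cookies.
cookie.save(ignore_discard=True, ignore_expires=True)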
import urllib2

request = urllib2.Request(uri, data=data)
request.get_method = lambda: 'PUT'  # or 'DELETE'
response = urllib2.urlopen(request)
import urllib2
try:
    response = urllib2.urlopen('http://restrict.web.com')
except urllib2.HTTPError, e:
    print e.code
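HTTPError only covers the case where the server actually answered with an error status; failures before any response arrives (DNS errors, refused connections) raise the parent class urllib2.URLError. Since HTTPError subclasses URLError, it must be caught first:

import urllib2

try:
    response = urllib2.urlopen('http://restrict.web.com')
except urllib2.HTTPError, e:
    print 'server replied with error:', e.code
except urllib2.URLError, e:
    print 'failed to reach server:', e.reason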
Debug Log
import urllib2
httpHandler = urllib2.HTTPHandler(debuglevel=1)
httpsHandler = urllib2.HTTPSHandler(debuglevel=1)
opener = urllib2.build_opener(httpHandler, httpsHandler)
urllib2.install_opener(opener)
response = urllib2.urlopen('http://www.google.com')
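With debuglevel=1, httplib echoes each outgoing request line, the headers, and the response status to stdout, so the full exchange can be inspected without an external sniffer.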
# -*- coding: utf-8 -*-
from bs4 import BeautifulSoup
import urllib2
import datetime
import PyRSS2Gen
import re
import sys
import os

reload(sys)
sys.setdefaultencoding('utf-8')


class RssSpider():
    def __init__(self):
        self.myrss = PyRSS2Gen.RSS2(title='OSChina',
                                    link='http://my.oschina.net',
                                    description=str(datetime.date.today()),
                                    pubDate=datetime.datetime.now(),
                                    lastBuildDate=datetime.datetime.now(),
                                    items=[])
        self.xmlpath = r'/var/www/myrss/oschina.xml'
        self.baseurl = "http://www.oschina.net/blog"
        #if os.path.isfile(self.xmlpath):
        #    os.remove(self.xmlpath)

    def useragent(self, url):
        # Spoof a desktop browser so the site serves the normal page.
        i_headers = {"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) "
                                   "AppleWebKit/537.36 (KHTML, like Gecko) "
                                   "Chrome/36.0.1985.125 Safari/537.36",
                     "Referer": 'http://baidu.com/'}
        req = urllib2.Request(url, headers=i_headers)
        html = urllib2.urlopen(req).read()
        return html

    def enterpage(self, url):
        # Matches timestamps of the form 2014-07-20 12:38.
        pattern = re.compile(r'\d{4}\S\d{2}\S\d{2}\s\d{2}\S\d{2}')
        rsp = self.useragent(url)
        soup = BeautifulSoup(rsp)
        timespan = soup.find('div', {'class': 'BlogStat'})
        timespan = str(timespan).strip().replace('\n', '').decode('utf-8')
        match = pattern.search(timespan)
        # Fall back to today's date if the page carries no timestamp.
        timestr = str(datetime.date.today())
        if match:
            timestr = match.group()
        ititle = soup.title.string
        div = soup.find('div', {'class': 'BlogContent'})
        rss = PyRSS2Gen.RSSItem(title=ititle,
                                link=url,
                                description=str(div),
                                pubDate=timestr)
        return rss

    def getcontent(self):
        rsp = self.useragent(self.baseurl)
        soup = BeautifulSoup(rsp)
        ul = soup.find('div', {'id': 'RecentBlogs'})
        for li in ul.findAll('li'):
            div = li.find('div')
            if div is not None:
                alink = div.find('a')
                if alink is not None:
                    link = alink.get('href')
                    print link
                    html = self.enterpage(link)
                    self.myrss.items.append(html)

    def SaveRssFile(self, filename):
        # Note: the feed is always written to self.xmlpath; the
        # `filename` argument is kept only for the original call signature.
        finallxml = self.myrss.to_xml(encoding='utf-8')
        with open(self.xmlpath, 'w') as f:
            f.write(finallxml)


if __name__ == '__main__':
    rssSpider = RssSpider()
    rssSpider.getcontent()
    rssSpider.SaveRssFile('oschina.xml')
The generated feed is then served at http://104.224.129.109/myrss/oschina.xml