Two sample lines from the access log (each request is one line in the real file):

127.0.0.1 - - [19/Jun/2012:09:16:22 +0100] "GET /GO.jpg HTTP/1.1" 499 0 "http://domain.com/htm_data/7/1206/758536.html" "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; SE 2.X MetaSr 1.0)"
127.0.0.1 - - [19/Jun/2012:09:16:25 +0100] "GET /Zyb.gif HTTP/1.1" 499 0 "http://domain.com/htm_data/7/1206/758536.html" "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; QQDownload 711; SV1; .NET4.0C; .NET4.0E; 360SE)"
import re
from operator import itemgetter

def parser_logfile(logfile):
    # One regex pulls the seven fields out of each access-log line.
    # All segments are raw strings so the backslashes reach the regex engine intact.
    pattern = (r'(\d+\.\d+\.\d+\.\d+)\s-\s-\s'   # IP address
               r'\[(.+)\]\s'                     # datetime
               r'"GET\s(.+)\s\w+/.+"\s'          # requested file
               r'(\d+)\s'                        # status
               r'(\d+)\s'                        # bandwidth
               r'"(.+)"\s'                       # referrer
               r'"(.+)"')                        # user agent
    url_list = []
    with open(logfile, 'r') as fi:
        for line in fi:
            url_list.append(re.findall(pattern, line))
    return url_list

def parser_urllist(url_list):
    # The referrer is the sixth captured group (index 5) of each match.
    urls = []
    for url in url_list:
        for r in url:
            urls.append(r[5])
    return urls

def get_urldict(urls):
    # Tally how many times each referrer appears.
    d = {}
    for url in urls:
        d[url] = d.get(url, 0) + 1
    return d

def url_count(logfile):
    url_list = parser_logfile(logfile)
    urls = parser_urllist(url_list)
    totals = get_urldict(urls)
    return totals

if __name__ == '__main__':
    urls_with_counts = url_count('example.log')
    sorted_by_count = sorted(urls_with_counts.items(), key=itemgetter(1), reverse=True)
    print(sorted_by_count)
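For comparison, the same referrer tally can be written more compactly with collections.Counter from the standard library. This is only a sketch under the same assumptions as the script above (an example.log file in this log format and the same regex), not a replacement for it:

import re
from collections import Counter

# Same capture groups as in parser_logfile(); group 6 is the referrer.
PATTERN = (r'(\d+\.\d+\.\d+\.\d+)\s-\s-\s'
           r'\[(.+)\]\s'
           r'"GET\s(.+)\s\w+/.+"\s'
           r'(\d+)\s(\d+)\s'
           r'"(.+)"\s"(.+)"')

def referrer_counts(logfile):
    # Counter does the dictionary bookkeeping that get_urldict() does by hand.
    with open(logfile, 'r') as fi:
        return Counter(m.group(6)
                       for line in fi
                       for m in re.finditer(PATTERN, line))

if __name__ == '__main__':
    # most_common() returns (referrer, count) pairs, highest count first.
    print(referrer_counts('example.log').most_common())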
The sort key in the script comes from operator.itemgetter; a quick interactive session shows how it picks which element of each tuple drives the ordering:

>>> from operator import itemgetter
>>> a=[('b',2),('a',1),('c',0)]
>>> s=sorted(a,key=itemgetter(1))
>>> s
[('c', 0), ('a', 1), ('b', 2)]
>>> s=sorted(a,key=itemgetter(0))
>>> s
[('a', 1), ('b', 2), ('c', 0)]
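Continuing the same session, passing reverse=True gives the descending order the script uses, so the most frequent entry comes first:

>>> sorted(a, key=itemgetter(1), reverse=True)
[('b', 2), ('a', 1), ('c', 0)]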
Running the script against the full sample log prints the referrers sorted by hit count, most frequent first:

[('http://domain.com/htm_data/7/1206/758536.html', 141), ('http://domain.com/?q=node&page=12', 3), ('http://website.net/htm_data/7/1206/758536.html', 1)]
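If a tidier report is wanted, the same result can be printed one referrer per line. A small sketch, assuming the sorted_by_count list built in the script above:

# Right-align the counts so the busiest referrers stand out.
for referrer, hits in sorted_by_count:
    print(f'{hits:6d}  {referrer}')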