Adjusted comments

lizanle 2015-01-27 14:21:51 +08:00
parent 5afbf6dbca
commit d8b5252279
1 changed file with 150 additions and 127 deletions


@@ -1,62 +1,73 @@
# Time 2015-01-26 17:30:16
# Author lizanle
# Description Log helper class
module SystemLogHelper
  class SystemLog
    class << self
      # Time 2015-01-26 17:29:17
      # Author lizanle
      # Description Pagination (supports multi-keyword search)
      def logo_data(page, per, search, day)
        logs = find_all_logs day
        # An empty array means the log file does not exist
        if logs.empty?
          return logs
        end
        # The search parameter decides whether filtering is needed
        keywords = search
        if keywords && !keywords.strip.blank?
          # Turn the keywords into an array of regular expressions
          keywords = keywords.strip.split(/\s+/).collect! { |w| Regexp.new(w, 'i') }
          # A record must match every keyword; log =~ r tests the record against the regex r
          logs = logs.find_all do |log|
            keywords.all? { |r| log =~ r }
          end
          # Paginate with Kaminari
          logs = Kaminari.paginate_array(logs).page(page).per(per).collect! { |log| parse(log) }
          # Wrap the matches in the paginated records in a highlight span; \0 stands for the text matched by r
          logs.collect! do |log|
            keywords.each { |r| log.gsub!(r, '<span class="search_results">\0</span>') }
            log
          end
        else
          logs = Kaminari.paginate_array(logs).page(page).per(per).collect! { |log| parse(log) }
        end
        logs
      end

      # Time 2015-01-26 17:28:57
      # Author lizanle
      # Description Clear the log
      def clear day
        if File::exists?(logfile_path day)
          File.open(logfile_path(day), 'w') do |f|
            f.print ''
          end
        end
      end

      # Time 2015-01-26 17:28:49
      # Author lizanle
      # Description Read the log
      private
      def find_all_logs day
        if File::exists?(logfile_path day)
          File.open(logfile_path day) do |f|
            # Split the file contents on "Processing" and reverse; the newest record is dropped
            # because the newest entry is always the request to the system log page itself
            f.read.split("Processing").reverse[1..-1]
          end
        else
          []
        end
      end

      # Time 2015-01-26 17:28:34
      # Author lizanle
      # Description Path of the log file, normally under Rails.root/log; depending on the
      # environment it is written to product.log, development.log or test.log
      def logfile_path day
        # Normalize the date to the form 2015-01-01
        unless day.nil?
          dayArr = day.split('-')
          if dayArr[1].length == 1
            dayArr[1] = "0" + dayArr[1]
@@ -65,93 +76,105 @@ module SystemLogHelper
            dayArr[2] = "0" + dayArr[2]
          end
          day = dayArr.join('-')
        end
        # If the requested day is not today, the log file name carries a date suffix
        if !day.nil? && !day.strip.blank? && day != Time.now.strftime("%Y-%m-%d")
          File.join(Rails.root, "log", "#{Rails.env}.log.#{day.gsub('-', '')}")
        else
          File.join(Rails.root, "log", "#{Rails.env}.log")
        end
      end

      # Time 2015-01-26 17:28:22
      # Author lizanle
      # Description Replace line breaks and strip ANSI color codes
      def parse(log)
        ERB::Util.html_escape(log.gsub(/\e\[[\d;m]+/, '')).gsub("\n", "<br/>")
      end

      # Time 2015-01-26 17:28:07
      # Author lizanle
      # Description Regular expression for the response line, e.g.
      # 2015-01-20 11:31:13 INFO -- Completed 200 OK in 125ms (Views: 81.0ms | ActiveRecord: 2.0ms)
      def response_regex
        'Completed \d+ \w+ in (\d+)ms \(Views: (\d+\.\d+)?ms \| ActiveRecord: (\d+\.\d+)?ms\)'
      end

      # Time 2015-01-26 17:27:51
      # Author lizanle
      # Description Extract the URL, host, timestamp and other fields from one log record
      def get_status(paragraph)
        request_regex = 'Started GET \"(\/.*)\" for ([\d]+\.[\d]+\.[\d]+\.[\d]+) at [\d]*-([\d]*-[\d]* [\d]*:[\d]*:[\d]*)'
        controller_regex = 'Processing by ([\w]+#[\w]+)'
        page_time_regex = 'Views: \d+(\.\d+)?ms'                # not used below
        activeRecord_time_regex = 'ActiveRecord: \d+(\.\d+)?ms' # not used below
        # Parse the request line: URL, host and time
        if paragraph.match(request_regex) != nil
          request_url = paragraph.match(request_regex)[1] # capture groups of the regex can be indexed like an array
          request_host = paragraph.match(request_regex)[2]
          request_at = paragraph.match(request_regex)[3]
        end
        # Parse the controller#action
        if paragraph.match(controller_regex) != nil
          controller_name = paragraph.match(controller_regex)[1]
        end
        # Parse the response times and compute their shares of the total time
        if paragraph.match(response_regex) != nil
          #print(paragraph.match(response_regex))
          total_time = paragraph.match(response_regex)[1]
          page_time = paragraph.match(response_regex)[2]
          activeRecord_time = paragraph.match(response_regex)[3]
          page_time_percent = page_time.to_f / total_time.to_f
          activeRecord_time_percent = activeRecord_time.to_f / total_time.to_f
        end
        # Return the parsed fields as one record
        request_status = [request_url, request_host, request_at,
                          controller_name, total_time, page_time, page_time_percent, activeRecord_time, activeRecord_time_percent]
        request_status
      end

      # Time 2015-01-26 16:41:51
      # Author lizanle
      # Description Analyze the log
      public
      def analysis day
        csv = Array.new
        # If the file does not exist, return an empty array
        if File::exists?(logfile_path day)
          File.open(logfile_path(day), "r:utf-8") do |file|
            paragraph = ""
            begin_flag = false
            # Examine the file line by line
            file.each do |line|
              # A line matching "Started GET" starts a new paragraph
              #print(line.match('[\d]*-([\d]*-[\d]* [\d]*:[\d]*:[\d]*) INFO -- Started GET ') == nil)
              if (line.match('[\d]*-([\d]*-[\d]* [\d]*:[\d]*:[\d]*) \w+ -- Started GET ') != nil)
                if !begin_flag
                  begin_flag = true
                  paragraph.concat(line)
                else
                  # Start of the next paragraph: flush the previous one if it contains a response line
                  if (paragraph.match(response_regex) != nil)
                    csv << get_status(paragraph)
                  end
                  begin_flag = true
                  paragraph = line
                end
              else
                # Continuation line: append it to the current paragraph
                if begin_flag
                  paragraph.concat(line)
                end
              end
            end
          end
        end
        csv
      end
    end
  end
end
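
For orientation, a minimal sketch of how this helper might be driven from a Rails controller follows. The controller name, route and parameter names are assumptions made purely for illustration and are not part of this commit; only SystemLogHelper::SystemLog and its public methods logo_data, clear and analysis come from the file above.

    # Hypothetical caller, for illustration only. The controller and params are
    # assumed; the helper methods are the ones defined in the diff above.
    class SystemLogController < ApplicationController
      def index
        # Paginated records for the given day, filtered by space-separated
        # keywords and with matches wrapped in <span class="search_results">.
        @logs = SystemLogHelper::SystemLog.logo_data(params[:page] || 1,
                                                     30,
                                                     params[:search],
                                                     params[:day])
      end

      def clear
        # Truncates the selected day's log file.
        SystemLogHelper::SystemLog.clear(params[:day])
        redirect_to action: :index
      end

      def analysis
        # Each row is [url, host, requested_at, controller#action, total ms,
        # view ms, view share, ActiveRecord ms, ActiveRecord share].
        @rows = SystemLogHelper::SystemLog.analysis(params[:day])
      end
    end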