#!/usr/bin/python3
#
# This script is still used in 2024.
#
# get-www-stats - Debian web site popularity statistics
# Copyright 2010 Marcin Owsiany
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

# This program is run from the debwww crontab on the Debian website master server.

try:
    import json
except ImportError:
    # Fallback for old interpreters that lack the stdlib json module.
    import simplejson as json

from gzip import open as gzopen
from glob import glob
from collections import defaultdict

import logging
import os
import re
import sys

#logging.basicConfig(level=logging.INFO)

log_files = glob('/srv/weblogs.debian.org/incoming/*.debian.org/www.debian.org-access.log*')

# Build a list of (sort key, path, is_gzipped) tuples.  Rotated logs carry a
# numeric timestamp suffix (and possibly a .gz extension); the current log has
# no suffix and gets an artificially high key so that it sorts last.
logs = []
for f in log_files:
    parts = os.path.split(f)[-1].split('-')
    if len(parts) == 2:
        logs.append((99999999, f, False))
    elif len(parts) == 3:
        if f.endswith('.gz'):
            gzipped = True
            stamp = parts[2][:-3]
        else:
            gzipped = False
            stamp = parts[2]
        logs.append((int(stamp), f, gzipped))
    else:
        logging.warning('Skipping unexpected filename [%s].' % f)

counts = defaultdict(int)

for stamp, logfile, gzipped in sorted(logs):
    logging.info('Reading %s.' % logfile)
    opener = gzopen if gzipped else open
    with opener(logfile, mode='rt') as log:
        for line in log:
            # Combined log format: tokens[5] is the request method (with the
            # opening quote), tokens[6] the URL and tokens[8] the status code.
            tokens = line.split(maxsplit=9)
            if len(tokens) < 9:
                # Skip truncated or otherwise malformed lines.
                continue
            if tokens[8] != '200':
                continue
            if tokens[5] != '"GET':
                continue
            # Normalize the URL: drop fragments and query strings, collapse
            # repeated slashes and dot segments, strip (possibly
            # language-negotiated) file extensions, and map a trailing slash
            # to /index.
            url = tokens[6]
            url = re.sub(r'#.*$', '', url)
            url = re.sub(r'\?.*$', '', url)
            url = re.sub(r'//+', '/', url)
            url = re.sub(r'/(\./)+', '/', url)
            url = re.sub(r'^/\.\./', '/', url)
            url = re.sub(r'/[^./]*/\.\./', '/', url)
            url = re.sub(r'\.([a-z]{2}|[a-z]{2}-[a-z]{2})\.(html|xml|rdf|pdf)$', '', url)
            url = re.sub(r'\.(html|xml|rdf|pdf)(\.([a-z]{2}|[a-z]{2}-[a-z]{2}))?$', '', url)
            url = re.sub(r'/$', '/index', url)
            counts[url] += 1

# Sanity checks: the front page must be present and reasonably popular.
# The fatal (<10k) check has to come before the warning (<50k) check,
# otherwise it can never be reached.
if '/index' not in counts:
    raise Exception('No data for /index')
elif counts['/index'] < 10000:
    raise Exception('Less than 10k hits for /index')
elif counts['/index'] < 50000:
    logging.warning('Less than 50k hits for /index')

# Emit a JSON list of [count, url] pairs, most popular first, skipping URLs
# with fewer than three hits.
json.dump(sorted([(v, k) for (k, v) in counts.items() if v > 2], reverse=True),
          sys.stdout, indent=2)

# for v, k in sorted([(v, k) for (k, v) in counts.iteritems()], reverse=True):
#     print '%8d %s' % (v, k)
#     if v < 3:
#         break

# Perl original:
# @f=split;
# $s = $f[6];
# $s =~ s,\...\.html,,;
# $s =~ s,/$,/index,;
# $S{$s} += 1;
# END{
#   printf "%d normalized URLs\n", scalar keys %S;
#   foreach my $k (sort { $S{$b} <=> $S{$a} } keys %S) {
#     printf "%8d %s\n", $S{$k}, $k
#   }
# }
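
# For reference, the JSON written to stdout is a list of [count, url] pairs
# sorted by descending count.  The entries below are made-up, illustrative
# values only, not real statistics:
#
#   [
#     [ 123456, "/index" ],
#     [   4321, "/distrib/index" ],
#     [      3, "/releases/stable/index" ]
#   ]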