Commit 6f5ae03a authored by Andreas Jung

removed old Medusa release

parent 6786b136
# Make medusa into a package
__version__='$Revision: 1.5 $'[11:-2]
# -*- Mode: Python; tab-width: 4 -*-
# It is tempting to add an __int__ method to this class, but it's not
# a good idea. This class tries to gracefully handle integer
# overflow, and to hide this detail from both the programmer and the
# user. Note that the __str__ method can be relied on for printing out
# the value of a counter:
#
# >>> print 'Total Clients: %s' % self.total_clients
#
# If you need to do arithmetic with the value, use the 'as_long'
# method; the use of long arithmetic is a reminder that the counter
# will overflow.
class counter:
"general-purpose counter"
def __init__ (self, initial_value=0):
self.value = initial_value
def increment (self, delta=1):
result = self.value
try:
self.value = self.value + delta
except OverflowError:
self.value = long(self.value) + delta
return result
def decrement (self, delta=1):
result = self.value
try:
self.value = self.value - delta
except OverflowError:
self.value = long(self.value) - delta
return result
def as_long (self):
return long(self.value)
def __nonzero__ (self):
return self.value != 0
def __repr__ (self):
return '<counter value=%s at %x>' % (self.value, id(self))
def __str__ (self):
return str(long(self.value))[:-1]
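# A minimal usage sketch (values hypothetical), guarded so importing
# this module stays side-effect free. Note that increment() and
# decrement() return the value from *before* the adjustment.
if __name__ == '__main__':
    c = counter()
    print c.increment()             # prints 0; the value is now 1
    print c.increment (5)           # prints 1; the value is now 6
    print c.as_long() * 2           # prints 12 (long arithmetic)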
# -*- Mode: Python; tab-width: 4 -*-
#
# Author: Sam Rushing <rushing@nightmare.com>
# Copyright 1997 by Sam Rushing
# All Rights Reserved.
#
RCS_ID = '$Id: default_handler.py,v 1.4 2000/06/02 14:22:48 brian Exp $'
# standard python modules
import os
import regex
import posixpath
import stat
import string
import time
# medusa modules
import http_date
import http_server
import mime_type_table
import status_handler
import producers
# from <lib/urllib.py>
_quoteprog = regex.compile('%[0-9a-fA-F][0-9a-fA-F]')
def unquote(s):
i = 0
n = len(s)
res = []
while 0 <= i < n:
j = _quoteprog.search(s, i)
if j < 0:
res.append(s[i:])
break
res.append(s[i:j] + chr(string.atoi(s[j+1:j+3], 16)))
i = j+3
return string.join (res, '')
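# a quick sketch of unquote() on a hypothetical path: '%7E' and '%2E'
# decode to '~' and '.' respectively:
# >>> unquote ('%7Euser/index%2Ehtml')
# '~user/index.html'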
# split a uri
# <path>;<params>?<query>#<fragment>
path_regex = regex.compile (
# path params query fragment
        '\\([^;?#]*\\)\\(;[^?#]*\\)?\\(\\?[^#]*\\)?\\(#.*\\)?'
)
def split_path (path):
if path_regex.match (path) != len(path):
raise ValueError, "bad path"
else:
return map (lambda i,r=path_regex: r.group(i), range(1,5))
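# e.g. a URI with all four parts splits into its pieces:
# >>> split_path ('/doc;v=2?q=1#top')
# ['/doc', ';v=2', '?q=1', '#top']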
# This is the 'default' handler. it implements the base set of
# features expected of a simple file-delivering HTTP server. file
# services are provided through a 'filesystem' object, the very same
# one used by the FTP server.
#
# You can replace or modify this handler if you want a non-standard
# HTTP server. You can also derive your own handler classes from
# it.
#
# support for handling POST requests is available in the derived
# class <default_with_post_handler>, defined below.
#
from counter import counter
class default_handler:
valid_commands = ['get', 'head']
IDENT = 'Default HTTP Request Handler'
# Pathnames that are tried when a URI resolves to a directory name
directory_defaults = [
'index.html',
'default.html'
]
default_file_producer = producers.file_producer
def __init__ (self, filesystem):
self.filesystem = filesystem
# count total hits
self.hit_counter = counter()
# count file deliveries
self.file_counter = counter()
# count cache hits
self.cache_counter = counter()
hit_counter = 0
def __repr__ (self):
return '<%s (%s hits) at %x>' % (
self.IDENT,
self.hit_counter,
id (self)
)
# always match, since this is a default
def match (self, request):
return 1
# handle a file request, with caching.
def handle_request (self, request):
if request.command not in self.valid_commands:
request.error (400) # bad request
return
self.hit_counter.increment()
[path, params, query, fragment] = split_path (request.uri)
        # unquote path if necessary (thanks to Skip Montanaro for pointing
        # out that we must unquote in piecemeal fashion).
if '%' in path:
path = unquote (path)
# strip off all leading slashes
while path and path[0] == '/':
path = path[1:]
if self.filesystem.isdir (path):
if path and path[-1] != '/':
request['Location'] = 'http://%s/%s/' % (
request.channel.server.server_name,
path
)
request.error (301)
return
# we could also generate a directory listing here,
# may want to move this into another method for that
# purpose
found = 0
if path and path[-1] != '/':
path = path + '/'
for default in self.directory_defaults:
p = path + default
if self.filesystem.isfile (p):
path = p
found = 1
break
if not found:
request.error (404) # Not Found
return
elif not self.filesystem.isfile (path):
request.error (404) # Not Found
return
file_length = self.filesystem.stat (path)[stat.ST_SIZE]
ims = get_header (IF_MODIFIED_SINCE, request.header)
length_match = 1
if ims:
length = IF_MODIFIED_SINCE.group(4)
if length:
try:
length = string.atoi (length)
if length != file_length:
length_match = 0
except:
pass
ims_date = 0
if ims:
ims_date = http_date.parse_http_date (ims)
try:
mtime = self.filesystem.stat (path)[stat.ST_MTIME]
except:
request.error (404)
return
if length_match and ims_date:
if mtime <= ims_date:
request.reply_code = 304
request.done()
self.cache_counter.increment()
return
try:
file = self.filesystem.open (path, 'rb')
except IOError:
request.error (404)
return
request['Last-Modified'] = http_date.build_http_date (mtime)
request['Content-Length'] = file_length
self.set_content_type (path, request)
if request.command == 'get':
request.push (self.default_file_producer (file))
self.file_counter.increment()
request.done()
def set_content_type (self, path, request):
ext = string.lower (get_extension (path))
if mime_type_table.content_type_map.has_key (ext):
request['Content-Type'] = mime_type_table.content_type_map[ext]
else:
# TODO: test a chunk off the front of the file for 8-bit
# characters, and use application/octet-stream instead.
request['Content-Type'] = 'text/plain'
def status (self):
return producers.simple_producer (
'<li>%s' % status_handler.html_repr (self)
+ '<ul>'
+ ' <li><b>Total Hits:</b> %s' % self.hit_counter
+ ' <li><b>Files Delivered:</b> %s' % self.file_counter
+ ' <li><b>Cache Hits:</b> %s' % self.cache_counter
+ '</ul>'
)
ACCEPT = regex.compile ('Accept: \(.*\)', regex.casefold)
# HTTP/1.0 doesn't say anything about the "; length=nnnn" addition
# to this header. I suppose its purpose is to avoid the overhead
# of parsing dates...
IF_MODIFIED_SINCE = regex.compile (
'If-Modified-Since: \([^;]+\)\(\(; length=\([0-9]+\)$\)\|$\)',
regex.casefold
)
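# e.g. the pattern accepts both header forms (values hypothetical):
#   If-Modified-Since: Sat, 29 Oct 1994 19:43:31 GMT
#   If-Modified-Since: Sat, 29 Oct 1994 19:43:31 GMT; length=3419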
USER_AGENT = regex.compile ('User-Agent: \(.*\)', regex.casefold)
boundary_chars = "A-Za-z0-9'()+_,./:=?-"
CONTENT_TYPE = regex.compile (
'Content-Type: \([^;]+\)\(\(; boundary=\([%s]+\)$\)\|$\)' % boundary_chars,
regex.casefold
)
get_header = http_server.get_header
def get_extension (path):
dirsep = string.rfind (path, '/')
dotsep = string.rfind (path, '.')
if dotsep > dirsep:
return path[dotsep+1:]
else:
return ''
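# e.g. get_extension ('/docs/index.html') == 'html', while a dot in a
# directory name does not count: get_extension ('/release.1/README') == ''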
# -*- Mode: Python; tab-width: 4 -*-
import regex
import string
import time
def concat (*args):
return string.joinfields (args, '')
def join (seq, field=' '):
return string.joinfields (seq, field)
def group (s):
return '\\(' + s + '\\)'
short_days = ['sun','mon','tue','wed','thu','fri','sat']
long_days = ['sunday','monday','tuesday','wednesday','thursday','friday','saturday']
short_day_reg = group (join (short_days, '\\|'))
long_day_reg = group (join (long_days, '\\|'))
daymap = {}
for i in range(7):
daymap[short_days[i]] = i
daymap[long_days[i]] = i
hms_reg = join (3 * [group('[0-9][0-9]')], ':')
months = ['jan','feb','mar','apr','may','jun','jul','aug','sep','oct','nov','dec']
monmap = {}
for i in range(12):
monmap[months[i]] = i+1
months_reg = group (join (months, '\\|'))
# From draft-ietf-http-v11-spec-07.txt/3.3.1
# Sun, 06 Nov 1994 08:49:37 GMT ; RFC 822, updated by RFC 1123
# Sunday, 06-Nov-94 08:49:37 GMT ; RFC 850, obsoleted by RFC 1036
# Sun Nov 6 08:49:37 1994 ; ANSI C's asctime() format
# rfc822 format
rfc822_date = join (
[concat (short_day_reg,','), # day
group('[0-9][0-9]?'), # date
months_reg, # month
group('[0-9]+'), # year
hms_reg, # hour minute second
'gmt'
],
' '
)
rfc822_reg = regex.compile (rfc822_date)
def unpack_rfc822 ():
g = rfc822_reg.group
a = string.atoi
return (
a(g(4)), # year
monmap[g(3)], # month
a(g(2)), # day
a(g(5)), # hour
a(g(6)), # minute
a(g(7)), # second
0,
0,
0
)
# rfc850 format
rfc850_date = join (
[concat (long_day_reg,','),
join (
[group ('[0-9][0-9]?'),
months_reg,
group ('[0-9]+')
],
'-'
),
hms_reg,
'gmt'
],
' '
)
rfc850_reg = regex.compile (rfc850_date)
# they actually unpack the same way
def unpack_rfc850 ():
g = rfc850_reg.group
a = string.atoi
return (
a(g(4)), # year
monmap[g(3)], # month
a(g(2)), # day
a(g(5)), # hour
a(g(6)), # minute
a(g(7)), # second
0,
0,
0
)
# parsedate.parsedate - ~700/sec.
# parse_http_date - ~1333/sec.
weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
monthname = [None, 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
def build_http_date (when):
year, month, day, hh, mm, ss, wd, y, z = time.gmtime(when)
return "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
weekdayname[wd],
day, monthname[month], year,
hh, mm, ss)
def parse_http_date (d):
d = string.lower (d)
tz = time.timezone
if rfc850_reg.match (d) == len(d):
retval = int (time.mktime (unpack_rfc850()) - tz)
elif rfc822_reg.match (d) == len(d):
retval = int (time.mktime (unpack_rfc822()) - tz)
else:
return 0
# Thanks to Craig Silverstein <csilvers@google.com> for pointing
# out the DST discrepancy
if time.daylight and time.localtime(retval)[-1] == 1: # DST correction
retval = retval + (tz - time.altzone)
return retval
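# a hedged round-trip sketch: build_http_date() and parse_http_date()
# are approximate inverses (parse_http_date returns 0 for formats it
# does not recognize, and compensates for the local timezone and DST):
# >>> build_http_date (0)
# 'Thu, 01 Jan 1970 00:00:00 GMT'
# >>> parse_http_date ('Thu, 01 Jan 1970 00:00:00 GMT')
# 0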
# -*- Mode: Python; tab-width: 4 -*-
import asynchat
import socket
import string
import time # these three are for the rotating logger
import os # |
import stat # v
#
# three types of log:
# 1) file
# with optional flushing. Also, one that rotates the log.
# 2) socket
# dump output directly to a socket connection. [how do we
# keep it open?]
# 3) syslog
# log to syslog via tcp. this is a per-line protocol.
#
#
# The 'standard' interface to a logging object is simply
# log_object.log (message)
#
# a file-like object that captures output, and
# makes sure to flush it always... this could
# be connected to:
# o stdio file
# o low-level file
# o socket channel
# o syslog output...
class file_logger:
# pass this either a path or a file object.
def __init__ (self, file, flush=1, mode='a'):
if type(file) == type(''):
if (file == '-'):
import sys
self.file = sys.stdout
else:
self.file = open (file, mode)
else:
self.file = file
self.do_flush = flush
def __repr__ (self):
return '<file logger: %s>' % self.file
def write (self, data):
self.file.write (data)
self.maybe_flush()
    def writeline (self, line):
        # file objects have no writeline(); emulate it with write()
        self.file.write (line + '\n')
        self.maybe_flush()
def writelines (self, lines):
self.file.writelines (lines)
self.maybe_flush()
def maybe_flush (self):
if self.do_flush:
self.file.flush()
def flush (self):
self.file.flush()
def softspace (self, *args):
pass
def log (self, message):
if message[-1] not in ('\r', '\n'):
self.write (message + '\n')
else:
self.write (message)
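# a minimal usage sketch ('-' selects stdout, per __init__ above):
#   fl = file_logger ('-')
#   fl.log ('transaction complete')     # log() appends '\n' if missing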
# like a file_logger, but it must be attached to a filename.
# When the log gets too full, or a certain time has passed,
# it backs up the log and starts a new one. Note that backing
# up the log is done via "mv" because anything else (cp, gzip)
# would take time, during which medusa would do nothing else.
class rotating_file_logger (file_logger):
# If freq is non-None we back up "daily", "weekly", or "monthly".
# Else if maxsize is non-None we back up whenever the log gets
    # too big. If both are None we never back up.
def __init__ (self, file, freq=None, maxsize=None, flush=1, mode='a'):
self.filename = file
self.mode = mode
self.file = open (file, mode)
self.freq = freq
self.maxsize = maxsize
self.rotate_when = self.next_backup(self.freq)
self.do_flush = flush
def __repr__ (self):
return '<rotating-file logger: %s>' % self.file
# We back up at midnight every 1) day, 2) monday, or 3) 1st of month
def next_backup (self, freq):
(yr, mo, day, hr, min, sec, wd, jday, dst) = time.localtime(time.time())
        # time.mktime() takes a single 9-tuple, not nine arguments
        if freq == 'daily':
            return time.mktime ((yr,mo,day+1, 0,0,0, 0,0,-1))
        elif freq == 'weekly':
            return time.mktime ((yr,mo,day-wd+7, 0,0,0, 0,0,-1)) # wd(monday)==0
        elif freq == 'monthly':
            return time.mktime ((yr,mo+1,1, 0,0,0, 0,0,-1))
else:
return None # not a date-based backup
def maybe_flush (self): # rotate first if necessary
self.maybe_rotate()
if self.do_flush: # from file_logger()
self.file.flush()
def maybe_rotate (self):
if self.freq and time.time() > self.rotate_when:
self.rotate()
self.rotate_when = self.next_backup(self.freq)
elif self.maxsize: # rotate when we get too big
try:
if os.stat(self.filename)[stat.ST_SIZE] > self.maxsize:
self.rotate()
except os.error: # file not found, probably
self.rotate() # will create a new file
def rotate (self):
(yr, mo, day, hr, min, sec, wd, jday, dst) = time.localtime(time.time())
try:
self.file.close()
newname = '%s.ends%04d%02d%02d' % (self.filename, yr, mo, day)
try:
open(newname, "r").close() # check if file exists
newname = newname + "-%02d%02d%02d" % (hr, min, sec)
            except:                             # the YYYYMMDD-stamped name is unique
pass
os.rename(self.filename, newname)
self.file = open(self.filename, self.mode)
except:
pass
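# hedged construction sketches (path and size are hypothetical):
#   rotating_file_logger ('/var/log/medusa.log', freq='daily')
#   rotating_file_logger ('/var/log/medusa.log', maxsize=1<<20)    # ~1MB cap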
# syslog is a line-oriented log protocol - this class would be
# appropriate for FTP or HTTP logs, but not for dumping stderr to.
# TODO: a simple safety wrapper that will ensure that the line sent
# to syslog is reasonable.
# TODO: async version of syslog_client: now, log entries use blocking
# send()
import m_syslog
syslog_logger = m_syslog.syslog_client
class syslog_logger (m_syslog.syslog_client):
    svc_name = 'medusa'
    pid_str = str(os.getpid())
def __init__ (self, address, facility='user'):
m_syslog.syslog_client.__init__ (self, address)
self.facility = m_syslog.facility_names[facility]
self.address=address
def __repr__ (self):
return '<syslog logger address=%s>' % (repr(self.address))
def log (self, message):
m_syslog.syslog_client.log (
self,
'%s[%s]: %s' % (self.svc_name, self.pid_str, message),
facility=self.facility,
priority=m_syslog.LOG_INFO
)
# log to a stream socket, asynchronously
class socket_logger (asynchat.async_chat):
def __init__ (self, address):
if type(address) == type(''):
self.create_socket (socket.AF_UNIX, socket.SOCK_STREAM)
else:
self.create_socket (socket.AF_INET, socket.SOCK_STREAM)
self.connect (address)
self.address = address
def __repr__ (self):
return '<socket logger: address=%s>' % (self.address)
    def log (self, message):
        # push() comes from async_chat; the raw socket has no push method
        if message[-2:] != '\r\n':
            self.push (message + '\r\n')
        else:
            self.push (message)
# log to multiple places
class multi_logger:
def __init__ (self, loggers):
self.loggers = loggers
def __repr__ (self):
return '<multi logger: %s>' % (repr(self.loggers))
def log (self, message):
for logger in self.loggers:
logger.log (message)
class resolving_logger:
"""Feed (ip, message) combinations into this logger to get a
resolved hostname in front of the message. The message will not
be logged until the PTR request finishes (or fails)."""
def __init__ (self, resolver, logger):
self.resolver = resolver
self.logger = logger
class logger_thunk:
def __init__ (self, message, logger):
self.message = message
self.logger = logger
def __call__ (self, host, ttl, answer):
if not answer:
answer = host
self.logger.log ('%s:%s' % (answer, self.message))
def log (self, ip, message):
self.resolver.resolve_ptr (
ip,
self.logger_thunk (
message,
self.logger
)
)
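# a hedged wiring sketch (assumes Medusa's resolver module provides the
# PTR lookups; the addresses and message are hypothetical):
#   import resolver
#   rl = resolving_logger (resolver.caching_resolver ('127.0.0.1'),
#                          file_logger ('-'))
#   rl.log ('10.0.0.1', 'GET /index.html')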
class unresolving_logger:
"Just in case you don't want to resolve"
def __init__ (self, logger):
self.logger = logger
def log (self, ip, message):
self.logger.log ('%s:%s' % (ip, message))
def strip_eol (line):
while line and line[-1] in '\r\n':
line = line[:-1]
return line
class tail_logger:
"Keep track of the last <size> log messages"
def __init__ (self, logger, size=500):
self.size = size
self.logger = logger
self.messages = []
def log (self, message):
self.messages.append (strip_eol (message))
if len (self.messages) > self.size:
del self.messages[0]
self.logger.log (message)
# -*- Mode: Python; tab-width: 4 -*-
# ======================================================================
# Copyright 1997 by Sam Rushing
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of Sam
# Rushing not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
# NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# ======================================================================
"""socket interface to unix syslog.
On Unix, there are usually two ways of getting to syslog: via a
local unix-domain socket, or via the TCP service.
Usually "/dev/log" is the unix domain socket. This may be different
for other systems.
>>> my_client = syslog_client ('/dev/log')
Otherwise, just use the UDP version, port 514.
>>> my_client = syslog_client (('my_log_host', 514))
On win32, you will have to use the UDP version. Note that
you can use this to log to other hosts (and indeed, multiple
hosts).
This module is not a drop-in replacement for the python
<syslog> extension module - the interface is different.
Usage:
>>> c = syslog_client()
>>> c = syslog_client ('/strange/non_standard_log_location')
>>> c = syslog_client (('other_host.com', 514))
>>> c.log ('testing', facility='local0', priority='debug')
"""
# TODO: support named-pipe syslog.
# [see ftp://sunsite.unc.edu/pub/Linux/system/Daemons/syslog-fifo.tar.z]
# from <linux/sys/syslog.h>:
# ===========================================================================
# priorities/facilities are encoded into a single 32-bit quantity, where the
# bottom 3 bits are the priority (0-7) and the top 28 bits are the facility
# (0-big number). Both the priorities and the facilities map roughly
# one-to-one to strings in the syslogd(8) source code. This mapping is
# included in this file.
#
# priorities (these are ordered)
LOG_EMERG = 0 # system is unusable
LOG_ALERT = 1 # action must be taken immediately
LOG_CRIT = 2 # critical conditions
LOG_ERR = 3 # error conditions
LOG_WARNING = 4 # warning conditions
LOG_NOTICE = 5 # normal but significant condition
LOG_INFO = 6 # informational
LOG_DEBUG = 7 # debug-level messages
# facility codes
LOG_KERN = 0 # kernel messages
LOG_USER = 1 # random user-level messages
LOG_MAIL = 2 # mail system
LOG_DAEMON = 3 # system daemons
LOG_AUTH = 4 # security/authorization messages
LOG_SYSLOG = 5 # messages generated internally by syslogd
LOG_LPR = 6 # line printer subsystem
LOG_NEWS = 7 # network news subsystem
LOG_UUCP = 8 # UUCP subsystem
LOG_CRON = 9 # clock daemon
LOG_AUTHPRIV = 10 # security/authorization messages (private)
# other codes through 15 reserved for system use
LOG_LOCAL0 = 16 # reserved for local use
LOG_LOCAL1 = 17 # reserved for local use
LOG_LOCAL2 = 18 # reserved for local use
LOG_LOCAL3 = 19 # reserved for local use
LOG_LOCAL4 = 20 # reserved for local use
LOG_LOCAL5 = 21 # reserved for local use
LOG_LOCAL6 = 22 # reserved for local use
LOG_LOCAL7 = 23 # reserved for local use
priority_names = {
"alert": LOG_ALERT,
"crit": LOG_CRIT,
"debug": LOG_DEBUG,
"emerg": LOG_EMERG,
"err": LOG_ERR,
"error": LOG_ERR, # DEPRECATED
"info": LOG_INFO,
"notice": LOG_NOTICE,
"panic": LOG_EMERG, # DEPRECATED
"warn": LOG_WARNING, # DEPRECATED
"warning": LOG_WARNING,
}
facility_names = {
"auth": LOG_AUTH,
"authpriv": LOG_AUTHPRIV,
"cron": LOG_CRON,
"daemon": LOG_DAEMON,
"kern": LOG_KERN,
"lpr": LOG_LPR,
"mail": LOG_MAIL,
"news": LOG_NEWS,
"security": LOG_AUTH, # DEPRECATED
"syslog": LOG_SYSLOG,
"user": LOG_USER,
"uucp": LOG_UUCP,
"local0": LOG_LOCAL0,
"local1": LOG_LOCAL1,
"local2": LOG_LOCAL2,
"local3": LOG_LOCAL3,
"local4": LOG_LOCAL4,
"local5": LOG_LOCAL5,
"local6": LOG_LOCAL6,
"local7": LOG_LOCAL7,
}
import socket
class syslog_client:
def __init__ (self, address='/dev/log'):
self.address = address
if type (address) == type(''):
        try: # APUE 13.4.2 specifies /dev/log as a datagram socket
self.socket = socket.socket( socket.AF_UNIX
, socket.SOCK_DGRAM)
self.socket.connect (address)
except: # older linux may create as stream socket
self.socket = socket.socket( socket.AF_UNIX
, socket.SOCK_STREAM)
self.socket.connect (address)
self.unix = 1
else:
self.socket = socket.socket( socket.AF_INET
, socket.SOCK_DGRAM)
self.unix = 0
# curious: when talking to the unix-domain '/dev/log' socket, a
# zero-terminator seems to be required. this string is placed
# into a class variable so that it can be overridden if
# necessary.
log_format_string = '<%d>%s\000'
def log (self, message, facility=LOG_USER, priority=LOG_INFO):
message = self.log_format_string % (
self.encode_priority (facility, priority),
message
)
if self.unix:
self.socket.send (message)
else:
self.socket.sendto (message, self.address)
def encode_priority (self, facility, priority):
if type(facility) == type(''):
facility = facility_names[facility]
if type(priority) == type(''):
priority = priority_names[priority]
return (facility<<3) | priority
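    # worked example: facility 'local0' (16) and priority 'debug' (7)
    # encode as (16 << 3) | 7 == 135, so the datagram begins '<135>'
    # once log_format_string is applied.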
def close (self):
if self.unix:
self.socket.close()
if __name__ == '__main__':
"""
Unit test for syslog_client. Set up for the test by:
* tail -f /var/log/allstuf (to see the "normal" log messages).
* Running the test_logger.py script with a junk file name (which
will be opened as a Unix-domain socket). "Custom" log messages
will go here.
* Run this script, passing the same junk file name.
* Check that the "bogus" test throws, and that none of the rest do.
* Check that the 'default' and 'UDP' messages show up in the tail.
* Check that the 'non-std' message shows up in the test_logger
console.
* Finally, kill off the tail and test_logger, and clean up the
socket file.
"""
import sys, traceback
if len( sys.argv ) != 2:
print "Usage: syslog.py localSocketFilename"
sys.exit()
def test_client( desc, address=None ):
try:
if address:
client = syslog_client( address )
else:
client = syslog_client()
except:
print 'syslog_client() [%s] ctor threw' % desc
traceback.print_exc()
return
try:
client.log( 'testing syslog_client() [%s]' % desc
, facility='local0', priority='debug' )
print 'syslog_client.log() [%s] did not throw' % desc
except:
print 'syslog_client.log() [%s] threw' % desc
traceback.print_exc()
test_client( 'default' )
test_client( 'bogus file', '/some/bogus/logsocket' )
test_client( 'nonstd file', sys.argv[1] )
test_client( 'UDP', ('localhost', 514) )
import socket
import select
# several factors here we might want to test:
# 1) max we can create
# 2) max we can bind
# 3) max we can listen on
# 4) max we can connect
def max_server_sockets():
sl = []
while 1:
try:
s = socket.socket (socket.AF_INET, socket.SOCK_STREAM)
s.bind (('',0))
s.listen(5)
sl.append (s)
except:
break
num = len(sl)
for s in sl:
s.close()
del sl
return num
def max_client_sockets():
# make a server socket
server = socket.socket (socket.AF_INET, socket.SOCK_STREAM)
server.bind (('', 9999))
server.listen (5)
sl = []
while 1:
try:
s = socket.socket (socket.AF_INET, socket.SOCK_STREAM)
s.connect (('', 9999))
conn, addr = server.accept()
sl.append ((s,conn))
except:
break
num = len(sl)
for s,c in sl:
s.close()
c.close()
del sl
return num
def max_select_sockets():
sl = []
while 1:
try:
num = len(sl)
            for i in range (1 + int (len(sl) * 0.05)):
# Increase exponentially.
s = socket.socket (socket.AF_INET, socket.SOCK_STREAM)
s.bind (('',0))
s.listen(5)
sl.append (s)
select.select(sl,[],[],0)
except:
break
for s in sl:
s.close()
del sl
return num
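# a hedged driver sketch: report the three limits when run directly.
# (max_client_sockets() assumes local port 9999 is free, per its source.)
if __name__ == '__main__':
    print 'max server sockets:', max_server_sockets()
    print 'max select sockets:', max_select_sockets()
    print 'max client sockets:', max_client_sockets()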
# -*- Mode: Python -*-
# the medusa icon as a python source file.
width = 97
height = 61
data = 'GIF89aa\000=\000\204\000\000\000\000\000\255\255\255\245\245\245ssskkkccc111)))\326\326\326!!!\316\316\316\300\300\300\204\204\000\224\224\224\214\214\214\200\200\200RRR\377\377\377JJJ\367\367\367BBB\347\347\347\000\204\000\020\020\020\265\265\265\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000!\371\004\001\000\000\021\000,\000\000\000\000a\000=\000\000\005\376`$\216di\236h\252\256l\353\276p,\317tm\337x\256\357|m\001@\240E\305\000\364\2164\206R)$\005\201\214\007r\012{X\255\312a\004\260\\>\026\3240\353)\224n\001W+X\334\373\231~\344.\303b\216\024\027x<\273\307\255G,rJiWN\014{S}k"?ti\013EdPQ\207G@_%\000\026yy\\\201\202\227\224<\221Fs$pOjWz\241<r@vO\236\231\233k\247M\2544\203F\177\235\236L#\247\256Z\270,\266BxJ[\276\256A]iE\304\305\262\273E\313\201\275i#\\\303\321\'h\203V\\\177\326\276\216\220P~\335\230_\264\013\342\275\344KF\233\360Q\212\352\246\000\367\274s\361\236\334\347T\341;\341\246\2202\177\3142\211`\242o\325@S\202\264\031\252\207\260\323\256\205\311\036\236\270\002\'\013\302\177\274H\010\324X\002\0176\212\037\376\321\360\032\226\207\244\2674(+^\202\346r\205J\0211\375\241Y#\256f\0127\315>\272\002\325\307g\012(\007\205\312#j\317(\012A\200\224.\241\003\346GS\247\033\245\344\264\366\015L\'PXQl]\266\263\243\232\260?\245\316\371\362\225\035\332\243J\273\332Q\263\357-D\241T\327\270\265\013W&\330\010u\371b\322IW0\214\261]\003\033Va\365Z#\207\213a\030k\2647\262\014p\354\024[n\321N\363\346\317\003\037P\000\235C\302\000\3228(\244\363YaA\005\022\255_\237@\260\000A\212\326\256qbp\321\332\266\011\334=T\023\010"!B\005\003A\010\224\020\220 H\002\337#\020 O\276E\357h\221\327\003\\\000b@v\004\351A.h\365\354\342B\002\011\257\025\\ \220\340\301\353\006\000\024\214\200pA\300\353\012\364\241k/\340\033C\202\003\000\310fZ\011\003V\240R\005\007\354\376\026A\000\000\360\'\202\177\024\004\210\003\000\305\215\360\000\000\015\220\240\332\203\027@\'\202\004\025VpA\000%\210x\321\206\032J\341\316\010\262\211H"l\333\341\200\200>"]P\002\212\011\010`\002\0066FP\200\001\'\024p]\004\027(8B\221\306]\000\201w>\002iB\001\007\340\260"v7J1\343(\257\020\251\243\011\242i\263\017\215\337\035\220\200\221\365m4d\015\016D\251\341iN\354\346Ng\253\200I\240\031\35609\245\2057\311I\302\2007t\231"&`\314\310\244\011e\226(\236\010w\212\300\234\011\012HX(\214\253\311@\001\233^\222pg{% \340\035\224&H\000\246\201\362\215`@\001"L\340\004\030\234\022\250\'\015(V:\302\235\030\240q\337\205\224\212h@\177\006\000\250\210\004\007\310\207\337\005\257-P\346\257\367]p\353\203\271\256:\203\236\211F\340\247\010\3329g\244\010\307*=A\000\203\260y\012\304s#\014\007D\207,N\007\304\265\027\021C\233\207%B\366[m\353\006\006\034j\360\306+\357\274a\204\000\000;'
# -*- Python -*-
# Converted by ./convert_mime_type_table.py from:
# /usr/src2/apache_1.2b6/conf/mime.types
#
content_type_map = \
{
'ai': 'application/postscript',
'aif': 'audio/x-aiff',
'aifc': 'audio/x-aiff',
'aiff': 'audio/x-aiff',
'au': 'audio/basic',
'avi': 'video/x-msvideo',
'bcpio': 'application/x-bcpio',
'bin': 'application/octet-stream',
'cdf': 'application/x-netcdf',
'class': 'application/octet-stream',
'cpio': 'application/x-cpio',
'cpt': 'application/mac-compactpro',
'csh': 'application/x-csh',
'dcr': 'application/x-director',
'dir': 'application/x-director',
'dms': 'application/octet-stream',
'doc': 'application/msword',
'dvi': 'application/x-dvi',
'dxr': 'application/x-director',
'eps': 'application/postscript',
'etx': 'text/x-setext',
'exe': 'application/octet-stream',
'gif': 'image/gif',
'gtar': 'application/x-gtar',
'gz': 'application/x-gzip',
'hdf': 'application/x-hdf',
'hqx': 'application/mac-binhex40',
'htm': 'text/html',
'html': 'text/html',
'ice': 'x-conference/x-cooltalk',
'ief': 'image/ief',
'jpe': 'image/jpeg',
'jpeg': 'image/jpeg',
'jpg': 'image/jpeg',
'kar': 'audio/midi',
'latex': 'application/x-latex',
'lha': 'application/octet-stream',
'lzh': 'application/octet-stream',
'man': 'application/x-troff-man',
'me': 'application/x-troff-me',
'mid': 'audio/midi',
'midi': 'audio/midi',
'mif': 'application/x-mif',
'mov': 'video/quicktime',
'movie': 'video/x-sgi-movie',
'mp2': 'audio/mpeg',
'mpe': 'video/mpeg',
'mpeg': 'video/mpeg',
'mpg': 'video/mpeg',
'mpga': 'audio/mpeg',
'mp3': 'audio/mpeg',
'ms': 'application/x-troff-ms',
'nc': 'application/x-netcdf',
'oda': 'application/oda',
'pbm': 'image/x-portable-bitmap',
'pdb': 'chemical/x-pdb',
'pdf': 'application/pdf',
'pgm': 'image/x-portable-graymap',
'png': 'image/png',
'pnm': 'image/x-portable-anymap',
'ppm': 'image/x-portable-pixmap',
'ppt': 'application/powerpoint',
'ps': 'application/postscript',
'qt': 'video/quicktime',
'ra': 'audio/x-realaudio',
'ram': 'audio/x-pn-realaudio',
'ras': 'image/x-cmu-raster',
'rgb': 'image/x-rgb',
'roff': 'application/x-troff',
'rpm': 'audio/x-pn-realaudio-plugin',
'rtf': 'application/rtf',
'rtx': 'text/richtext',
'sgm': 'text/x-sgml',
'sgml': 'text/x-sgml',
'sh': 'application/x-sh',
'shar': 'application/x-shar',
'sit': 'application/x-stuffit',
'skd': 'application/x-koan',
'skm': 'application/x-koan',
'skp': 'application/x-koan',
'skt': 'application/x-koan',
'snd': 'audio/basic',
'src': 'application/x-wais-source',
'sv4cpio': 'application/x-sv4cpio',
'sv4crc': 'application/x-sv4crc',
't': 'application/x-troff',
'tar': 'application/x-tar',
'tcl': 'application/x-tcl',
'tex': 'application/x-tex',
'texi': 'application/x-texinfo',
'texinfo': 'application/x-texinfo',
'tif': 'image/tiff',
'tiff': 'image/tiff',
'tr': 'application/x-troff',
'tsv': 'text/tab-separated-values',
'txt': 'text/plain',
'ustar': 'application/x-ustar',
'vcd': 'application/x-cdlink',
'vrml': 'x-world/x-vrml',
'wav': 'audio/x-wav',
'wrl': 'x-world/x-vrml',
'xbm': 'image/x-xbitmap',
'xpm': 'image/x-xpixmap',
'xwd': 'image/x-xwindowdump',
'xyz': 'chemical/x-pdb',
'zip': 'application/zip',
}
# -*- Mode: Python; tab-width: 4 -*-
# Author: Sam Rushing <rushing@nightmare.com>
#
# python REPL channel.
#
RCS_ID = '$Id: monitor.py,v 1.9 2000/07/21 18:59:08 shane Exp $'
import md5
import socket
import string
import sys
import time
import traceback
VERSION = string.split(RCS_ID)[2]
import asyncore
import asynchat
from counter import counter
import producers
class monitor_channel (asynchat.async_chat):
try_linemode = 1
def __init__ (self, server, sock, addr):
asynchat.async_chat.__init__ (self, sock)
self.server = server
self.addr = addr
self.set_terminator ('\r\n')
self.data = ''
# local bindings specific to this channel
self.local_env = sys.modules['__main__'].__dict__.copy()
self.push ('Python ' + sys.version + '\r\n')
self.push (sys.copyright+'\r\n')
self.push ('Welcome to %s\r\n' % self)
self.push ("[Hint: try 'from __main__ import *']\r\n")
self.prompt()
self.number = server.total_sessions.as_long()
self.line_counter = counter()
self.multi_line = []
def handle_connect (self):
# send IAC DO LINEMODE
self.push ('\377\375\"')
def close (self):
self.server.closed_sessions.increment()
asynchat.async_chat.close(self)
def prompt (self):
self.push ('>>> ')
def collect_incoming_data (self, data):
self.data = self.data + data
if len(self.data) > 1024:
# denial of service.
self.push ('BCNU\r\n')
self.close_when_done()
def found_terminator (self):
line = self.clean_line (self.data)
self.data = ''
self.line_counter.increment()
# check for special case inputs...
if not line and not self.multi_line:
self.prompt()
return
if line in ['\004', 'exit']:
self.push ('BCNU\r\n')
self.close_when_done()
return
oldout = sys.stdout
olderr = sys.stderr
try:
p = output_producer(self, olderr)
sys.stdout = p
sys.stderr = p
try:
# this is, of course, a blocking operation.
# if you wanted to thread this, you would have
# to synchronize, etc... and treat the output
# like a pipe. Not Fun.
#
# try eval first. If that fails, try exec. If that fails,
# hurl.
try:
if self.multi_line:
# oh, this is horrible...
raise SyntaxError
co = compile (line, repr(self), 'eval')
result = eval (co, self.local_env)
method = 'eval'
if result is not None:
print repr(result)
self.local_env['_'] = result
except SyntaxError:
try:
if self.multi_line:
if line and line[0] in [' ','\t']:
self.multi_line.append (line)
self.push ('... ')
return
else:
self.multi_line.append (line)
line = string.join (self.multi_line, '\n')
co = compile (line, repr(self), 'exec')
self.multi_line = []
else:
co = compile (line, repr(self), 'exec')
except SyntaxError, why:
if why[0] == 'unexpected EOF while parsing':
self.push ('... ')
self.multi_line.append (line)
return
exec co in self.local_env
method = 'exec'
except:
method = 'exception'
self.multi_line = []
(file, fun, line), t, v, tbinfo = asyncore.compact_traceback()
self.log_info('%s %s %s' %(t, v, tbinfo), 'warning')
traceback.print_exc()
tbinfo = None
finally:
sys.stdout = oldout
sys.stderr = olderr
self.log_info('%s:%s (%s)> %s' % (
self.number,
self.line_counter,
method,
repr(line))
)
self.push_with_producer (p)
self.prompt()
# for now, we ignore any telnet option stuff sent to
# us, and we process the backspace key ourselves.
# gee, it would be fun to write a full-blown line-editing
# environment, etc...
def clean_line (self, line):
chars = []
for ch in line:
oc = ord(ch)
if oc < 127:
if oc in [8,177]:
# backspace
chars = chars[:-1]
else:
chars.append (ch)
return string.join (chars, '')
class monitor_server (asyncore.dispatcher):
SERVER_IDENT = 'Monitor Server (V%s)' % VERSION
channel_class = monitor_channel
def __init__ (self, hostname='127.0.0.1', port=8023):
self.hostname = hostname
self.port = port
self.create_socket (socket.AF_INET, socket.SOCK_STREAM)
self.set_reuse_addr()
self.bind ((hostname, port))
self.log_info('%s started on port %d' % (self.SERVER_IDENT, port))
self.listen (5)
self.closed = 0
self.failed_auths = 0
self.total_sessions = counter()
self.closed_sessions = counter()
def writable (self):
return 0
def handle_accept (self):
conn, addr = self.accept()
self.log_info('Incoming monitor connection from %s:%d' % addr)
self.channel_class (self, conn, addr)
self.total_sessions.increment()
def status (self):
return producers.simple_producer (
'<h2>%s</h2>' % self.SERVER_IDENT
+ '<br><b>Total Sessions:</b> %s' % self.total_sessions
+ '<br><b>Current Sessions:</b> %d' % (
self.total_sessions.as_long()-self.closed_sessions.as_long()
)
)
def hex_digest (s):
m = md5.md5()
m.update (s)
return string.joinfields (
map (lambda x: hex (ord (x))[2:], map (None, m.digest())),
'',
)
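# challenge/response sketch: on connect the server pushes a timestamp,
# and the client's first line must be hex_digest (timestamp + password)
# (values hypothetical):
#   hex_digest ('963517162.48' + 'fnord')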
class secure_monitor_channel (monitor_channel):
authorized = 0
def __init__ (self, server, sock, addr):
asynchat.async_chat.__init__ (self, sock)
self.server = server
self.addr = addr
self.set_terminator ('\r\n')
self.data = ''
# local bindings specific to this channel
self.local_env = {}
# send timestamp string
self.timestamp = str(time.time())
self.count = 0
self.line_counter = counter()
self.number = int(server.total_sessions.as_long())
self.multi_line = []
self.push (self.timestamp + '\r\n')
def found_terminator (self):
if not self.authorized:
if hex_digest ('%s%s' % (self.timestamp, self.server.password)) != self.data:
self.log_info ('%s: failed authorization' % self, 'warning')
self.server.failed_auths = self.server.failed_auths + 1
self.close()
else:
self.authorized = 1
self.push ('Python ' + sys.version + '\r\n')
self.push (sys.copyright+'\r\n')
self.push ('Welcome to %s\r\n' % self)
self.prompt()
self.data = ''
else:
monitor_channel.found_terminator (self)
class secure_encrypted_monitor_channel (secure_monitor_channel):
"Wrap send() and recv() with a stream cipher"
def __init__ (self, server, conn, addr):
key = server.password
self.outgoing = server.cipher.new (key)
self.incoming = server.cipher.new (key)
secure_monitor_channel.__init__ (self, server, conn, addr)
def send (self, data):
# send the encrypted data instead
ed = self.outgoing.encrypt (data)
return secure_monitor_channel.send (self, ed)
def recv (self, block_size):
data = secure_monitor_channel.recv (self, block_size)
if data:
dd = self.incoming.decrypt (data)
return dd
else:
return data
class secure_monitor_server (monitor_server):
channel_class = secure_monitor_channel
def __init__ (self, password, hostname='', port=8023):
monitor_server.__init__ (self, hostname, port)
self.password = password
def status (self):
p = monitor_server.status (self)
# kludge
p.data = p.data + ('<br><b>Failed Authorizations:</b> %d' % self.failed_auths)
return p
# don't try to print from within any of the methods
# of this object. 8^)
class output_producer:
def __init__ (self, channel, real_stderr):
self.channel = channel
self.data = ''
# use _this_ for debug output
self.stderr = real_stderr
def check_data (self):
if len(self.data) > 1<<16:
# runaway output, close it.
self.channel.close()
def write (self, data):
lines = string.splitfields (data, '\n')
data = string.join (lines, '\r\n')
self.data = self.data + data
self.check_data()
def writeline (self, line):
self.data = self.data + line + '\r\n'
self.check_data()
def writelines (self, lines):
self.data = self.data + string.joinfields (
lines,
'\r\n'
) + '\r\n'
self.check_data()
def ready (self):
return (len (self.data) > 0)
def flush (self):
pass
def softspace (self, *args):
pass
def more (self):
if self.data:
result = self.data[:512]
self.data = self.data[512:]
return result
else:
return ''
if __name__ == '__main__':
import string
import sys
if '-s' in sys.argv:
sys.argv.remove ('-s')
print 'Enter password: ',
password = raw_input()
else:
password = None
if '-e' in sys.argv:
sys.argv.remove ('-e')
encrypt = 1
else:
encrypt = 0
print sys.argv
if len(sys.argv) > 1:
port = string.atoi (sys.argv[1])
else:
port = 8023
if password is not None:
s = secure_monitor_server (password, '', port)
if encrypt:
s.channel_class = secure_encrypted_monitor_channel
import sapphire
s.cipher = sapphire
else:
s = monitor_server ('', port)
asyncore.loop()
# -*- Mode: Python; tab-width: 4 -*-
# monitor client, unix version.
import asyncore
import asynchat
import regsub
import socket
import string
import sys
import os
import md5
import time
class stdin_channel (asyncore.file_dispatcher):
    def handle_read (self):
        data = self.recv(512)
        if not data:
            print '\nclosed.'
            self.sock_channel.close()
            try:
                self.close()
            except:
                pass
            return      # nothing left to forward to the socket channel
        data = regsub.gsub ('\n', '\r\n', data)
        self.sock_channel.push (data)
def writable (self):
return 0
def log (self, *ignore):
pass
class monitor_client (asynchat.async_chat):
def __init__ (self, password, addr=('',8023), socket_type=socket.AF_INET):
asynchat.async_chat.__init__ (self)
self.create_socket (socket_type, socket.SOCK_STREAM)
self.terminator = '\r\n'
self.connect (addr)
self.sent_auth = 0
self.timestamp = ''
self.password = password
def collect_incoming_data (self, data):
if not self.sent_auth:
self.timestamp = self.timestamp + data
else:
sys.stdout.write (data)
sys.stdout.flush()
def found_terminator (self):
if not self.sent_auth:
self.push (hex_digest (self.timestamp + self.password) + '\r\n')
self.sent_auth = 1
else:
print
def handle_close (self):
# close all the channels, which will make the standard main
# loop exit.
map (lambda x: x.close(), asyncore.socket_map.values())
def log (self, *ignore):
pass
class encrypted_monitor_client (monitor_client):
"Wrap push() and recv() with a stream cipher"
def init_cipher (self, cipher, key):
self.outgoing = cipher.new (key)
self.incoming = cipher.new (key)
def push (self, data):
# push the encrypted data instead
return monitor_client.push (self, self.outgoing.encrypt (data))
def recv (self, block_size):
data = monitor_client.recv (self, block_size)
if data:
return self.incoming.decrypt (data)
else:
return data
def hex_digest (s):
m = md5.md5()
m.update (s)
return string.join (
map (lambda x: hex (ord (x))[2:], map (None, m.digest())),
'',
)
if __name__ == '__main__':
if len(sys.argv) == 1:
print 'Usage: %s host port' % sys.argv[0]
sys.exit(0)
if ('-e' in sys.argv):
encrypt = 1
sys.argv.remove ('-e')
else:
encrypt = 0
sys.stderr.write ('Enter Password: ')
sys.stderr.flush()
import os
try:
os.system ('stty -echo')
p = raw_input()
print
finally:
os.system ('stty echo')
stdin = stdin_channel (0)
    if len(sys.argv) > 2:
if encrypt:
client = encrypted_monitor_client (p, (sys.argv[1], string.atoi (sys.argv[2])))
import sapphire
client.init_cipher (sapphire, p)
else:
client = monitor_client (p, (sys.argv[1], string.atoi (sys.argv[2])))
else:
# default to local host, 'standard' port
client = monitor_client (p)
stdin.sock_channel = client
asyncore.loop()
# -*- Mode: Python; tab-width: 4 -*-
# monitor client, win32 version
# since we can't do select() on stdin/stdout, we simply
# use threads and blocking sockets. <sigh>
import regsub
import socket
import string
import sys
import thread
import md5
def hex_digest (s):
m = md5.md5()
m.update (s)
return string.join (
map (lambda x: hex (ord (x))[2:], map (None, m.digest())),
'',
)
def reader (lock, sock, password):
# first grab the timestamp
ts = sock.recv (1024)[:-2]
sock.send (hex_digest (ts+password) + '\r\n')
while 1:
d = sock.recv (1024)
if not d:
lock.release()
print 'Connection closed. Hit <return> to exit'
thread.exit()
sys.stdout.write (d)
sys.stdout.flush()
def writer (lock, sock, barrel="just kidding"):
while lock.locked():
sock.send (
sys.stdin.readline()[:-1] + '\r\n'
)
if __name__ == '__main__':
if len(sys.argv) == 1:
        print 'Usage: %s host port' % sys.argv[0]
sys.exit(0)
print 'Enter Password: ',
p = raw_input()
s = socket.socket (socket.AF_INET, socket.SOCK_STREAM)
s.connect ((sys.argv[1], string.atoi(sys.argv[2])))
l = thread.allocate_lock()
l.acquire()
thread.start_new_thread (reader, (l, s, p))
writer (l, s)
# -*- Mode: Python; tab-width: 4 -*-
RCS_ID = '$Id: producers.py,v 1.7 2000/06/02 14:22:48 brian Exp $'
import string
"""
A collection of producers.
Each producer implements a particular feature: They can be combined
in various ways to get interesting and useful behaviors.
For example, you can feed dynamically-produced output into the compressing
producer, then wrap this with the 'chunked' transfer-encoding producer.
"""
class simple_producer:
"producer for a string"
def __init__ (self, data, buffer_size=1024):
self.data = data
self.buffer_size = buffer_size
def more (self):
if len (self.data) > self.buffer_size:
result = self.data[:self.buffer_size]
self.data = self.data[self.buffer_size:]
return result
else:
result = self.data
self.data = ''
return result
class scanning_producer:
"like simple_producer, but more efficient for large strings"
def __init__ (self, data, buffer_size=1024):
self.data = data
self.buffer_size = buffer_size
self.pos = 0
def more (self):
if self.pos < len(self.data):
lp = self.pos
rp = min (
len(self.data),
self.pos + self.buffer_size
)
result = self.data[lp:rp]
self.pos = self.pos + len(result)
return result
else:
return ''
class lines_producer:
"producer for a list of lines"
def __init__ (self, lines):
self.lines = lines
def ready (self):
return len(self.lines)
def more (self):
if self.lines:
chunk = self.lines[:50]
self.lines = self.lines[50:]
return string.join (chunk, '\r\n') + '\r\n'
else:
return ''
class buffer_list_producer:
"producer for a list of buffers"
# i.e., data == string.join (buffers, '')
def __init__ (self, buffers):
self.index = 0
self.buffers = buffers
def more (self):
if self.index >= len(self.buffers):
return ''
else:
data = self.buffers[self.index]
self.index = self.index + 1
return data
class file_producer:
"producer wrapper for file[-like] objects"
# match http_channel's outgoing buffer size
out_buffer_size = 1<<16
def __init__ (self, file):
self.done = 0
self.file = file
def more (self):
if self.done:
return ''
else:
data = self.file.read (self.out_buffer_size)
if not data:
self.file.close()
del self.file
self.done = 1
return ''
else:
return data
# A simple output producer. This one does not [yet] have
# the safety feature built into the monitor channel: runaway
# output will not be caught.
# don't try to print from within any of the methods
# of this object.
class output_producer:
"Acts like an output file; suitable for capturing sys.stdout"
def __init__ (self):
self.data = ''
def write (self, data):
lines = string.splitfields (data, '\n')
data = string.join (lines, '\r\n')
self.data = self.data + data
def writeline (self, line):
self.data = self.data + line + '\r\n'
def writelines (self, lines):
self.data = self.data + string.joinfields (
lines,
'\r\n'
) + '\r\n'
def ready (self):
return (len (self.data) > 0)
def flush (self):
pass
def softspace (self, *args):
pass
def more (self):
if self.data:
result = self.data[:512]
self.data = self.data[512:]
return result
else:
return ''
class composite_producer:
"combine a fifo of producers into one"
def __init__ (self, producers):
self.producers = producers
def more (self):
while len(self.producers):
p = self.producers.first()
d = p.more()
if d:
return d
else:
self.producers.pop()
else:
return ''
class globbing_producer:
"""
'glob' the output from a producer into a particular buffer size.
helps reduce the number of calls to send(). [this appears to
gain about 30% performance on requests to a single channel]
"""
def __init__ (self, producer, buffer_size=1<<16):
self.producer = producer
self.buffer = ''
self.buffer_size = buffer_size
def more (self):
while len(self.buffer) < self.buffer_size:
data = self.producer.more()
if data:
self.buffer = self.buffer + data
else:
break
r = self.buffer
self.buffer = ''
return r
class hooked_producer:
"""
    A producer that will call <function> when it empties,
with an argument of the number of bytes produced. Useful
for logging/instrumentation purposes.
"""
def __init__ (self, producer, function):
self.producer = producer
self.function = function
self.bytes = 0
def more (self):
if self.producer:
result = self.producer.more()
if not result:
self.producer = None
self.function (self.bytes)
else:
self.bytes = self.bytes + len(result)
return result
else:
return ''
# HTTP 1.1 emphasizes that an advertised Content-Length header MUST be
# correct. In the face of Strange Files, it is conceivable that
# reading a 'file' may produce an amount of data not matching that
# reported by os.stat() [text/binary mode issues, perhaps the file is
# being appended to, etc..] This makes the chunked encoding a True
# Blessing, and it really ought to be used even with normal files.
# How beautifully it blends with the concept of the producer.
class chunked_producer:
"""A producer that implements the 'chunked' transfer coding for HTTP/1.1.
Here is a sample usage:
request['Transfer-Encoding'] = 'chunked'
request.push (
producers.chunked_producer (your_producer)
)
request.done()
"""
def __init__ (self, producer, footers=None):
self.producer = producer
self.footers = footers
def more (self):
if self.producer:
data = self.producer.more()
if data:
return '%x\r\n%s\r\n' % (len(data), data)
else:
self.producer = None
if self.footers:
return string.join (
['0'] + self.footers,
'\r\n'
) + '\r\n\r\n'
else:
return '0\r\n\r\n'
else:
return ''
# Unfortunately this isn't very useful right now (Aug 97), because
# apparently the browsers don't do on-the-fly decompression. Which
# is sad, because this could _really_ speed things up, especially for
# low-bandwidth clients (i.e., most everyone).
try:
import zlib
except ImportError:
zlib = None
class compressed_producer:
"""
Compress another producer on-the-fly, using ZLIB
[Unfortunately, none of the current browsers seem to support this]
"""
# Note: It's not very efficient to have the server repeatedly
# compressing your outgoing files: compress them ahead of time, or
# use a compress-once-and-store scheme. However, if you have low
# bandwidth and low traffic, this may make more sense than
# maintaining your source files compressed.
#
# Can also be used for compressing dynamically-produced output.
def __init__ (self, producer, level=5):
self.producer = producer
self.compressor = zlib.compressobj (level)
def more (self):
if self.producer:
cdata = ''
# feed until we get some output
while not cdata:
data = self.producer.more()
if not data:
self.producer = None
return self.compressor.flush()
else:
cdata = self.compressor.compress (data)
return cdata
else:
return ''
class escaping_producer:
"A producer that escapes a sequence of characters"
" Common usage: escaping the CRLF.CRLF sequence in SMTP, NNTP, etc..."
def __init__ (self, producer, esc_from='\r\n.', esc_to='\r\n..'):
self.producer = producer
self.esc_from = esc_from
self.esc_to = esc_to
self.buffer = ''
from asynchat import find_prefix_at_end
self.find_prefix_at_end = find_prefix_at_end
def more (self):
esc_from = self.esc_from
esc_to = self.esc_to
buffer = self.buffer + self.producer.more()
if buffer:
buffer = string.replace (buffer, esc_from, esc_to)
i = self.find_prefix_at_end (buffer, esc_from)
if i:
# we found a prefix
self.buffer = buffer[-i:]
return buffer[:-i]
else:
# no prefix, return it all
self.buffer = ''
return buffer
else:
return buffer
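# e.g. (SMTP-style dot-escaping) with the default arguments, a body
# line beginning with '.' is doubled on the wire: the chunk
# 'done.\r\n.hidden' is produced as 'done.\r\n..hidden'.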
# -*- Mode: Python; tab-width: 4 -*-
VERSION_STRING = "$Id: select_trigger.py,v 1.14 2000/06/02 14:22:48 brian Exp $"
import asyncore
import asynchat
import os
import socket
import string
import thread
if os.name == 'posix':
class trigger (asyncore.file_dispatcher):
"Wake up a call to select() running in the main thread"
# This is useful in a context where you are using Medusa's I/O
# subsystem to deliver data, but the data is generated by another
# thread. Normally, if Medusa is in the middle of a call to
# select(), new output data generated by another thread will have
# to sit until the call to select() either times out or returns.
# If the trigger is 'pulled' by another thread, it should immediately
# generate a READ event on the trigger object, which will force the
# select() invocation to return.
# A common use for this facility: letting Medusa manage I/O for a
# large number of connections; but routing each request through a
# thread chosen from a fixed-size thread pool. When a thread is
# acquired, a transaction is performed, but output data is
# accumulated into buffers that will be emptied more efficiently
# by Medusa. [picture a server that can process database queries
# rapidly, but doesn't want to tie up threads waiting to send data
# to low-bandwidth connections]
# The other major feature provided by this class is the ability to
# move work back into the main thread: if you call pull_trigger()
# with a thunk argument, when select() wakes up and receives the
# event it will call your thunk from within that thread. The main
# purpose of this is to remove the need to wrap thread locks around
# Medusa's data structures, which normally do not need them. [To see
# why this is true, imagine this scenario: A thread tries to push some
# new data onto a channel's outgoing data queue at the same time that
# the main thread is trying to remove some]
def __init__ (self):
r, w = os.pipe()
self.trigger = w
asyncore.file_dispatcher.__init__ (self, r)
self.lock = thread.allocate_lock()
self.thunks = []
def __repr__ (self):
return '<select-trigger (pipe) at %x>' % id(self)
def readable (self):
return 1
def writable (self):
return 0
def handle_connect (self):
pass
def pull_trigger (self, thunk=None):
# print 'PULL_TRIGGER: ', len(self.thunks)
if thunk:
try:
self.lock.acquire()
self.thunks.append (thunk)
finally:
self.lock.release()
os.write (self.trigger, 'x')
def handle_read (self):
self.recv (8192)
try:
self.lock.acquire()
for thunk in self.thunks:
try:
thunk()
except:
(file, fun, line), t, v, tbinfo = asyncore.compact_traceback()
print 'exception in trigger thunk: (%s:%s %s)' % (t, v, tbinfo)
self.thunks = []
finally:
self.lock.release()
else:
# win32-safe version
class trigger (asyncore.dispatcher):
address = ('127.9.9.9', 19999)
def __init__ (self):
a = socket.socket (socket.AF_INET, socket.SOCK_STREAM)
w = socket.socket (socket.AF_INET, socket.SOCK_STREAM)
# set TCP_NODELAY to true to avoid buffering
            w.setsockopt (socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
# tricky: get a pair of connected sockets
host='127.0.0.1'
port=19999
while 1:
try:
self.address=(host, port)
a.bind(self.address)
break
except:
if port <= 19950:
raise 'Bind Error', 'Cannot bind trigger!'
port=port - 1
a.listen (1)
w.setblocking (0)
try:
w.connect (self.address)
except:
pass
r, addr = a.accept()
a.close()
w.setblocking (1)
self.trigger = w
asyncore.dispatcher.__init__ (self, r)
self.lock = thread.allocate_lock()
self.thunks = []
self._trigger_connected = 0
def __repr__ (self):
return '<select-trigger (loopback) at %x>' % id(self)
def readable (self):
return 1
def writable (self):
return 0
def handle_connect (self):
pass
def pull_trigger (self, thunk=None):
if thunk:
try:
self.lock.acquire()
self.thunks.append (thunk)
finally:
self.lock.release()
self.trigger.send ('x')
def handle_read (self):
self.recv (8192)
try:
self.lock.acquire()
for thunk in self.thunks:
try:
thunk()
except:
(file, fun, line), t, v, tbinfo = asyncore.compact_traceback()
print 'exception in trigger thunk: (%s:%s %s)' % (t, v, tbinfo)
self.thunks = []
finally:
self.lock.release()
the_trigger = None
class trigger_file:
"A 'triggered' file object"
buffer_size = 4096
def __init__ (self, parent):
global the_trigger
if the_trigger is None:
the_trigger = trigger()
self.parent = parent
self.buffer = ''
def write (self, data):
self.buffer = self.buffer + data
if len(self.buffer) > self.buffer_size:
d, self.buffer = self.buffer, ''
the_trigger.pull_trigger (
lambda d=d,p=self.parent: p.push (d)
)
def writeline (self, line):
self.write (line+'\r\n')
def writelines (self, lines):
self.write (
string.joinfields (
lines,
'\r\n'
) + '\r\n'
)
def flush (self):
if self.buffer:
d, self.buffer = self.buffer, ''
the_trigger.pull_trigger (
lambda p=self.parent,d=d: p.push (d)
)
def softspace (self, *args):
pass
def close (self):
# in a derived class, you may want to call trigger_close() instead.
self.flush()
self.parent = None
def trigger_close (self):
d, self.buffer = self.buffer, ''
p, self.parent = self.parent, None
the_trigger.pull_trigger (
lambda p=p,d=d: (p.push(d), p.close_when_done())
)
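# a hedged usage sketch: a worker thread hands a finished buffer back
# to the main select() loop ('channel' is a hypothetical async_chat
# instance owned by the I/O thread):
#   t = trigger()
#   t.pull_trigger (lambda: channel.push ('done\r\n'))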
if __name__ == '__main__':
import time
def thread_function (output_file, i, n):
print 'entering thread_function'
while n:
time.sleep (5)
output_file.write ('%2d.%2d %s\r\n' % (i, n, output_file))
output_file.flush()
n = n - 1
output_file.close()
print 'exiting thread_function'
class thread_parent (asynchat.async_chat):
def __init__ (self, conn, addr):
self.addr = addr
asynchat.async_chat.__init__ (self, conn)
self.set_terminator ('\r\n')
self.buffer = ''
self.count = 0
def collect_incoming_data (self, data):
self.buffer = self.buffer + data
def found_terminator (self):
data, self.buffer = self.buffer, ''
if not data:
asyncore.close_all()
print "done"
return
n = string.atoi (string.split (data)[0])
tf = trigger_file (self)
self.count = self.count + 1
thread.start_new_thread (thread_function, (tf, self.count, n))
class thread_server (asyncore.dispatcher):
def __init__ (self, family=socket.AF_INET, address=('', 9003)):
asyncore.dispatcher.__init__ (self)
self.create_socket (family, socket.SOCK_STREAM)
self.set_reuse_addr()
self.bind (address)
self.listen (5)
def handle_accept (self):
conn, addr = self.accept()
tp = thread_parent (conn, addr)
thread_server()
#asyncore.loop(1.0, use_poll=1)
try:
asyncore.loop ()
except:
asyncore.close_all()
import sys
import socket
import select
print "Simulating Unix-domain logging using file: %s" % sys.argv[1]
log_socket = socket.socket( socket.AF_UNIX, socket.SOCK_DGRAM )
log_socket.bind( sys.argv[1] )
while 1:
    r, w, e = select.select( [ log_socket ], [], [] )
    print ".",
    if r:
print log_socket.recv( 1024 )