Commit 83837d35 authored by Amos Latteier's avatar Amos Latteier

Updates to the latest version of medusa. Only select_trigger.py is not the latest version.

parent 301b54d9
"""This __init__.py file makes medusa into a package"""
# -*- Mode: Python; tab-width: 4 -*-
# $Id: asynchat.py,v 1.6 1999/02/05 02:14:39 amos Exp $
# $Id: asynchat.py,v 1.7 1999/04/09 00:37:33 amos Exp $
# Author: Sam Rushing <rushing@nightmare.com>
# ======================================================================
......@@ -28,8 +28,6 @@
import socket
import asyncore
import string
import types
from producers import NotReady
# This class adds support for 'chat' style protocols - where one side
# sends a 'command', and the other sends a response (examples would be
......@@ -49,19 +47,6 @@ from producers import NotReady
# method) up to the terminator, and then control will be returned to
# you - by calling your self.found_terminator() method
# Added support for sized input. If you set terminator to an integer
# or a long, instead of a string, then output will be collected and
# sent to 'collect_incoming_data' until the given number of bytes have
# been read. At that point, 'found_terminator' will be called.
# Added support for future producers. See producers.py for more info
# on the future producer interface. Suffice it to say that if you
# wish to handle future producers in your asynchat subclass, you
# need to call self.producer_fifo.ready in your writable method. For
# your convenience we include such a method, 'writable_future'. If you
# are not interested in future producers, simply don't use them. You
# incur no overhead.
class async_chat (asyncore.dispatcher):
"""This is an abstract class. You must derive from this class, and add
the two methods collect_incoming_data() and found_terminator()"""
......@@ -70,7 +55,6 @@ class async_chat (asyncore.dispatcher):
ac_in_buffer_size = 4096
ac_out_buffer_size = 4096
ac_in_buffer_read = 0
def __init__ (self, conn=None):
self.ac_in_buffer = ''
......@@ -79,13 +63,7 @@ class async_chat (asyncore.dispatcher):
asyncore.dispatcher.__init__ (self, conn)
def set_terminator (self, term):
"""Set the input delimiter.
Can be a fixed string of any length, or None,
or an integer or long to indicate a sized input.
"""
if term is None:
self.terminator = ''
else:
"Set the input delimiter. Can be a fixed string of any length, an integer, or None"
self.terminator = term
def get_terminator (self):
......@@ -101,8 +79,7 @@ class async_chat (asyncore.dispatcher):
try:
data = self.recv (self.ac_in_buffer_size)
except socket.error, why:
import sys
self.handle_error (sys.exc_type, sys.exc_value, sys.exc_traceback)
self.handle_error()
return
self.ac_in_buffer = self.ac_in_buffer + data
......@@ -113,40 +90,34 @@ class async_chat (asyncore.dispatcher):
# combos with a single recv(1024).
while self.ac_in_buffer:
lb = len(self.ac_in_buffer)
terminator = self.get_terminator()
# if terminator is numeric measure, then collect data
# until we have read that much. ac_in_buffer_read tracks
# how much data has been read.
if type(terminator)==types.IntType:
self.ac_in_buffer_read=self.ac_in_buffer_read + \
len(self.ac_in_buffer)
if self.ac_in_buffer_read < self.terminator:
self.collect_incoming_data(self.ac_in_buffer)
self.ac_in_buffer=''
elif self.ac_in_buffer_read == self.terminator:
self.collect_incoming_data(self.ac_in_buffer)
self.ac_in_buffer=''
self.ac_in_buffer_read=0
self.found_terminator()
if terminator is None:
# no terminator, collect it all
self.collect_incoming_data (self.ac_in_buffer)
self.ac_in_buffer = ''
elif type(terminator) == type(0):
# numeric terminator
n = terminator
lb = lb
if lb < n:
self.collect_incoming_data (self.ac_in_buffer)
self.ac_in_buffer = ''
self.terminator = self.terminator - lb
else:
border=self.terminator-(self.ac_in_buffer_read-len(data))
self.collect_incoming_data(self.ac_in_buffer[:border])
self.ac_in_buffer=self.ac_in_buffer[border:]
self.ac_in_buffer_read=0
data=''
self.collect_incoming_data (self.ac_in_buffer[:n])
self.ac_in_buffer = self.ac_in_buffer[n:]
self.terminator = 0
self.found_terminator()
continue
terminator_len = len(terminator)
# 4 cases:
else:
# 3 cases:
# 1) end of buffer matches terminator exactly:
# collect data, transition
# 2) end of buffer matches some prefix:
# collect data to the prefix
# 3) end of buffer does not match any prefix:
# collect data
# 4) no terminator, just collect the data
if terminator:
terminator_len = len(terminator)
index = string.find (self.ac_in_buffer, terminator)
if index != -1:
# we found the terminator
......@@ -158,6 +129,7 @@ class async_chat (asyncore.dispatcher):
# check for a prefix of the terminator
index = find_prefix_at_end (self.ac_in_buffer, terminator)
if index:
if index != lb:
# we found a prefix, collect up to the prefix
self.collect_incoming_data (self.ac_in_buffer[:-index])
self.ac_in_buffer = self.ac_in_buffer[-index:]
......@@ -166,10 +138,6 @@ class async_chat (asyncore.dispatcher):
# no prefix, collect it all
self.collect_incoming_data (self.ac_in_buffer)
self.ac_in_buffer = ''
else:
# no terminator, collect it all
self.collect_incoming_data (self.ac_in_buffer)
self.ac_in_buffer = ''
def handle_write (self):
self.initiate_send ()
......@@ -186,21 +154,21 @@ class async_chat (asyncore.dispatcher):
self.initiate_send()
def readable (self):
"predicate for inclusion in the readable for select()"
return (len(self.ac_in_buffer) <= self.ac_in_buffer_size)
def writable (self):
"predicate for inclusion in the writable for select()"
return len(self.ac_out_buffer) or len(self.producer_fifo) or (not self.connected)
# To use future producers override writable with writable_future
def writable_future(self):
return len(self.ac_out_buffer) or self.producer_fifo.ready() or (not self.connected)
def close_when_done (self):
"automatically close this channel once the outgoing queue is empty"
self.producer_fifo.push (None)
# refill the outgoing buffer by calling the more() method
# of the first producer in the queue
def refill_buffer (self):
_string_type = type('')
while 1:
if len(self.producer_fifo):
p = self.producer_fifo.first()
......@@ -211,11 +179,11 @@ class async_chat (asyncore.dispatcher):
self.producer_fifo.pop()
self.close()
return
try:
data = p.more()
except NotReady:
elif type(p) is _string_type:
self.producer_fifo.pop()
self.ac_out_buffer = self.ac_out_buffer + p
return
data = p.more()
if data:
self.ac_out_buffer = self.ac_out_buffer + data
return
......@@ -227,36 +195,29 @@ class async_chat (asyncore.dispatcher):
def initiate_send (self):
obs = self.ac_out_buffer_size
# try to refill the buffer
if (not self._push_mode) and (len (self.ac_out_buffer) < obs):
if (len (self.ac_out_buffer) < obs):
self.refill_buffer()
if self.ac_out_buffer and self.connected:
# try to send the buffer
try:
num_sent = self.send (self.ac_out_buffer[:obs])
if num_sent:
self.ac_out_buffer = self.ac_out_buffer[num_sent:]
except socket.error, why:
self.handle_error()
return
def discard_buffers (self):
# Emergencies only!
self.ac_in_buffer = ''
self.ac_out_buffer == ''
self.producer_fifo.list=[]
# ==================================================
# support for push mode.
# ==================================================
_push_mode = 0
def push_mode (self, boolean):
self._push_mode = boolean
def writable_push (self):
return self.connected and len(self.ac_out_buffer)
while self.producer_fifo:
self.producer_fifo.pop()
class simple_producer:
def __init__ (self, data, buffer_size=512):
self.data = data
self.buffer_size = buffer_size
......@@ -288,18 +249,13 @@ class fifo:
self.list.append (data)
def pop (self):
if self.ready():
if self.list:
result = self.list[0]
del self.list[0]
return (1, result)
else:
return (0, None)
def ready(self):
"Is the first producer in the fifo ready?"
if len(self.list):
return not hasattr(self.list[0],'ready') or self.list[0].ready()
# Given 'haystack', see if any prefix of 'needle' is at its end. This
# assumes an exact match has already been checked. Return the number of
# characters matched.
......
# -*- Mode: Python; tab-width: 4 -*-
# $Id: asyncore.py,v 1.1 1999/01/08 23:04:42 jim Exp $
# $Id: asyncore.py,v 1.2 1999/04/09 00:37:33 amos Exp $
# Author: Sam Rushing <rushing@nightmare.com>
# ======================================================================
......@@ -47,25 +47,20 @@ def poll (timeout=0.0):
sockets = socket_map.keys()
r = filter (lambda x: x.readable(), sockets)
w = filter (lambda x: x.writable(), sockets)
e = sockets[:]
e = []
(r,w,e) = select.select (r,w,e, timeout)
for x in e:
try:
x.handle_expt_event()
except:
x.handle_error (sys.exc_type, sys.exc_value, sys.exc_traceback)
for x in r:
try:
x.handle_read_event()
except:
x.handle_error (sys.exc_type, sys.exc_value, sys.exc_traceback)
x.handle_error()
for x in w:
try:
x.handle_write_event()
except:
x.handle_error (sys.exc_type, sys.exc_value, sys.exc_traceback)
x.handle_error()
def poll2 (timeout=0.0):
import poll
......@@ -85,7 +80,6 @@ def poll2 (timeout=0.0):
if flags:
l.append (fd, flags)
r = poll.poll (l, timeout)
print r
for fd, flags in r:
s = fd_map[fd]
try:
......@@ -96,7 +90,7 @@ def poll2 (timeout=0.0):
if (flags & poll.POLLERR):
s.handle_expt_event()
except:
apply (s.handle_error, sys.exc_info())
s.handle_error()
def loop (timeout=30.0, use_poll=0):
......@@ -114,7 +108,6 @@ class dispatcher:
connected = 0
accepting = 0
closing = 0
_fileno = None
addr = None
def __init__ (self, sock=None):
......@@ -150,14 +143,6 @@ class dispatcher:
self.log ('adding channel %s' % self)
socket_map [self] = 1
# we cache the original fileno, because after closing
# a socket, s.fileno() will return -1, and we want to
# continue tracking it via the original number.
def fileno (self):
if self._fileno is None:
self._fileno = self.socket.fileno()
return self._fileno
def del_channel (self):
if socket_map.has_key (self):
self.log ('closing channel %d:%s' % (self.fileno(), self))
......@@ -216,6 +201,7 @@ class dispatcher:
return self.socket.bind (addr)
def connect (self, address):
self.connected = 0
try:
self.socket.connect (address)
except socket.error, why:
......@@ -268,8 +254,6 @@ class dispatcher:
def close (self):
self.del_channel()
self.socket.close()
self._fileno = None
self.connected = 0
# cheap inheritance, used to pass all other attribute
# references to the underlying socket object.
......@@ -306,9 +290,8 @@ class dispatcher:
def handle_expt_event (self):
self.handle_expt()
def handle_error (self, *info):
(t,v,tb) = info
(file,fun,line), tbinfo = compact_traceback (t,v,tb)
def handle_error (self):
(file,fun,line), t, v, tbinfo = compact_traceback()
# sometimes a user repr method will crash.
try:
......@@ -319,12 +302,11 @@ class dispatcher:
print (
'uncaptured python exception, closing channel %s (%s:%s %s)' % (
self_repr,
str(t),
str(v),
t,
v,
tbinfo
)
)
del t,v,tb
self.close()
def handle_expt (self):
......@@ -339,9 +321,6 @@ class dispatcher:
def handle_connect (self):
self.log ('unhandled connect event')
def handle_oob (self):
self.log ('unhandled out-of-band event')
def handle_accept (self):
self.log ('unhandled accept event')
......@@ -380,7 +359,8 @@ class dispatcher_with_send (dispatcher):
# used for debugging.
# ---------------------------------------------------------------------------
def compact_traceback (t,v,tb):
def compact_traceback ():
t,v,tb = sys.exc_info()
tbinfo = []
while 1:
tbinfo.append (
......@@ -392,6 +372,9 @@ def compact_traceback (t,v,tb):
if not tb:
break
# just to be safe
del tb
file, function, line = tbinfo[-1]
info = '[' + string.join (
map (
......@@ -400,7 +383,7 @@ def compact_traceback (t,v,tb):
),
'] ['
) + ']'
return (file, function, line), info
return (file, function, line), t, v, info
def close_all ():
global socket_map
......@@ -457,3 +440,4 @@ if os.name == 'posix':
def set_file (self, fd):
self.socket = file_wrapper (fd)
self.add_channel()
......@@ -8,7 +8,7 @@
# interested in using this software in a commercial context, or in
# purchasing support, please contact the author.
RCS_ID = '$Id: default_handler.py,v 1.1 1999/01/09 03:17:31 amos Exp $'
RCS_ID = '$Id: default_handler.py,v 1.2 1999/04/09 00:37:33 amos Exp $'
# standard python modules
import os
......@@ -203,7 +203,7 @@ class default_handler:
request.done()
def set_content_type (self, path, request):
ext = get_extension (path)
ext = string.lower (get_extension (path))
if mime_type_table.content_type_map.has_key (ext):
request['Content-Type'] = mime_type_table.content_type_map[ext]
else:
......
# -*- Mode: Python; tab-width: 4 -*-
# $Id: filesys.py,v 1.1 1999/01/18 22:44:21 amos Exp $
# $Id: filesys.py,v 1.2 1999/04/09 00:37:33 amos Exp $
# Author: Sam Rushing <rushing@nightmare.com>
#
# Generic filesystem interface.
......
......@@ -8,7 +8,7 @@
# If you are interested in using this software in a commercial context,
# or in purchasing support, please contact the author.
RCS_ID = '$Id: ftp_server.py,v 1.1 1999/01/21 22:52:08 amos Exp $'
RCS_ID = '$Id: ftp_server.py,v 1.2 1999/04/09 00:37:33 amos Exp $'
# An extensible, configurable, asynchronous FTP server.
#
......@@ -157,8 +157,7 @@ class ftp_channel (asynchat.async_chat):
result = apply (fun, (line,))
except:
self.server.total_exceptions.increment()
t,v,tb = sys.exc_info()
(file, fun, line), ctb = asyncore.compact_traceback (t,v,tb)
(file, fun, line), t,v, tbinfo = asyncore.compact_traceback()
if self.client_dc:
try:
self.client_dc.close()
......@@ -166,7 +165,7 @@ class ftp_channel (asynchat.async_chat):
pass
self.respond (
'451 Server Error: %s, %s: file: %s line: %s' % (
str(t),str(v),file,line,
t,v,file,line,
)
)
......@@ -220,7 +219,7 @@ class ftp_channel (asynchat.async_chat):
# --------------------------------------------------
def check_command_authorization (self, command):
if command in ['stor', 'dele'] and self.read_only:
if command in self.write_commands and self.read_only:
return 0
else:
return 1
......@@ -375,9 +374,10 @@ class ftp_channel (asynchat.async_chat):
'prepare for server-to-server transfer'
pc = self.new_passive_acceptor()
port = pc.addr[1]
ip_addr = pc.control_channel.getsockname()[0]
self.respond (
'227 Entering Passive Mode. %s,%d,%d' % (
string.join (string.split (IP_ADDRESS, '.'), ','),
'227 Entering Passive Mode (%s,%d,%d)' % (
string.join (string.split (ip_addr, '.'), ','),
port/256,
port%256
)
......@@ -621,6 +621,22 @@ class ftp_channel (asynchat.async_chat):
'350 Restarting at %d. Send STORE or RETRIEVE to initiate transfer.' % pos
)
def cmd_stru (self, line):
    'obsolete - set file transfer structure'
    # RFC 959 STRU command.  line[1] is the command argument
    # (presumably the command line has already been split into words
    # by the dispatcher -- confirm against the caller).
    # NOTE(review): '' in 'fF' is True, so an empty argument string
    # would be accepted as STRU F.
    if line[1] in 'fF':
        # F == 'file' structure, the only structure we implement
        self.respond ('200 STRU F Ok')
    else:
        self.respond ('504 Unimplemented STRU type')
def cmd_mode (self, line):
    'obsolete - set file transfer mode'
    # RFC 959 MODE command; only S (stream) mode is implemented.
    # line[1] is the command argument (presumably the split command
    # words -- confirm against the caller).
    if line[1] in 'sS':
        # S == 'stream' (the original comment said 'file', a
        # copy/paste slip from cmd_stru)
        self.respond ('200 MODE S Ok')
    else:
        self.respond ('502 Unimplemented MODE type')
# The stat command has two personalities. Normally it returns status
# information about the current connection. But if given an argument,
# it is equivalent to the LIST command, with the data sent over the
......@@ -794,9 +810,6 @@ class passive_acceptor (asyncore.dispatcher):
asyncore.dispatcher.__init__ (self)
self.control_channel = control_channel
self.create_socket (socket.AF_INET, socket.SOCK_STREAM)
#self.bind ((IP_ADDRESS, 0))
#self.bind (('', 0))
# bind to an address on the interface that the
# control connection is coming from.
self.bind ((
......@@ -856,8 +869,7 @@ class xmit_channel (asynchat.async_chat):
self.bytes_out = self.bytes_out + result
return result
def handle_error (self, t,v,tb):
import errno
def handle_error (self):
# usually this is to catch an unexpected disconnect.
self.log ('unexpected disconnect on data xmit channel')
self.close()
......
......@@ -109,16 +109,15 @@ def build_http_date (when):
def parse_http_date (d):
d = string.lower (d)
# Thanks to Craig Silverstein <csilvers@google.com> for pointing
# out the DST discrepancy
if time.daylight:
tz = time.altzone
else:
tz = time.timezone
# rfc850 comes first, netscape uses it. <frown>
if rfc850_reg.match (d) == len(d):
return int (time.mktime (unpack_rfc850()) - tz)
retval = int (time.mktime (unpack_rfc850()) - tz)
elif rfc822_reg.match (d) == len(d):
return int (time.mktime (unpack_rfc822()) - tz)
retval = int (time.mktime (unpack_rfc822()) - tz)
else:
return 0
# Thanks to Craig Silverstein <csilvers@google.com> for pointing
# out the DST discrepancy
if time.daylight and time.localtime(retval)[-1] == 1: # DST correction
retval = retval + (tz - time.altzone)
return retval
......@@ -9,7 +9,7 @@
# interested in using this software in a commercial context, or in
# purchasing support, please contact the author.
RCS_ID = '$Id: http_server.py,v 1.5 1999/03/12 19:21:40 brian Exp $'
RCS_ID = '$Id: http_server.py,v 1.6 1999/04/09 00:37:33 amos Exp $'
# python modules
import os
......@@ -54,7 +54,6 @@ class http_request:
# If your clients are having trouble, you might want to disable this.
use_chunked = 1
# by default, this request object ignores user data.
collector = None
......@@ -70,6 +69,8 @@ class http_request:
'Date' : http_date.build_http_date (time.time())
}
self.request_number = http_request.request_counter.increment()
self._split_uri = None
self._header_cache = {}
# --------------------------------------------------
# reply header management
......@@ -92,6 +93,47 @@ class http_request:
'\r\n'
) + '\r\n\r\n'
# --------------------------------------------------
# split a uri
# --------------------------------------------------
# <path>;<params>?<query>#<fragment>
path_regex = regex.compile (
# path params query fragment
'\\([^;?#]*\\)\\(;[^?#]*\\)?\\(\\?[^#]*\)?\(#.*\)?'
)
def split_uri (self):
    """Split self.uri into [path, params, query, fragment], with caching.

    Uses the pre-re 'regex' module, whose match() returns the number of
    characters matched; anything short of the whole URI is treated as a
    broken URI.  Groups that did not participate in the match
    presumably come back as None -- confirm against the regex module.
    """
    if self._split_uri is None:
        if self.path_regex.match (self.uri) != len(self.uri):
            raise ValueError, "Broken URI"
        else:
            # groups 1-4 of path_regex: path, ;params, ?query, #fragment
            self._split_uri = map (lambda i,r=self.path_regex: r.group(i), range(1,5))
    return self._split_uri
def get_header_with_regex (self, head_reg, group):
    """Return the given group of the first header line fully matched by head_reg.

    head_reg is an old-style 'regex' pattern object: match() returns the
    match length, so equality with len(line) means the whole line matched.
    Returns '' when no header line matches.
    """
    for line in self.header:
        if head_reg.match (line) == len(line):
            return head_reg.group(group)
    return ''
def get_header (self, header):
    """Return the value of the named request header, or None if absent.

    The lookup is case-insensitive, and the result -- including a miss --
    is memoized in self._header_cache, so the raw header lines are
    scanned at most once per header name.
    """
    header = string.lower (header)
    hc = self._header_cache
    if not hc.has_key (header):
        # first lookup for this name: scan the raw header lines
        h = header + ': '
        hl = len(h)
        for line in self.header:
            if string.lower (line[:hl]) == h:
                r = line[hl:]
                hc[header] = r
                return r
        # cache the miss as None so we never rescan for this name
        hc[header] = None
        return None
    else:
        return hc[header]
# --------------------------------------------------
# user data
# --------------------------------------------------
......@@ -112,11 +154,11 @@ class http_request:
'warning: unexpected end-of-record for incoming request\n'
)
def push (self, producer):
if type(producer) == type(''):
self.outgoing.push (producers.simple_producer (producer))
def push (self, thing):
if type(thing) == type(''):
self.outgoing.push (producers.simple_producer (thing))
else:
self.outgoing.push (producer)
self.outgoing.push (thing)
def response (self, code=200):
message = self.responses[code]
......@@ -209,13 +251,6 @@ class http_request:
if close_it:
self.channel.close_when_done()
def log (self, bytes):
print 'request %3d: %s %d bytes' % (
self.request_counter,
self.request,
bytes
)
def log_date_string (self, when):
return time.strftime (
'%d/%b/%Y:%H:%M:%S ',
......@@ -244,14 +279,12 @@ class http_request:
204: "No Content",
205: "Reset Content",
206: "Partial Content",
207: "Multi-Status",
300: "Multiple Choices",
301: "Moved Permanently",
302: "Moved Temporarily",
303: "See Other",
304: "Not Modified",
305: "Use Proxy",
307: "Temporary Redirect",
400: "Bad Request",
401: "Unauthorized",
402: "Payment Required",
......@@ -273,8 +306,7 @@ class http_request:
502: "Bad Gateway",
503: "Service Unavailable",
504: "Gateway Time-out",
505: "HTTP Version not supported",
507: "Insufficient Storage"
505: "HTTP Version not supported"
}
# Default error message
......@@ -304,7 +336,6 @@ class http_channel (asynchat.async_chat):
current_request = None
channel_counter = counter()
writable=asynchat.async_chat.writable_future
def __init__ (self, server, conn, addr):
self.channel_number = http_channel.channel_counter.increment()
......@@ -407,14 +438,9 @@ class http_channel (asynchat.async_chat):
h.handle_request (r)
except:
self.server.exceptions.increment()
t,v,tb = sys.exc_info()
(file,fun,line),tbinfo = asyncore.compact_traceback (t,v,tb)
while tb.tb_next:
tb = tb.tb_next
(file, fun, line), t, v, tbinfo = asyncore.compact_traceback()
# Log this to a better place.
print 'Server Error: %s, %s: file: %s line: %s' % (t,v,file,line)
# IMPORTANT: without this <del>, this entire connection will leak. [why?]
del t,v,tb
try:
r.error (500)
except:
......@@ -424,6 +450,10 @@ class http_channel (asynchat.async_chat):
# no handlers, so complain
r.error (404)
def writable (self):
# this is just the normal async_chat 'writable', here for comparison
return self.ac_out_buffer or len(self.producer_fifo)
def writable_for_proxy (self):
# this version of writable supports the idea of a 'stalled' producer
# [i.e., it's not ready to produce any output yet] This is needed by
......@@ -463,15 +493,20 @@ class http_server (asyncore.dispatcher):
self.set_reuse_addr()
self.bind ((ip, port))
self.listen (5)
# lower this to 5 if your OS complains
self.listen (1024)
host, port = self.socket.getsockname()
if not ip:
print 'Warning: computing default hostname'
self.server_name = socket.gethostbyaddr (
socket.gethostbyname (socket.gethostname())
)[0]
else:
ip = socket.gethostbyname (socket.gethostname())
try:
self.server_name = socket.gethostbyaddr (ip)[0]
except socket.error:
print 'Warning: cannot do reverse lookup'
self.server_name = ip # use the IP address as the "hostname"
self.server_port = port
self.total_clients = counter()
self.total_requests = counter()
......@@ -541,7 +576,7 @@ class http_server (asyncore.dispatcher):
handler_stats = filter (None, map (maybe_status, self.handlers))
if self.total_clients:
ratio = float(self.total_requests.as_long()/self.total_clients.as_long())
ratio = self.total_requests.as_long() / float(self.total_clients.as_long())
else:
ratio = 0.0
......@@ -598,13 +633,32 @@ def crack_request (r):
version = None
return string.lower (REQUEST.group (1)), REQUEST.group(2), version
class fifo(asynchat.fifo):
class fifo:
    "First-in-first-out queue backed by a plain list (exposed as .list)."

    def __init__ (self, list=None):
        # A falsy initial value (None or an empty sequence) starts a
        # fresh queue; a non-empty list is adopted as-is, not copied.
        self.list = list or []

    def __len__ (self):
        "Number of queued items."
        return len(self.list)

    def first (self):
        "Peek at the head of the queue without removing it."
        return self.list[0]

    def push_front (self, object):
        "Insert an item at the head of the queue."
        self.list.insert (0, object)

    def push (self, data):
        "Append an item to the tail of the queue."
        self.list.append (data)

    def pop (self):
        "Remove and return (1, head), or (0, None) when the queue is empty."
        if not self.list:
            return (0, None)
        return (1, self.list.pop(0))
def compute_timezone_for_log ():
if time.daylight:
......
......@@ -3,11 +3,14 @@
import asynchat
import socket
import string
import time # these three are for the rotating logger
import os # |
import stat # v
#
# three types of log:
# 1) file
# with optional flushing.
# with optional flushing. Also, one that rotates the log.
# 2) socket
# dump output directly to a socket connection. [how do we
# keep it open?]
......@@ -31,7 +34,7 @@ import string
class file_logger:
# pass this either a path or a file object.
def __init__ (self, file, flush=1, mode='wa'):
def __init__ (self, file, flush=1, mode='a'):
if type(file) == type(''):
if (file == '-'):
import sys
......@@ -73,6 +76,72 @@ class file_logger:
else:
self.write (message)
# like a file_logger, but it must be attached to a filename.
# When the log gets too full, or a certain time has passed,
# it backs up the log and starts a new one. Note that backing
# up the log is done via "mv" because anything else (cp, gzip)
# would take time, during which medusa would do nothing else.
class rotating_file_logger (file_logger):
    """A file_logger that periodically rotates its log file.

    If freq is 'daily', 'weekly' or 'monthly', the log is backed up at
    the corresponding midnight boundary.  Otherwise, if maxsize is not
    None, the log is backed up whenever it grows past maxsize bytes.
    If both are None the log is never backed up.  Rotation renames the
    current file (cheap, unlike copying or compressing, so medusa is
    not blocked) and reopens a fresh one under the original name.
    """

    def __init__ (self, file, freq=None, maxsize=None, flush=1, mode='a'):
        # Unlike file_logger, we require a path (not a file object),
        # because rotation renames the file on disk.
        self.filename = file
        self.mode = mode
        self.file = open (file, mode)
        self.freq = freq
        self.maxsize = maxsize
        self.rotate_when = self.next_backup(self.freq)
        self.do_flush = flush

    def __repr__ (self):
        return '<rotating-file logger: %s>' % self.file

    def next_backup (self, freq):
        """Return the time.time() of the next date-based backup, or None.

        We back up at midnight every 1) day, 2) monday, or 3) 1st of
        the month, depending on freq.
        """
        (yr, mo, day, hr, min, sec, wd, jday, dst) = time.localtime(time.time())
        # BUGFIX: time.mktime takes a single 9-tuple, not nine separate
        # arguments -- the original calls raised TypeError.  mktime also
        # normalizes out-of-range fields (day+1, mo+1), which is what
        # makes these "next boundary" computations work at month and
        # year ends.
        if freq == 'daily':
            return time.mktime((yr, mo, day+1, 0, 0, 0, 0, 0, -1))
        elif freq == 'weekly':
            # wd (weekday) is 0 for monday
            return time.mktime((yr, mo, day-wd+7, 0, 0, 0, 0, 0, -1))
        elif freq == 'monthly':
            return time.mktime((yr, mo+1, 1, 0, 0, 0, 0, 0, -1))
        else:
            return None                 # not a date-based backup

    def maybe_flush (self):
        # Rotate first if necessary, then flush as file_logger would.
        self.maybe_rotate()
        if self.do_flush:
            self.file.flush()

    def maybe_rotate (self):
        if self.freq and time.time() > self.rotate_when:
            # date-based rotation: rotate and schedule the next one
            self.rotate()
            self.rotate_when = self.next_backup(self.freq)
        elif self.maxsize:
            # size-based rotation: rotate when we get too big
            try:
                if os.stat(self.filename)[stat.ST_SIZE] > self.maxsize:
                    self.rotate()
            except os.error:
                # file not found, probably; rotate() will create a new one
                self.rotate()

    def rotate (self):
        """Rename the current log to <name>.endsYYYYMMDD and reopen it.

        If a backup with today's date already exists, an -HHMMSS suffix
        is appended to keep the name unique.  All errors are
        deliberately swallowed: logging must never take the server down.
        """
        (yr, mo, day, hr, min, sec, wd, jday, dst) = time.localtime(time.time())
        try:
            self.file.close()
            newname = '%s.ends%04d%02d%02d' % (self.filename, yr, mo, day)
            try:
                open(newname, "r").close()  # check if the backup name exists
                newname = newname + "-%02d%02d%02d" % (hr, min, sec)
            except:                         # no collision: the date alone is unique
                pass
            os.rename(self.filename, newname)
            self.file = open(self.filename, self.mode)
        except:
            pass
# syslog is a line-oriented log protocol - this class would be
# appropriate for FTP or HTTP logs, but not for dumping stderr to.
......
# -*- Mode: Python; tab-width: 4 -*-
RCS_ID = '$Id: producers.py,v 1.4 1999/01/18 22:45:22 amos Exp $'
RCS_ID = '$Id: producers.py,v 1.5 1999/04/09 00:37:33 amos Exp $'
import string
......@@ -11,37 +11,8 @@ in various ways to get interesting and useful behaviors.
For example, you can feed dynamically-produced output into the compressing
producer, then wrap this with the 'chunked' transfer-encoding producer.
Producer Interface:
All producers have a 'more' method. 'more' returns a string of output.
'more' can be called multiple times. When 'more' returns '', the producer
is exhausted.
Optional Future Producer Interface:
The future producer interface adds a 'ready' method to producers. This
allows future producers which may not be ready until after they are
created. Returning false means that a call to 'more' will not give you
useful information, right now, but will later. When a producer is exhausted,
it should return true for 'ready'. Producers which are not ready should raise
'NotReady' when their 'more' method is called.
Note: Not having a 'ready' method implies that a producer is always ready.
Note: Composite producers will probably have to consult their sub-produces
to ensure readiness.
Note: If you don't wish to use future producers nothing changes. Simply don't
call a producer's ready method. Everything works as before.
"""
class NotReady(Exception):
"""Raised by future producers when their more method is called
when they are not ready."""
pass
class simple_producer:
"producer for a string"
def __init__ (self, data, buffer_size=1024):
......@@ -84,6 +55,9 @@ class lines_producer:
def __init__ (self, lines):
self.lines = lines
def ready (self):
return len(self.lines)
def more (self):
if self.lines:
chunk = self.lines[:50]
......@@ -92,6 +66,24 @@ class lines_producer:
else:
return ''
class buffer_list_producer:
    "producer for a list of buffers"
    # yields the same byte stream as string.join (buffers, ''),
    # returning one buffer per call to more()

    def __init__ (self, buffers):
        self.index = 0
        self.buffers = buffers

    def more (self):
        "Return the next buffer in the list, or '' once exhausted."
        pos = self.index
        if pos >= len(self.buffers):
            return ''
        self.index = pos + 1
        return self.buffers[pos]
class file_producer:
"producer wrapper for file[-like] objects"
......@@ -115,7 +107,6 @@ class file_producer:
else:
return data
# A simple output producer. This one does not [yet] have
# the safety feature builtin to the monitor channel: runaway
# output will not be caught.
......@@ -125,10 +116,8 @@ class file_producer:
class output_producer:
"Acts like an output file; suitable for capturing sys.stdout"
def __init__ (self):
self.data = ''
self.closed = None
def write (self, data):
lines = string.splitfields (data, '\n')
......@@ -145,7 +134,7 @@ class output_producer:
) + '\r\n'
def ready (self):
return (len (self.data) > 0) or self.closed
return (len (self.data) > 0)
def flush (self):
pass
......@@ -161,25 +150,12 @@ class output_producer:
else:
return ''
def close(self):
self.closed=1
class composite_producer:
"combine a fifo of producers into one"
def __init__ (self, producers):
self.producers = producers
self.buffer = ''
def more (self):
if self.buffer:
b=self.buffer
self.buffer=''
return b
# we should only get here when not
# using the ready option
while len(self.producers):
p = self.producers.first()
d = p.more()
......@@ -190,21 +166,6 @@ class composite_producer:
else:
return ''
def ready(self):
# This producer requires a buffer to ensure
# that it really is ready when it says so
if self.buffer or len(self.producers)==0:
return 1
while self.producers.ready():
p = self.producers.first()
d = p.more()
if d:
self.buffer=d
else:
self.producers.pop()
if self.buffer or len(self.producers)==0:
return 1
class globbing_producer:
"""
......@@ -229,13 +190,6 @@ class globbing_producer:
self.buffer = ''
return r
def ready(self):
# XXX doesn't in fact guarentee ready. Should probably
# redo this one like the composite producer... But
# it's not a big deal, cause refill_buffer will
# catch the NotReady exception...
return not hasattr(self.producer,'ready') or self.producer.ready()
class hooked_producer:
"""
......@@ -261,9 +215,6 @@ class hooked_producer:
else:
return ''
def ready(self):
return not hasattr(self.producer,'ready') or self.producer.ready()
# HTTP 1.1 emphasizes that an advertised Content-Length header MUST be
# correct. In the face of Strange Files, it is conceivable that
# reading a 'file' may produce an amount of data not matching that
......@@ -303,9 +254,6 @@ class chunked_producer:
else:
return ''
def ready(self):
return not hasattr(self.producer,'ready') or self.producer.ready()
# Unfortunately this isn't very useful right now (Aug 97), because
# apparently the browsers don't do on-the-fly decompression. Which
# is sad, because this could _really_ speed things up, especially for
......@@ -349,9 +297,6 @@ class compressed_producer:
else:
return ''
def ready(self):
return not hasattr(self.producer,'ready') or self.producer.ready()
class escaping_producer:
"A producer that escapes a sequence of characters"
......@@ -384,7 +329,3 @@ class escaping_producer:
return buffer
else:
return buffer
def ready(self):
return not hasattr(self.producer,'ready') or self.producer.ready()
......@@ -4,7 +4,7 @@
# Author: Sam Rushing <rushing@nightmare.com>
#
RCS_ID = '$Id: resolver.py,v 1.1 1999/01/09 03:17:32 amos Exp $'
RCS_ID = '$Id: resolver.py,v 1.2 1999/04/09 00:37:33 amos Exp $'
# Fast, low-overhead asynchronous name resolver. uses 'pre-cooked'
# DNS requests, unpacks only as much as it needs of the reply.
......@@ -247,10 +247,8 @@ class resolver (asyncore.dispatcher):
try:
callback (host, ttl, answer)
except:
t,v,tb = sys.exc_info()
(file,fun,line), tbinfo = asyncore.compact_traceback (t,v,tb)
print t,v
print tbinfo
(file,fun,line), t, v, tbinfo = asyncore.compact_traceback()
print t,v,tbinfo
class rbl (resolver):
......
# -*- Mode: Python; tab-width: 4 -*-
VERSION_STRING = "$Id: status_handler.py,v 1.2 1999/04/09 00:37:33 amos Exp $"
#
# medusa status extension
#
......@@ -32,11 +34,15 @@ def split_path (path):
class status_extension:
hit_counter = counter()
hyper_regex = regex.compile ('/status/object/\([0-9]+\)/.*')
def __init__ (self, objects, regexp='/status\(/.*\)?'):
def __init__ (self, objects, statusdir='/status', allow_emergency_debug=0):
self.objects = objects
self.regexp = regex.compile (regexp)
self.statusdir = statusdir
self.allow_emergency_debug = allow_emergency_debug
# We use /status instead of statusdir here because it's too
# hard to pass statusdir to the logger, who makes the HREF
# to the object dir. We don't need the security-through-
# obscurity here in any case, because the id is obscurity enough
self.hyper_regex = regex.compile('/status/object/\([0-9]+\)/.*')
self.hyper_objects = []
for object in objects:
self.register_hyper_object (object)
......@@ -49,7 +55,9 @@ class status_extension:
def match (self, request):
[path, params, query, fragment] = split_path (request.uri)
return self.regexp.match (path) == len(path)
# For reasons explained above, we don't use statusdir for /object
return (path[:len(self.statusdir)] == self.statusdir or
path[:len("/status/object/")] == '/status/object/')
# Possible Targets:
# /status
......@@ -67,7 +75,7 @@ class status_extension:
def handle_request (self, request):
[path, params, query, fragment] = split_path (request.uri)
self.hit_counter.increment()
if path == '/status':
if path == self.statusdir: # and not a subdirectory
up_time = string.join (english_time (long(time.time()) - START_TIME))
request['Content-Type'] = 'text/html'
request.push (
......@@ -81,23 +89,25 @@ class status_extension:
request.push (self.objects[i].status())
request.push ('<hr>\r\n')
request.push (
'<p><a href="/status/channel_list">Channel List</a>'
'<p><a href="%s/channel_list">Channel List</a>'
'<hr>'
'<img src="/status/medusa.gif" align=right width=%d height=%d>' % (
'<img src="%s/medusa.gif" align=right width=%d height=%d>'
'</body></html>' % (
self.statusdir,
self.statusdir,
medusa_gif.width,
medusa_gif.height
) +
'</body></html>'
)
)
request.done()
elif path == '/status/channel_list':
elif path == self.statusdir + '/channel_list':
request['Content-Type'] = 'text/html'
request.push ('<html><body>')
request.push(channel_list_producer())
request.push(channel_list_producer(self.statusdir))
request.push (
'<hr>'
'<img src="/status/medusa.gif" align=right width=%d height=%d>' % (
'<img src="%s/medusa.gif" align=right width=%d height=%d>' % (
self.statusdir,
medusa_gif.width,
medusa_gif.height
) +
......@@ -105,16 +115,16 @@ class status_extension:
)
request.done()
elif path == '/status/medusa.gif':
elif path == self.statusdir + '/medusa.gif':
request['Content-Type'] = 'image/gif'
request['Content-Length'] = len(medusa_gif.data)
request.push (medusa_gif.data)
request.done()
elif path == '/status/close_zombies':
elif path == self.statusdir + '/close_zombies':
message = (
'<h2>Closing all zombie http client connections...</h2>'
'<p><a href="/status">Back to the status page</a>'
'<p><a href="%s">Back to the status page</a>' % self.statusdir
)
request['Content-Type'] = 'text/html'
request['Content-Length'] = len (message)
......@@ -131,8 +141,7 @@ class status_extension:
# If a server is running away from you, don't KILL it!
# Move all the AF_INET server ports and perform an autopsy...
# [disabled by default to protect the innocent]
#elif path == '/status/emergency_debug':
elif 0:
elif self.allow_emergency_debug and path == self.statusdir + '/emergency_debug':
request.push ('<html>Moving All Servers...</html>')
request.done()
for channel in asyncore.socket_map.keys():
......@@ -154,6 +163,7 @@ class status_extension:
if id (object) == oid:
if hasattr (object, 'hyper_respond'):
object.hyper_respond (self, path, request)
else:
request.error (404)
return
......@@ -200,7 +210,7 @@ class lines_producer:
return ''
class channel_list_producer (lines_producer):
def __init__ (self):
def __init__ (self, statusdir):
channel_reprs = map (
lambda x: '&lt;' + repr(x)[1:-1] + '&gt;',
asyncore.socket_map.keys()
......@@ -212,7 +222,7 @@ class channel_list_producer (lines_producer):
'<pre>'
] + channel_reprs + [
'</pre>',
'<p><a href="/status">Status Report</a>'
'<p><a href="%s">Status Report</a>' % statusdir
]
)
......
"""This __init__.py file makes medusa into a package"""
# -*- Mode: Python; tab-width: 4 -*-
# $Id: asynchat.py,v 1.6 1999/02/05 02:14:39 amos Exp $
# $Id: asynchat.py,v 1.7 1999/04/09 00:37:33 amos Exp $
# Author: Sam Rushing <rushing@nightmare.com>
# ======================================================================
......@@ -28,8 +28,6 @@
import socket
import asyncore
import string
import types
from producers import NotReady
# This class adds support for 'chat' style protocols - where one side
# sends a 'command', and the other sends a response (examples would be
......@@ -49,19 +47,6 @@ from producers import NotReady
# method) up to the terminator, and then control will be returned to
# you - by calling your self.found_terminator() method
# Added support for sized input. If you set terminator to an integer
# or a long, instead of a string, then output will be collected and
# sent to 'collect_incoming_data' until the given number of bytes have
# been read. At that point, 'found_terminator' will be called.
# Added support for future producers. See producers.py for more info
# on the future producer interface. Suffice it to say that if you
# wish to handle future producers in your asynchat subclass, you
# need to call self.producer_fifo.ready in your writable method. For
# your convenience we include such a method, 'writable_future'. If you
# are not interested in future producers, simply don't use them. You
# incur no overhead.
class async_chat (asyncore.dispatcher):
"""This is an abstract class. You must derive from this class, and add
the two methods collect_incoming_data() and found_terminator()"""
......@@ -70,7 +55,6 @@ class async_chat (asyncore.dispatcher):
ac_in_buffer_size = 4096
ac_out_buffer_size = 4096
ac_in_buffer_read = 0
def __init__ (self, conn=None):
self.ac_in_buffer = ''
......@@ -79,13 +63,7 @@ class async_chat (asyncore.dispatcher):
asyncore.dispatcher.__init__ (self, conn)
def set_terminator (self, term):
"""Set the input delimiter.
Can be a fixed string of any length, or None,
or an integer or long to indicate a sized input.
"""
if term is None:
self.terminator = ''
else:
"Set the input delimiter. Can be a fixed string of any length, an integer, or None"
self.terminator = term
def get_terminator (self):
......@@ -101,8 +79,7 @@ class async_chat (asyncore.dispatcher):
try:
data = self.recv (self.ac_in_buffer_size)
except socket.error, why:
import sys
self.handle_error (sys.exc_type, sys.exc_value, sys.exc_traceback)
self.handle_error()
return
self.ac_in_buffer = self.ac_in_buffer + data
......@@ -113,40 +90,34 @@ class async_chat (asyncore.dispatcher):
# combos with a single recv(1024).
while self.ac_in_buffer:
lb = len(self.ac_in_buffer)
terminator = self.get_terminator()
# if terminator is numeric measure, then collect data
# until we have read that much. ac_in_buffer_read tracks
# how much data has been read.
if type(terminator)==types.IntType:
self.ac_in_buffer_read=self.ac_in_buffer_read + \
len(self.ac_in_buffer)
if self.ac_in_buffer_read < self.terminator:
self.collect_incoming_data(self.ac_in_buffer)
self.ac_in_buffer=''
elif self.ac_in_buffer_read == self.terminator:
self.collect_incoming_data(self.ac_in_buffer)
self.ac_in_buffer=''
self.ac_in_buffer_read=0
self.found_terminator()
if terminator is None:
# no terminator, collect it all
self.collect_incoming_data (self.ac_in_buffer)
self.ac_in_buffer = ''
elif type(terminator) == type(0):
# numeric terminator
n = terminator
lb = lb
if lb < n:
self.collect_incoming_data (self.ac_in_buffer)
self.ac_in_buffer = ''
self.terminator = self.terminator - lb
else:
border=self.terminator-(self.ac_in_buffer_read-len(data))
self.collect_incoming_data(self.ac_in_buffer[:border])
self.ac_in_buffer=self.ac_in_buffer[border:]
self.ac_in_buffer_read=0
data=''
self.collect_incoming_data (self.ac_in_buffer[:n])
self.ac_in_buffer = self.ac_in_buffer[n:]
self.terminator = 0
self.found_terminator()
continue
terminator_len = len(terminator)
# 4 cases:
else:
# 3 cases:
# 1) end of buffer matches terminator exactly:
# collect data, transition
# 2) end of buffer matches some prefix:
# collect data to the prefix
# 3) end of buffer does not match any prefix:
# collect data
# 4) no terminator, just collect the data
if terminator:
terminator_len = len(terminator)
index = string.find (self.ac_in_buffer, terminator)
if index != -1:
# we found the terminator
......@@ -158,6 +129,7 @@ class async_chat (asyncore.dispatcher):
# check for a prefix of the terminator
index = find_prefix_at_end (self.ac_in_buffer, terminator)
if index:
if index != lb:
# we found a prefix, collect up to the prefix
self.collect_incoming_data (self.ac_in_buffer[:-index])
self.ac_in_buffer = self.ac_in_buffer[-index:]
......@@ -166,10 +138,6 @@ class async_chat (asyncore.dispatcher):
# no prefix, collect it all
self.collect_incoming_data (self.ac_in_buffer)
self.ac_in_buffer = ''
else:
# no terminator, collect it all
self.collect_incoming_data (self.ac_in_buffer)
self.ac_in_buffer = ''
def handle_write (self):
self.initiate_send ()
......@@ -186,21 +154,21 @@ class async_chat (asyncore.dispatcher):
self.initiate_send()
def readable (self):
"predicate for inclusion in the readable for select()"
return (len(self.ac_in_buffer) <= self.ac_in_buffer_size)
def writable (self):
"predicate for inclusion in the writable for select()"
return len(self.ac_out_buffer) or len(self.producer_fifo) or (not self.connected)
# To use future producers override writable with writable_future
def writable_future(self):
return len(self.ac_out_buffer) or self.producer_fifo.ready() or (not self.connected)
def close_when_done (self):
"automatically close this channel once the outgoing queue is empty"
self.producer_fifo.push (None)
# refill the outgoing buffer by calling the more() method
# of the first producer in the queue
def refill_buffer (self):
_string_type = type('')
while 1:
if len(self.producer_fifo):
p = self.producer_fifo.first()
......@@ -211,11 +179,11 @@ class async_chat (asyncore.dispatcher):
self.producer_fifo.pop()
self.close()
return
try:
data = p.more()
except NotReady:
elif type(p) is _string_type:
self.producer_fifo.pop()
self.ac_out_buffer = self.ac_out_buffer + p
return
data = p.more()
if data:
self.ac_out_buffer = self.ac_out_buffer + data
return
......@@ -227,36 +195,29 @@ class async_chat (asyncore.dispatcher):
def initiate_send (self):
obs = self.ac_out_buffer_size
# try to refill the buffer
if (not self._push_mode) and (len (self.ac_out_buffer) < obs):
if (len (self.ac_out_buffer) < obs):
self.refill_buffer()
if self.ac_out_buffer and self.connected:
# try to send the buffer
try:
num_sent = self.send (self.ac_out_buffer[:obs])
if num_sent:
self.ac_out_buffer = self.ac_out_buffer[num_sent:]
except socket.error, why:
self.handle_error()
return
def discard_buffers (self):
# Emergencies only!
self.ac_in_buffer = ''
self.ac_out_buffer == ''
self.producer_fifo.list=[]
# ==================================================
# support for push mode.
# ==================================================
_push_mode = 0
def push_mode (self, boolean):
self._push_mode = boolean
def writable_push (self):
return self.connected and len(self.ac_out_buffer)
while self.producer_fifo:
self.producer_fifo.pop()
class simple_producer:
def __init__ (self, data, buffer_size=512):
self.data = data
self.buffer_size = buffer_size
......@@ -288,18 +249,13 @@ class fifo:
self.list.append (data)
def pop (self):
if self.ready():
if self.list:
result = self.list[0]
del self.list[0]
return (1, result)
else:
return (0, None)
def ready(self):
"Is the first producer in the fifo ready?"
if len(self.list):
return not hasattr(self.list[0],'ready') or self.list[0].ready()
# Given 'haystack', see if any prefix of 'needle' is at its end. This
# assumes an exact match has already been checked. Return the number of
# characters matched.
......
# -*- Mode: Python; tab-width: 4 -*-
# $Id: asyncore.py,v 1.1 1999/01/08 23:04:42 jim Exp $
# $Id: asyncore.py,v 1.2 1999/04/09 00:37:33 amos Exp $
# Author: Sam Rushing <rushing@nightmare.com>
# ======================================================================
......@@ -47,25 +47,20 @@ def poll (timeout=0.0):
sockets = socket_map.keys()
r = filter (lambda x: x.readable(), sockets)
w = filter (lambda x: x.writable(), sockets)
e = sockets[:]
e = []
(r,w,e) = select.select (r,w,e, timeout)
for x in e:
try:
x.handle_expt_event()
except:
x.handle_error (sys.exc_type, sys.exc_value, sys.exc_traceback)
for x in r:
try:
x.handle_read_event()
except:
x.handle_error (sys.exc_type, sys.exc_value, sys.exc_traceback)
x.handle_error()
for x in w:
try:
x.handle_write_event()
except:
x.handle_error (sys.exc_type, sys.exc_value, sys.exc_traceback)
x.handle_error()
def poll2 (timeout=0.0):
import poll
......@@ -85,7 +80,6 @@ def poll2 (timeout=0.0):
if flags:
l.append (fd, flags)
r = poll.poll (l, timeout)
print r
for fd, flags in r:
s = fd_map[fd]
try:
......@@ -96,7 +90,7 @@ def poll2 (timeout=0.0):
if (flags & poll.POLLERR):
s.handle_expt_event()
except:
apply (s.handle_error, sys.exc_info())
s.handle_error()
def loop (timeout=30.0, use_poll=0):
......@@ -114,7 +108,6 @@ class dispatcher:
connected = 0
accepting = 0
closing = 0
_fileno = None
addr = None
def __init__ (self, sock=None):
......@@ -150,14 +143,6 @@ class dispatcher:
self.log ('adding channel %s' % self)
socket_map [self] = 1
# we cache the original fileno, because after closing
# a socket, s.fileno() will return -1, and we want to
# continue tracking it via the original number.
def fileno (self):
if self._fileno is None:
self._fileno = self.socket.fileno()
return self._fileno
def del_channel (self):
if socket_map.has_key (self):
self.log ('closing channel %d:%s' % (self.fileno(), self))
......@@ -216,6 +201,7 @@ class dispatcher:
return self.socket.bind (addr)
def connect (self, address):
self.connected = 0
try:
self.socket.connect (address)
except socket.error, why:
......@@ -268,8 +254,6 @@ class dispatcher:
def close (self):
self.del_channel()
self.socket.close()
self._fileno = None
self.connected = 0
# cheap inheritance, used to pass all other attribute
# references to the underlying socket object.
......@@ -306,9 +290,8 @@ class dispatcher:
def handle_expt_event (self):
self.handle_expt()
def handle_error (self, *info):
(t,v,tb) = info
(file,fun,line), tbinfo = compact_traceback (t,v,tb)
def handle_error (self):
(file,fun,line), t, v, tbinfo = compact_traceback()
# sometimes a user repr method will crash.
try:
......@@ -319,12 +302,11 @@ class dispatcher:
print (
'uncaptured python exception, closing channel %s (%s:%s %s)' % (
self_repr,
str(t),
str(v),
t,
v,
tbinfo
)
)
del t,v,tb
self.close()
def handle_expt (self):
......@@ -339,9 +321,6 @@ class dispatcher:
def handle_connect (self):
self.log ('unhandled connect event')
def handle_oob (self):
self.log ('unhandled out-of-band event')
def handle_accept (self):
self.log ('unhandled accept event')
......@@ -380,7 +359,8 @@ class dispatcher_with_send (dispatcher):
# used for debugging.
# ---------------------------------------------------------------------------
def compact_traceback (t,v,tb):
def compact_traceback ():
t,v,tb = sys.exc_info()
tbinfo = []
while 1:
tbinfo.append (
......@@ -392,6 +372,9 @@ def compact_traceback (t,v,tb):
if not tb:
break
# just to be safe
del tb
file, function, line = tbinfo[-1]
info = '[' + string.join (
map (
......@@ -400,7 +383,7 @@ def compact_traceback (t,v,tb):
),
'] ['
) + ']'
return (file, function, line), info
return (file, function, line), t, v, info
def close_all ():
global socket_map
......@@ -457,3 +440,4 @@ if os.name == 'posix':
def set_file (self, fd):
self.socket = file_wrapper (fd)
self.add_channel()
......@@ -8,7 +8,7 @@
# interested in using this software in a commercial context, or in
# purchasing support, please contact the author.
RCS_ID = '$Id: default_handler.py,v 1.1 1999/01/09 03:17:31 amos Exp $'
RCS_ID = '$Id: default_handler.py,v 1.2 1999/04/09 00:37:33 amos Exp $'
# standard python modules
import os
......@@ -203,7 +203,7 @@ class default_handler:
request.done()
def set_content_type (self, path, request):
ext = get_extension (path)
ext = string.lower (get_extension (path))
if mime_type_table.content_type_map.has_key (ext):
request['Content-Type'] = mime_type_table.content_type_map[ext]
else:
......
# -*- Mode: Python; tab-width: 4 -*-
# $Id: filesys.py,v 1.1 1999/01/18 22:44:21 amos Exp $
# $Id: filesys.py,v 1.2 1999/04/09 00:37:33 amos Exp $
# Author: Sam Rushing <rushing@nightmare.com>
#
# Generic filesystem interface.
......
......@@ -8,7 +8,7 @@
# If you are interested in using this software in a commercial context,
# or in purchasing support, please contact the author.
RCS_ID = '$Id: ftp_server.py,v 1.1 1999/01/21 22:52:08 amos Exp $'
RCS_ID = '$Id: ftp_server.py,v 1.2 1999/04/09 00:37:33 amos Exp $'
# An extensible, configurable, asynchronous FTP server.
#
......@@ -157,8 +157,7 @@ class ftp_channel (asynchat.async_chat):
result = apply (fun, (line,))
except:
self.server.total_exceptions.increment()
t,v,tb = sys.exc_info()
(file, fun, line), ctb = asyncore.compact_traceback (t,v,tb)
(file, fun, line), t,v, tbinfo = asyncore.compact_traceback()
if self.client_dc:
try:
self.client_dc.close()
......@@ -166,7 +165,7 @@ class ftp_channel (asynchat.async_chat):
pass
self.respond (
'451 Server Error: %s, %s: file: %s line: %s' % (
str(t),str(v),file,line,
t,v,file,line,
)
)
......@@ -220,7 +219,7 @@ class ftp_channel (asynchat.async_chat):
# --------------------------------------------------
def check_command_authorization (self, command):
if command in ['stor', 'dele'] and self.read_only:
if command in self.write_commands and self.read_only:
return 0
else:
return 1
......@@ -375,9 +374,10 @@ class ftp_channel (asynchat.async_chat):
'prepare for server-to-server transfer'
pc = self.new_passive_acceptor()
port = pc.addr[1]
ip_addr = pc.control_channel.getsockname()[0]
self.respond (
'227 Entering Passive Mode. %s,%d,%d' % (
string.join (string.split (IP_ADDRESS, '.'), ','),
'227 Entering Passive Mode (%s,%d,%d)' % (
string.join (string.split (ip_addr, '.'), ','),
port/256,
port%256
)
......@@ -621,6 +621,22 @@ class ftp_channel (asynchat.async_chat):
'350 Restarting at %d. Send STORE or RETRIEVE to initiate transfer.' % pos
)
def cmd_stru (self, line):
    """Handle the obsolete FTP STRU command (set file transfer structure).

    Only the default 'F' (file) structure is accepted; any other
    argument draws a 504 reply.  [RFC 959 requires at least STRU F]
    """
    # assumes line is the split command, so line[1] is the argument
    # word -- TODO confirm against the dispatcher in this file
    if line[1] in 'fF':
        # f == 'file'
        self.respond ('200 STRU F Ok')
    else:
        self.respond ('504 Unimplemented STRU type')
def cmd_mode (self, line):
    """Handle the obsolete FTP MODE command (set file transfer mode).

    Only the default 'S' (stream) mode is accepted; any other
    argument draws a 502 reply.  [RFC 959 requires at least MODE S]
    """
    # assumes line is the split command, so line[1] is the argument
    # word -- TODO confirm against the dispatcher in this file
    if line[1] in 'sS':
        # s == 'stream' (comment in the original said "f == 'file'",
        # a copy-paste slip from cmd_stru)
        self.respond ('200 MODE S Ok')
    else:
        self.respond ('502 Unimplemented MODE type')
# The stat command has two personalities. Normally it returns status
# information about the current connection. But if given an argument,
# it is equivalent to the LIST command, with the data sent over the
......@@ -794,9 +810,6 @@ class passive_acceptor (asyncore.dispatcher):
asyncore.dispatcher.__init__ (self)
self.control_channel = control_channel
self.create_socket (socket.AF_INET, socket.SOCK_STREAM)
#self.bind ((IP_ADDRESS, 0))
#self.bind (('', 0))
# bind to an address on the interface that the
# control connection is coming from.
self.bind ((
......@@ -856,8 +869,7 @@ class xmit_channel (asynchat.async_chat):
self.bytes_out = self.bytes_out + result
return result
def handle_error (self, t,v,tb):
import errno
def handle_error (self):
# usually this is to catch an unexpected disconnect.
self.log ('unexpected disconnect on data xmit channel')
self.close()
......
......@@ -109,16 +109,15 @@ def build_http_date (when):
def parse_http_date (d):
d = string.lower (d)
# Thanks to Craig Silverstein <csilvers@google.com> for pointing
# out the DST discrepancy
if time.daylight:
tz = time.altzone
else:
tz = time.timezone
# rfc850 comes first, netscape uses it. <frown>
if rfc850_reg.match (d) == len(d):
return int (time.mktime (unpack_rfc850()) - tz)
retval = int (time.mktime (unpack_rfc850()) - tz)
elif rfc822_reg.match (d) == len(d):
return int (time.mktime (unpack_rfc822()) - tz)
retval = int (time.mktime (unpack_rfc822()) - tz)
else:
return 0
# Thanks to Craig Silverstein <csilvers@google.com> for pointing
# out the DST discrepancy
if time.daylight and time.localtime(retval)[-1] == 1: # DST correction
retval = retval + (tz - time.altzone)
return retval
......@@ -9,7 +9,7 @@
# interested in using this software in a commercial context, or in
# purchasing support, please contact the author.
RCS_ID = '$Id: http_server.py,v 1.5 1999/03/12 19:21:40 brian Exp $'
RCS_ID = '$Id: http_server.py,v 1.6 1999/04/09 00:37:33 amos Exp $'
# python modules
import os
......@@ -54,7 +54,6 @@ class http_request:
# If your clients are having trouble, you might want to disable this.
use_chunked = 1
# by default, this request object ignores user data.
collector = None
......@@ -70,6 +69,8 @@ class http_request:
'Date' : http_date.build_http_date (time.time())
}
self.request_number = http_request.request_counter.increment()
self._split_uri = None
self._header_cache = {}
# --------------------------------------------------
# reply header management
......@@ -92,6 +93,47 @@ class http_request:
'\r\n'
) + '\r\n\r\n'
# --------------------------------------------------
# split a uri
# --------------------------------------------------
# <path>;<params>?<query>#<fragment>
path_regex = regex.compile (
# path params query fragment
'\\([^;?#]*\\)\\(;[^?#]*\\)?\\(\\?[^#]*\)?\(#.*\)?'
)
def split_uri (self):
    """Split self.uri into its four components and cache the result.

    Returns a list of [path, params, query, fragment] (unmatched parts
    are None, per regex group semantics).  Raises ValueError when the
    uri does not fully match path_regex.
    """
    if self._split_uri is None:
        # old-style regex: match() returns the number of characters
        # matched, so anything short of len(uri) means a broken uri
        if self.path_regex.match (self.uri) != len(self.uri):
            raise ValueError, "Broken URI"
        else:
            # groups 1..4 of path_regex are path, params, query, fragment
            self._split_uri = map (lambda i,r=self.path_regex: r.group(i), range(1,5))
    return self._split_uri
def get_header_with_regex (self, head_reg, group):
    """Return group <group> of the first header line fully matched by
    head_reg, or '' when no line matches."""
    for line in self.header:
        # old-style regex: match() returns the count of characters
        # matched, so equality with len(line) means a full-line match
        if head_reg.match (line) == len(line):
            return head_reg.group(group)
    return ''
def get_header (self, header):
    """Return the value of the named request header, or None if absent.

    The lookup is case-insensitive, and both hits and misses are
    memoized in self._header_cache so repeated lookups are cheap.
    """
    header = string.lower (header)
    hc = self._header_cache
    if not hc.has_key (header):
        # scan for a line beginning 'name: ' (case-insensitive)
        h = header + ': '
        hl = len(h)
        for line in self.header:
            if string.lower (line[:hl]) == h:
                r = line[hl:]
                hc[header] = r
                return r
        # cache the miss too
        hc[header] = None
        return None
    else:
        return hc[header]
# --------------------------------------------------
# user data
# --------------------------------------------------
......@@ -112,11 +154,11 @@ class http_request:
'warning: unexpected end-of-record for incoming request\n'
)
def push (self, producer):
if type(producer) == type(''):
self.outgoing.push (producers.simple_producer (producer))
def push (self, thing):
if type(thing) == type(''):
self.outgoing.push (producers.simple_producer (thing))
else:
self.outgoing.push (producer)
self.outgoing.push (thing)
def response (self, code=200):
message = self.responses[code]
......@@ -209,13 +251,6 @@ class http_request:
if close_it:
self.channel.close_when_done()
def log (self, bytes):
print 'request %3d: %s %d bytes' % (
self.request_counter,
self.request,
bytes
)
def log_date_string (self, when):
return time.strftime (
'%d/%b/%Y:%H:%M:%S ',
......@@ -244,14 +279,12 @@ class http_request:
204: "No Content",
205: "Reset Content",
206: "Partial Content",
207: "Multi-Status",
300: "Multiple Choices",
301: "Moved Permanently",
302: "Moved Temporarily",
303: "See Other",
304: "Not Modified",
305: "Use Proxy",
307: "Temporary Redirect",
400: "Bad Request",
401: "Unauthorized",
402: "Payment Required",
......@@ -273,8 +306,7 @@ class http_request:
502: "Bad Gateway",
503: "Service Unavailable",
504: "Gateway Time-out",
505: "HTTP Version not supported",
507: "Insufficient Storage"
505: "HTTP Version not supported"
}
# Default error message
......@@ -304,7 +336,6 @@ class http_channel (asynchat.async_chat):
current_request = None
channel_counter = counter()
writable=asynchat.async_chat.writable_future
def __init__ (self, server, conn, addr):
self.channel_number = http_channel.channel_counter.increment()
......@@ -407,14 +438,9 @@ class http_channel (asynchat.async_chat):
h.handle_request (r)
except:
self.server.exceptions.increment()
t,v,tb = sys.exc_info()
(file,fun,line),tbinfo = asyncore.compact_traceback (t,v,tb)
while tb.tb_next:
tb = tb.tb_next
(file, fun, line), t, v, tbinfo = asyncore.compact_traceback()
# Log this to a better place.
print 'Server Error: %s, %s: file: %s line: %s' % (t,v,file,line)
# IMPORTANT: without this <del>, this entire connection will leak. [why?]
del t,v,tb
try:
r.error (500)
except:
......@@ -424,6 +450,10 @@ class http_channel (asynchat.async_chat):
# no handlers, so complain
r.error (404)
def writable (self):
# this is just the normal async_chat 'writable', here for comparison
return self.ac_out_buffer or len(self.producer_fifo)
def writable_for_proxy (self):
# this version of writable supports the idea of a 'stalled' producer
# [i.e., it's not ready to produce any output yet] This is needed by
......@@ -463,15 +493,20 @@ class http_server (asyncore.dispatcher):
self.set_reuse_addr()
self.bind ((ip, port))
self.listen (5)
# lower this to 5 if your OS complains
self.listen (1024)
host, port = self.socket.getsockname()
if not ip:
print 'Warning: computing default hostname'
self.server_name = socket.gethostbyaddr (
socket.gethostbyname (socket.gethostname())
)[0]
else:
ip = socket.gethostbyname (socket.gethostname())
try:
self.server_name = socket.gethostbyaddr (ip)[0]
except socket.error:
print 'Warning: cannot do reverse lookup'
self.server_name = ip # use the IP address as the "hostname"
self.server_port = port
self.total_clients = counter()
self.total_requests = counter()
......@@ -541,7 +576,7 @@ class http_server (asyncore.dispatcher):
handler_stats = filter (None, map (maybe_status, self.handlers))
if self.total_clients:
ratio = float(self.total_requests.as_long()/self.total_clients.as_long())
ratio = self.total_requests.as_long() / float(self.total_clients.as_long())
else:
ratio = 0.0
......@@ -598,13 +633,32 @@ def crack_request (r):
version = None
return string.lower (REQUEST.group (1)), REQUEST.group(2), version
class fifo(asynchat.fifo):
class fifo:
    "First-in-first-out queue backed by a plain python list."

    def __init__ (self, list=None):
        # A non-empty initial list is aliased (not copied); anything
        # falsy (None or an empty list) gets a fresh backing list.
        self.list = list or []

    def __len__ (self):
        return len(self.list)

    def first (self):
        # peek at the head without removing it
        return self.list[0]

    def push_front (self, object):
        # queue-jump: insert at the head of the line
        self.list.insert (0, object)

    def push (self, data):
        self.list.append (data)

    def pop (self):
        # returns (1, item) on success, (0, None) when empty
        if not self.list:
            return (0, None)
        head = self.list.pop(0)
        return (1, head)
def compute_timezone_for_log ():
if time.daylight:
......
......@@ -3,11 +3,14 @@
import asynchat
import socket
import string
import time # these three are for the rotating logger
import os # |
import stat # v
#
# three types of log:
# 1) file
# with optional flushing.
# with optional flushing. Also, one that rotates the log.
# 2) socket
# dump output directly to a socket connection. [how do we
# keep it open?]
......@@ -31,7 +34,7 @@ import string
class file_logger:
# pass this either a path or a file object.
def __init__ (self, file, flush=1, mode='wa'):
def __init__ (self, file, flush=1, mode='a'):
if type(file) == type(''):
if (file == '-'):
import sys
......@@ -73,6 +76,72 @@ class file_logger:
else:
self.write (message)
# like a file_logger, but it must be attached to a filename.
# When the log gets too full, or a certain time has passed,
# it backs up the log and starts a new one. Note that backing
# up the log is done via "mv" because anything else (cp, gzip)
# would take time, during which medusa would do nothing else.
class rotating_file_logger (file_logger):
    """A file_logger bound to a filename that rotates ('backs up') the
    log when it grows too large or when a time period elapses.

    Rotation renames the current file (os.rename is cheap, unlike
    cp/gzip, so medusa is not stalled) and reopens a fresh one.
    """

    # If freq is non-None we back up 'daily', 'weekly', or 'monthly'.
    # Else if maxsize is non-None we back up whenever the log gets
    # too big.  If both are None we never back up.
    def __init__ (self, file, freq=None, maxsize=None, flush=1, mode='a'):
        self.filename = file
        self.mode = mode
        self.file = open (file, mode)
        self.freq = freq
        self.maxsize = maxsize
        self.rotate_when = self.next_backup(self.freq)
        self.do_flush = flush

    def __repr__ (self):
        return '<rotating-file logger: %s>' % self.file

    # We back up at midnight every 1) day, 2) monday, or 3) 1st of month
    def next_backup (self, freq):
        """Return the time.time() value of the next scheduled rotation,
        or None for size-based (or no) rotation."""
        (yr, mo, day, hr, min, sec, wd, jday, dst) = time.localtime(time.time())
        # BUG FIX: time.mktime takes a single 9-tuple argument, not nine
        # positional arguments; the original calls raised TypeError.
        # mktime also normalizes out-of-range fields (e.g. day+1 past
        # the end of the month), which these computations rely on.
        if freq == 'daily':
            return time.mktime((yr, mo, day+1, 0,0,0, 0,0,-1))
        elif freq == 'weekly':
            # wd (weekday) is 0 for monday
            return time.mktime((yr, mo, day-wd+7, 0,0,0, 0,0,-1))
        elif freq == 'monthly':
            return time.mktime((yr, mo+1, 1, 0,0,0, 0,0,-1))
        else:
            return None                 # not a date-based backup

    def maybe_flush (self):             # rotate first if necessary
        self.maybe_rotate()
        if self.do_flush:               # from file_logger()
            self.file.flush()

    def maybe_rotate (self):
        if self.freq and time.time() > self.rotate_when:
            self.rotate()
            self.rotate_when = self.next_backup(self.freq)
        elif self.maxsize:              # rotate when we get too big
            try:
                if os.stat(self.filename)[stat.ST_SIZE] > self.maxsize:
                    self.rotate()
            except os.error:            # file not found, probably
                self.rotate()           # will create a new file

    def rotate (self):
        """Close the log, rename it to <name>.endsYYYYMMDD[-HHMMSS],
        and reopen a fresh file under the original name."""
        (yr, mo, day, hr, min, sec, wd, jday, dst) = time.localtime(time.time())
        try:
            self.file.close()
            newname = '%s.ends%04d%02d%02d' % (self.filename, yr, mo, day)
            try:
                open(newname, "r").close()  # check if file exists
                # name taken: disambiguate with the time of day
                newname = newname + "-%02d%02d%02d" % (hr, min, sec)
            except:                         # YEARMODY is unique
                pass
            os.rename(self.filename, newname)
            self.file = open(self.filename, self.mode)
        except:
            # best-effort: logging must never take the server down
            pass
# syslog is a line-oriented log protocol - this class would be
# appropriate for FTP or HTTP logs, but not for dumping stderr to.
......
# -*- Mode: Python; tab-width: 4 -*-
RCS_ID = '$Id: producers.py,v 1.4 1999/01/18 22:45:22 amos Exp $'
RCS_ID = '$Id: producers.py,v 1.5 1999/04/09 00:37:33 amos Exp $'
import string
......@@ -11,37 +11,8 @@ in various ways to get interesting and useful behaviors.
For example, you can feed dynamically-produced output into the compressing
producer, then wrap this with the 'chunked' transfer-encoding producer.
Producer Interface:
All producers have a 'more' method. 'more' returns a string of output.
'more' can be called multiple times. When 'more' returns '', the producer
is exhausted.
Optional Future Producer Interface:
The future producer interface adds a 'ready' method to producers. This
allows future producers which may not be ready until after they are
created. Returning false means that a call to 'more' will not give you
useful information, right now, but will later. When a producer is exhausted,
it should return true for 'ready'. Producers which are not ready should raise
'NotReady' when their 'more' method is called.
Note: Not having a 'ready' method implies that a producer is always ready.
Note: Composite producers will probably have to consult their sub-produces
to ensure readiness.
Note: If you don't wish to use future producers nothing changes. Simply don't
call a producer's ready method. Everything works as before.
"""
class NotReady(Exception):
"""Raised by future producers when their more method is called
when they are not ready."""
pass
class simple_producer:
"producer for a string"
def __init__ (self, data, buffer_size=1024):
......@@ -84,6 +55,9 @@ class lines_producer:
def __init__ (self, lines):
self.lines = lines
def ready (self):
return len(self.lines)
def more (self):
if self.lines:
chunk = self.lines[:50]
......@@ -92,6 +66,24 @@ class lines_producer:
else:
return ''
class buffer_list_producer:
    "producer for a list of buffers"
    # i.e., equivalent to producing string.join (buffers, ''),
    # but one buffer at a time.

    def __init__ (self, buffers):
        # position of the next buffer to hand out
        self.index = 0
        self.buffers = buffers

    def more (self):
        "return the next buffer, or '' once exhausted"
        try:
            chunk = self.buffers[self.index]
        except IndexError:
            return ''
        self.index = self.index + 1
        return chunk
class file_producer:
"producer wrapper for file[-like] objects"
......@@ -115,7 +107,6 @@ class file_producer:
else:
return data
# A simple output producer. This one does not [yet] have
# the safety feature builtin to the monitor channel: runaway
# output will not be caught.
......@@ -125,10 +116,8 @@ class file_producer:
class output_producer:
"Acts like an output file; suitable for capturing sys.stdout"
def __init__ (self):
    # Accumulated output not yet handed out via 'more'.
    self.data = ''
    # None while the pseudo-file is open; close() sets it to 1.
    self.closed = None
def write (self, data):
lines = string.splitfields (data, '\n')
......@@ -145,7 +134,7 @@ class output_producer:
) + '\r\n'
def ready (self):
return (len (self.data) > 0) or self.closed
return (len (self.data) > 0)
def flush (self):
    # No-op: output is buffered in self.data until pulled by the
    # consumer, so there is nothing to flush. Present only so this
    # object can stand in for a real file (e.g. as sys.stdout).
    pass
......@@ -161,25 +150,12 @@ class output_producer:
else:
return ''
def close(self):
    # Mark the pseudo-file closed so consumers can detect end of output.
    self.closed=1
class composite_producer:
"combine a fifo of producers into one"
def __init__ (self, producers):
    # 'producers' is a fifo of producer objects, consumed front-first
    # via its first()/pop() methods.
    self.producers = producers
    # One-chunk lookahead buffer: filled by ready(), drained by more(),
    # so that a true answer from ready() is backed by real data.
    self.buffer = ''
def more (self):
if self.buffer:
b=self.buffer
self.buffer=''
return b
# we should only get here when not
# using the ready option
while len(self.producers):
p = self.producers.first()
d = p.more()
......@@ -190,21 +166,6 @@ class composite_producer:
else:
return ''
def ready(self):
    # This producer requires a buffer to ensure
    # that it really is ready when it says so
    # Data already buffered, or nothing left to produce: ready.
    if self.buffer or len(self.producers)==0:
        return 1
    # Pull from the fifo while it reports ready, trying to fill the buffer.
    while self.producers.ready():
        p = self.producers.first()
        d = p.more()
        if d:
            self.buffer=d
        else:
            # Exhausted sub-producer: drop it and move on to the next.
            self.producers.pop()
        # NOTE(review): indentation was lost in this rendering; this check
        # is assumed to sit inside the loop, returning as soon as data is
        # buffered or every sub-producer is consumed — confirm against the
        # upstream medusa producers.py. Falls through (returns None, i.e.
        # not ready) when the fifo itself is not ready.
        if self.buffer or len(self.producers)==0:
            return 1
class globbing_producer:
"""
......@@ -229,13 +190,6 @@ class globbing_producer:
self.buffer = ''
return r
def ready(self):
    # Delegate readiness to the wrapped producer when it implements the
    # future-producer interface; a producer with no 'ready' method is
    # treated as always ready.
    # XXX This doesn't actually guarantee readiness (a buffered scheme
    # like composite_producer's would), but it's not a big deal because
    # refill_buffer will catch the NotReady exception.
    if hasattr(self.producer, 'ready'):
        return self.producer.ready()
    return True
class hooked_producer:
"""
......@@ -261,9 +215,6 @@ class hooked_producer:
else:
return ''
def ready(self):
    # A wrapped producer without a 'ready' method is always ready;
    # otherwise defer to its own readiness report.
    if hasattr(self.producer, 'ready'):
        return self.producer.ready()
    return True
# HTTP 1.1 emphasizes that an advertised Content-Length header MUST be
# correct. In the face of Strange Files, it is conceivable that
# reading a 'file' may produce an amount of data not matching that
......@@ -303,9 +254,6 @@ class chunked_producer:
else:
return ''
def ready(self):
    # Ready whenever the underlying producer is; producers lacking a
    # 'ready' method are assumed always ready.
    if hasattr(self.producer, 'ready'):
        return self.producer.ready()
    return True
# Unfortunately this isn't very useful right now (Aug 97), because
# apparently the browsers don't do on-the-fly decompression. Which
# is sad, because this could _really_ speed things up, especially for
......@@ -349,9 +297,6 @@ class compressed_producer:
else:
return ''
def ready(self):
    # Mirror the wrapped producer's readiness; absence of a 'ready'
    # method means the producer is always ready.
    if hasattr(self.producer, 'ready'):
        return self.producer.ready()
    return True
class escaping_producer:
"A producer that escapes a sequence of characters"
......@@ -384,7 +329,3 @@ class escaping_producer:
return buffer
else:
return buffer
def ready(self):
    # Pass the readiness question through to the wrapped producer;
    # one without a 'ready' method counts as always ready.
    if hasattr(self.producer, 'ready'):
        return self.producer.ready()
    return True
......@@ -4,7 +4,7 @@
# Author: Sam Rushing <rushing@nightmare.com>
#
RCS_ID = '$Id: resolver.py,v 1.1 1999/01/09 03:17:32 amos Exp $'
RCS_ID = '$Id: resolver.py,v 1.2 1999/04/09 00:37:33 amos Exp $'
# Fast, low-overhead asynchronous name resolver. uses 'pre-cooked'
# DNS requests, unpacks only as much as it needs of the reply.
......@@ -247,10 +247,8 @@ class resolver (asyncore.dispatcher):
try:
callback (host, ttl, answer)
except:
t,v,tb = sys.exc_info()
(file,fun,line), tbinfo = asyncore.compact_traceback (t,v,tb)
print t,v
print tbinfo
(file,fun,line), t, v, tbinfo = asyncore.compact_traceback()
print t,v,tbinfo
class rbl (resolver):
......
# -*- Mode: Python; tab-width: 4 -*-
VERSION_STRING = "$Id: status_handler.py,v 1.2 1999/04/09 00:37:33 amos Exp $"
#
# medusa status extension
#
......@@ -32,11 +34,15 @@ def split_path (path):
class status_extension:
hit_counter = counter()
hyper_regex = regex.compile ('/status/object/\([0-9]+\)/.*')
def __init__ (self, objects, regexp='/status\(/.*\)?'):
def __init__ (self, objects, statusdir='/status', allow_emergency_debug=0):
self.objects = objects
self.regexp = regex.compile (regexp)
self.statusdir = statusdir
self.allow_emergency_debug = allow_emergency_debug
# We use /status instead of statusdir here because it's too
# hard to pass statusdir to the logger, who makes the HREF
# to the object dir. We don't need the security-through-
# obscurity here in any case, because the id is obscurity enough
self.hyper_regex = regex.compile('/status/object/\([0-9]+\)/.*')
self.hyper_objects = []
for object in objects:
self.register_hyper_object (object)
......@@ -49,7 +55,9 @@ class status_extension:
def match (self, request):
[path, params, query, fragment] = split_path (request.uri)
return self.regexp.match (path) == len(path)
# For reasons explained above, we don't use statusdir for /object
return (path[:len(self.statusdir)] == self.statusdir or
path[:len("/status/object/")] == '/status/object/')
# Possible Targets:
# /status
......@@ -67,7 +75,7 @@ class status_extension:
def handle_request (self, request):
[path, params, query, fragment] = split_path (request.uri)
self.hit_counter.increment()
if path == '/status':
if path == self.statusdir: # and not a subdirectory
up_time = string.join (english_time (long(time.time()) - START_TIME))
request['Content-Type'] = 'text/html'
request.push (
......@@ -81,23 +89,25 @@ class status_extension:
request.push (self.objects[i].status())
request.push ('<hr>\r\n')
request.push (
'<p><a href="/status/channel_list">Channel List</a>'
'<p><a href="%s/channel_list">Channel List</a>'
'<hr>'
'<img src="/status/medusa.gif" align=right width=%d height=%d>' % (
'<img src="%s/medusa.gif" align=right width=%d height=%d>'
'</body></html>' % (
self.statusdir,
self.statusdir,
medusa_gif.width,
medusa_gif.height
) +
'</body></html>'
)
)
request.done()
elif path == '/status/channel_list':
elif path == self.statusdir + '/channel_list':
request['Content-Type'] = 'text/html'
request.push ('<html><body>')
request.push(channel_list_producer())
request.push(channel_list_producer(self.statusdir))
request.push (
'<hr>'
'<img src="/status/medusa.gif" align=right width=%d height=%d>' % (
'<img src="%s/medusa.gif" align=right width=%d height=%d>' % (
self.statusdir,
medusa_gif.width,
medusa_gif.height
) +
......@@ -105,16 +115,16 @@ class status_extension:
)
request.done()
elif path == '/status/medusa.gif':
elif path == self.statusdir + '/medusa.gif':
request['Content-Type'] = 'image/gif'
request['Content-Length'] = len(medusa_gif.data)
request.push (medusa_gif.data)
request.done()
elif path == '/status/close_zombies':
elif path == self.statusdir + '/close_zombies':
message = (
'<h2>Closing all zombie http client connections...</h2>'
'<p><a href="/status">Back to the status page</a>'
'<p><a href="%s">Back to the status page</a>' % self.statusdir
)
request['Content-Type'] = 'text/html'
request['Content-Length'] = len (message)
......@@ -131,8 +141,7 @@ class status_extension:
# If a server is running away from you, don't KILL it!
# Move all the AF_INET server ports and perform an autopsy...
# [disabled by default to protect the innocent]
#elif path == '/status/emergency_debug':
elif 0:
elif self.allow_emergency_debug and path == self.statusdir + '/emergency_debug':
request.push ('<html>Moving All Servers...</html>')
request.done()
for channel in asyncore.socket_map.keys():
......@@ -154,6 +163,7 @@ class status_extension:
if id (object) == oid:
if hasattr (object, 'hyper_respond'):
object.hyper_respond (self, path, request)
else:
request.error (404)
return
......@@ -200,7 +210,7 @@ class lines_producer:
return ''
class channel_list_producer (lines_producer):
def __init__ (self):
def __init__ (self, statusdir):
channel_reprs = map (
lambda x: '&lt;' + repr(x)[1:-1] + '&gt;',
asyncore.socket_map.keys()
......@@ -212,7 +222,7 @@ class channel_list_producer (lines_producer):
'<pre>'
] + channel_reprs + [
'</pre>',
'<p><a href="/status">Status Report</a>'
'<p><a href="%s">Status Report</a>' % statusdir
]
)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment