Commit c658c172 authored Mar 18, 2003 by Fred Drake
Move ZServer into new location, including configuration support from the
new-install-branch.
parent 5aa6b549
Showing 69 changed files with 12207 additions and 0 deletions (+12207, -0)
lib/python/ZServer/AccessLogger.py                       +31   -0
lib/python/ZServer/DebugLogger.py                        +57   -0
lib/python/ZServer/FCGIServer.py                         +766  -0
lib/python/ZServer/FTPRequest.py                         +124  -0
lib/python/ZServer/FTPResponse.py                        +95   -0
lib/python/ZServer/FTPServer.py                          +642  -0
lib/python/ZServer/HTTPResponse.py                       +310  -0
lib/python/ZServer/HTTPServer.py                         +408  -0
lib/python/ZServer/ICPServer.py                          +128  -0
lib/python/ZServer/INSTALL.txt                           +123  -0
lib/python/ZServer/PCGIServer.py                         +399  -0
lib/python/ZServer/Producers.py                          +104  -0
lib/python/ZServer/PubCore/ZEvent.py                     +37   -0
lib/python/ZServer/PubCore/ZRendezvous.py                +63   -0
lib/python/ZServer/PubCore/ZServerPublisher.py           +26   -0
lib/python/ZServer/PubCore/__init__.py                   +30   -0
lib/python/ZServer/README.txt                            +236  -0
lib/python/ZServer/WebDAVSrcHandler.py                   +59   -0
lib/python/ZServer/ZService.py                           +241  -0
lib/python/ZServer/__init__.py                           +78   -0
lib/python/ZServer/component.xml                         +61   -0
lib/python/ZServer/datatypes.py                          +167  -0
lib/python/ZServer/medusa/__init__.py                    +3    -0
lib/python/ZServer/medusa/chat_server.py                 +150  -0
lib/python/ZServer/medusa/counter.py                     +47   -0
lib/python/ZServer/medusa/default_handler.py             +217  -0
lib/python/ZServer/medusa/dist/license.html              +26   -0
lib/python/ZServer/medusa/docs/README.html               +238  -0
lib/python/ZServer/medusa/docs/composing_producers.gif   +0    -0
lib/python/ZServer/medusa/docs/data_flow.gif             +0    -0
lib/python/ZServer/medusa/docs/data_flow.html            +83   -0
lib/python/ZServer/medusa/docs/producers.gif             +0    -0
lib/python/ZServer/medusa/docs/proxy_notes.txt           +36   -0
lib/python/ZServer/medusa/event_loop.py                  +93   -0
lib/python/ZServer/medusa/fifo.py                        +203  -0
lib/python/ZServer/medusa/filesys.py                     +469  -0
lib/python/ZServer/medusa/ftp_server.py                  +1135 -0
lib/python/ZServer/medusa/http_bobo.py                   +75   -0
lib/python/ZServer/medusa/http_date.py                   +134  -0
lib/python/ZServer/medusa/http_server.py                 +826  -0
lib/python/ZServer/medusa/logger.py                      +275  -0
lib/python/ZServer/medusa/m_syslog.py                    +181  -0
lib/python/ZServer/medusa/medusa.html                    +290  -0
lib/python/ZServer/medusa/medusa_gif.py                  +8    -0
lib/python/ZServer/medusa/mime_type_table.py             +113  -0
lib/python/ZServer/medusa/monitor.py                     +353  -0
lib/python/ZServer/medusa/monitor_client.py              +126  -0
lib/python/ZServer/medusa/monitor_client_win32.py        +53   -0
lib/python/ZServer/medusa/producers.py                   +331  -0
lib/python/ZServer/medusa/put_handler.py                 +115  -0
lib/python/ZServer/medusa/redirecting_handler.py         +46   -0
lib/python/ZServer/medusa/resolver.py                    +445  -0
lib/python/ZServer/medusa/status_handler.py              +282  -0
lib/python/ZServer/medusa/test/__init__.py               +2    -0
lib/python/ZServer/medusa/test/asyn_http_bench.py        +98   -0
lib/python/ZServer/medusa/test/max_sockets.py            +65   -0
lib/python/ZServer/medusa/test/test_11.py                +110  -0
lib/python/ZServer/medusa/test/test_lb.py                +159  -0
lib/python/ZServer/medusa/test/test_medusa.py            +51   -0
lib/python/ZServer/medusa/test/test_single_11.py         +53   -0
lib/python/ZServer/medusa/test/tests.txt                 +73   -0
lib/python/ZServer/medusa/thread/__init__.py             +2    -0
lib/python/ZServer/medusa/thread/pi_module.py            +62   -0
lib/python/ZServer/medusa/thread/select_trigger.py       +282  -0
lib/python/ZServer/medusa/thread/test_module.py          +13   -0
lib/python/ZServer/medusa/thread/thread_channel.py       +129  -0
lib/python/ZServer/medusa/thread/thread_handler.py       +364  -0
lib/python/ZServer/tests/__init__.py                     +15   -0
lib/python/ZServer/tests/test_config.py                  +191  -0
lib/python/ZServer/AccessLogger.py  (new file, 0 → 100644)

##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""
A logging module which handles ZServer access log messages.

This depends on Vinay Sajip's PEP 282 logging module.
"""

import logging

from zLOG.BaseLogger import BaseLogger


class AccessLogger(BaseLogger):
    logger = logging.getLogger('access')

    def log(self, message):
        if not self.logger.handlers:
            # don't log if we have no handlers
            return
        if message.endswith('\n'):
            message = message[:-1]
        self.logger.warn(message)

access_logger = AccessLogger()
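
AccessLogger only emits when the 'access' logger has at least one handler attached. A minimal sketch of wiring it up, assuming the standard PEP 282 logging package and the import path implied by the file location above:

    import logging
    from ZServer.AccessLogger import access_logger

    # Nothing is written until a handler is attached to the 'access'
    # logger that AccessLogger uses; a StreamHandler is the simplest.
    logging.getLogger('access').addHandler(logging.StreamHandler())
    access_logger.log(
        '127.0.0.1 - - [18/Mar/2003:10:30:00 +0000] "GET / HTTP/1.0" 200 1024\n')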
lib/python/ZServer/DebugLogger.py  (new file, 0 → 100644)

##############################################################################
#
# Copyright (c) 2001 Zope Corporation and Contributors. All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""
Logs debugging information about how ZServer is handling requests
and responses. This log can be used to help locate troublesome requests.

The format of a log message is:

    <code> <request id> <time> <data>

where:

    <code> is B for begin, I for received input, A for received output,
    E for sent output.

    <request id> is a unique request id.

    <time> is the local time in ISO 8601 format.

    <data> is the HTTP method and the PATH INFO for B, the size of the
    input for I, the HTTP status code and the size of the output for
    A, or nothing for E.
"""

import time
import logging

from zLOG.BaseLogger import BaseLogger


class DebugLogger(BaseLogger):
    logger = logging.getLogger('trace')

    def log(self, code, request_id, data=''):
        if not self.logger.handlers:
            return
        # Omitting the second parameter requires Python 2.2 or newer.
        t = time.strftime('%Y-%m-%dT%H:%M:%S')
        message = '%s %s %s %s' % (code, request_id, t, data)
        self.logger.warn(message)

debug_logger = DebugLogger()

log = debug_logger.log
reopen = debug_logger.reopen
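
To illustrate the format described in the docstring, one successful request traced through this logger produces four lines along these lines (the request id and sizes here are hypothetical):

    B 8465520 2003-03-18T10:30:00 GET /index_html
    I 8465520 2003-03-18T10:30:00 0
    A 8465520 2003-03-18T10:30:01 200 4096
    E 8465520 2003-03-18T10:30:01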
lib/python/ZServer/FCGIServer.py  (new file, 0 → 100644)

##############################################################################
#
# Copyright (c) 2001 Zope Corporation and Contributors. All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""
ZServer/Medusa FastCGI server, by Robin Dunn.

Accepts connections from a FastCGI enabled webserver, receives request
info using the FastCGI protocol, and then hands the request off to
ZPublisher for processing. The response is then handed back to the
webserver to send down to the browser.

See http://www.fastcgi.com/fcgi-devkit-2.1/doc/fcgi-spec.html for the
protocol specification.
"""

__version__ = "1.0"

#----------------------------------------------------------------------

import asynchat, asyncore
from medusa import logger
from medusa.counter import counter
from medusa.http_server import compute_timezone_for_log

from ZServer import CONNECTION_LIMIT, requestCloseOnExec

from PubCore import handle
from PubCore.ZEvent import Wakeup
from ZPublisher.HTTPResponse import HTTPResponse
from ZPublisher.HTTPRequest import HTTPRequest
from Producers import ShutdownProducer, LoggingProducer, \
     file_part_producer, file_close_producer
import DebugLogger

from cStringIO import StringIO
from tempfile import TemporaryFile
import socket, string, os, sys, time
from types import StringType
import thread

tz_for_log = compute_timezone_for_log()

#----------------------------------------------------------------------
# Set various FastCGI constants

# Maximum number of requests that can be handled. Apache mod_fastcgi
# never asks for these values, so we actually will handle as many
# connections/requests as they attempt, up to the limits of ZServer.
# These values are suitable defaults for any web server that does ask.
FCGI_MAX_CONNS = 10
FCGI_MAX_REQS = 50

# Supported version of the FastCGI protocol
FCGI_VERSION_1 = 1

# Boolean: can this application multiplex connections?
FCGI_MPXS_CONNS = 0

# Record types
FCGI_BEGIN_REQUEST = 1
FCGI_ABORT_REQUEST = 2
FCGI_END_REQUEST = 3
FCGI_PARAMS = 4
FCGI_STDIN = 5
FCGI_STDOUT = 6
FCGI_STDERR = 7
FCGI_DATA = 8
FCGI_GET_VALUES = 9
FCGI_GET_VALUES_RESULT = 10
FCGI_UNKNOWN_TYPE = 11
FCGI_MAXTYPE = FCGI_UNKNOWN_TYPE

# Types of management records
FCGI_ManagementTypes = [FCGI_GET_VALUES]

FCGI_NULL_REQUEST_ID = 0

# Masks for flags component of FCGI_BEGIN_REQUEST
FCGI_KEEP_CONN = 1

# Values for role component of FCGI_BEGIN_REQUEST
FCGI_RESPONDER = 1
FCGI_AUTHORIZER = 2
FCGI_FILTER = 3

# Values for protocolStatus component of FCGI_END_REQUEST
FCGI_REQUEST_COMPLETE = 0    # Request completed nicely
FCGI_CANT_MPX_CONN = 1       # This app can't multiplex
FCGI_OVERLOADED = 2          # New request rejected; too busy
FCGI_UNKNOWN_ROLE = 3        # Role value not known

#----------------------------------------------------------------------

class FCGIRecord:
    """
    This class represents the various record structures used in the
    FastCGI protocol. It knows how to read and build itself bits
    at a time as they are read from the FCGIChannel. There are really
    several different record types but in this case subclassing for
    each type is probably overkill.

    See the FastCGI spec for structure and other details for all these
    record types.
    """
    def __init__(self, header=None):
        if header:
            # extract the record header values.
            vals = map(ord, header)
            self.version = vals[0]
            self.recType = vals[1]
            self.reqId = (vals[2] << 8) + vals[3]
            self.contentLength = (vals[4] << 8) + vals[5]
            self.paddingLength = vals[6]
        else:
            self.version = FCGI_VERSION_1
            self.recType = FCGI_UNKNOWN_TYPE
            self.reqId = FCGI_NULL_REQUEST_ID
        self.content = ""

    def needContent(self):
        return (self.contentLength and not self.content)

    def needPadding(self):
        return self.paddingLength != 0

    def needMore(self):
        if self.needContent():
            return self.contentLength
        else:
            return self.paddingLength

    def gotPadding(self):
        self.paddingLength = 0

    def parseContent(self, data):
        c = self.content = data
        if self.recType == FCGI_BEGIN_REQUEST:
            self.role = (ord(c[0]) << 8) + ord(c[1])
            self.flags = ord(c[2])
        elif self.recType == FCGI_UNKNOWN_TYPE:
            self.unknownType = ord(c[0])
        elif self.recType == FCGI_GET_VALUES or self.recType == FCGI_PARAMS:
            self.values = {}
            pos = 0
            while pos < len(c):
                name, value, pos = self.readPair(c, pos)
                self.values[name] = value
        elif self.recType == FCGI_END_REQUEST:
            b = map(ord, c[0:4])
            self.appStatus = (b[0] << 24) + (b[1] << 16) + (b[2] << 8) + b[3]
            self.protocolStatus = ord(c[4])

    def readPair(self, st, pos):
        """
        Read the next name-value pair from st at pos.
        """
        nameLen = ord(st[pos])
        pos = pos + 1
        if nameLen & 0x80:
            # is the high bit set? if so, size is 4 bytes, not 1.
            b = map(ord, st[pos:pos + 3])
            pos = pos + 3
            nameLen = ((nameLen & 0x7F) << 24) + (b[0] << 16) + (b[1] << 8) + b[2]
        valueLen = ord(st[pos])
        pos = pos + 1
        if valueLen & 0x80:
            # same thing here...
            b = map(ord, st[pos:pos + 3])
            pos = pos + 3
            valueLen = ((valueLen & 0x7F) << 24) + (b[0] << 16) + (b[1] << 8) + b[2]
        # pull out the name and value and return with the updated position
        return (st[pos:pos + nameLen],
                st[pos + nameLen:pos + nameLen + valueLen],
                pos + nameLen + valueLen)

    def writePair(self, name, value):
        """
        Opposite of readPair
        """
        l = len(name)
        if l < 0x80:
            st = chr(l)
        else:
            st = chr(0x80 | (l >> 24) & 0xFF) + chr((l >> 16) & 0xFF) + \
                 chr((l >> 8) & 0xFF) + chr(l & 0xFF)
        l = len(value)
        if l < 0x80:
            st = st + chr(l)
        else:
            st = st + chr(0x80 | (l >> 24) & 0xFF) + chr((l >> 16) & 0xFF) + \
                 chr((l >> 8) & 0xFF) + chr(l & 0xFF)
        return st + name + value

    def getRecordAsString(self):
        """
        Format the record to be sent back to the web server.
        """
        content = self.content
        if self.recType == FCGI_BEGIN_REQUEST:
            content = chr(self.role >> 8) + chr(self.role & 0xFF) + \
                      chr(self.flags) + 5 * '\000'
        elif self.recType == FCGI_UNKNOWN_TYPE:
            content = chr(self.unknownType) + 7 * '\000'
        elif self.recType == FCGI_GET_VALUES or self.recType == FCGI_PARAMS:
            content = ""
            for i in self.values.keys():
                content = content + self.writePair(i, self.values[i])
        elif self.recType == FCGI_END_REQUEST:
            v = self.appStatus
            content = chr((v >> 24) & 0xFF) + chr((v >> 16) & 0xFF) + \
                      chr((v >> 8) & 0xFF) + chr(v & 0xFF)
            content = content + chr(self.protocolStatus) + 3 * '\000'

        cLen = len(content)
        eLen = (cLen + 7) & (0xFFFF - 7)    # align to an 8-byte boundary
        padLen = eLen - cLen

        hdr = [self.version,
               self.recType,
               self.reqId >> 8,
               self.reqId & 0xFF,
               cLen >> 8,
               cLen & 0xFF,
               padLen,
               0]
        hdr = string.join(map(chr, hdr), '')

        return hdr + content + padLen * '\000'

#----------------------------------------------------------------------

class FCGIChannel(asynchat.async_chat):
    """
    Process a FastCGI connection. This class implements most of the
    Application Server side of the protocol defined in
    http://www.fastcgi.com/fcgi-devkit-2.1/doc/fcgi-spec.html (which is
    the FastCGI Specification 1.0 from Open Market, Inc.) in a manner
    that is compatible with the asyncore medusa engine of ZServer.

    The main omission from the spec is support for multiplexing
    multiple requests on a single connection, but since none of the
    web servers support it (that I know of,) and since ZServer can
    easily multiplex multiple connections in the same process, it's no
    great loss.
    """
    closed = 0
    using_temp_stdin = None

    def __init__(self, server, sock, addr):
        self.server = server
        self.addr = addr
        asynchat.async_chat.__init__(self, sock)
        requestCloseOnExec(sock)
        self.setInitialState()
        self.remainingRecs = 1    # We have to read at least one
        self.env = {}
        self.stdin = StringIO()
        self.filterData = StringIO()    # not currently used, but maybe someday
        self.requestId = 0

    def setInitialState(self):
        self.data = StringIO()
        self.curRec = None
        self.set_terminator(8)    # FastCGI record header size.

    def readable(self):
        return self.remainingRecs != 0

    def collect_incoming_data(self, data):
        self.data.write(data)

    def found_terminator(self):
        # Are we starting a new record? If so, data is the header.
        if not self.curRec:
            self.curRec = FCGIRecord(self.data.getvalue())
            if self.curRec.needMore():
                self.set_terminator(self.curRec.needMore())
                self.data = StringIO()
                return
        rec = self.curRec

        # If waiting for record content, give it to the record.
        if rec.needContent():
            rec.parseContent(self.data.getvalue())
            if rec.needMore():
                self.set_terminator(rec.needMore())
                self.data = StringIO()
                return
        if rec.needPadding():
            rec.gotPadding()

        # If we get this far without returning, we've got the whole
        # record. Figure out what to do with it.

        if rec.recType in FCGI_ManagementTypes:
            # Apache mod_fastcgi doesn't send these, but others may
            self.handleManagementTypes(rec)

        elif rec.reqId == 0:
            # It's a management record of unknown type.
            # Complain about it...
            r2 = FCGIRecord()
            r2.recType = FCGI_UNKNOWN_TYPE
            r2.unknownType = rec.recType
            self.push(r2.getRecordAsString(), 0)

        # Since we don't actually have to do anything to ignore the
        # following conditions, they have been commented out and have
        # been left in the code for documentation purposes.

        # Ignore requests that aren't active
        # elif rec.reqId != self.requestId and rec.recType != FCGI_BEGIN_REQUEST:
        #     pass
        #
        # If we're already doing a request, ignore further BEGIN_REQUESTs
        # elif rec.recType == FCGI_BEGIN_REQUEST and self.requestId != 0:
        #     pass

        # Begin a new request
        elif rec.recType == FCGI_BEGIN_REQUEST and self.requestId == 0:
            self.requestId = rec.reqId
            if rec.role == FCGI_AUTHORIZER:
                self.remainingRecs = 1
            elif rec.role == FCGI_RESPONDER:
                self.remainingRecs = 2
            elif rec.role == FCGI_FILTER:
                self.remainingRecs = 3

        # Read some name-value pairs (the CGI environment)
        elif rec.recType == FCGI_PARAMS:
            if rec.contentLength == 0:
                # end of the stream
                if self.env.has_key('REQUEST_METHOD'):
                    method = self.env['REQUEST_METHOD']
                else:
                    method = 'GET'
                if self.env.has_key('PATH_INFO'):
                    path = self.env['PATH_INFO']
                else:
                    path = ''
                DebugLogger.log('B', id(self), '%s %s' % (method, path))
                self.remainingRecs = self.remainingRecs - 1
                self.content_length = string.atoi(
                    self.env.get('CONTENT_LENGTH', '0'))
            else:
                self.env.update(rec.values)

        # read some stdin data
        elif rec.recType == FCGI_STDIN:
            if rec.contentLength == 0:
                # end of the stream
                self.remainingRecs = self.remainingRecs - 1
            else:
                # see if stdin is getting too big, and
                # replace it with a tempfile if necessary
                if len(rec.content) + self.stdin.tell() > 1048576 and \
                        not self.using_temp_stdin:
                    t = TemporaryFile()
                    t.write(self.stdin.getvalue())
                    self.stdin = t
                    self.using_temp_stdin = 1
                self.stdin.write(rec.content)

        # read some filter data
        elif rec.recType == FCGI_DATA:
            if rec.contentLength == 0:
                # end of the stream
                self.remainingRecs = self.remainingRecs - 1
            else:
                self.filterData.write(rec.content)

        # We've processed the record. Now what do we do?
        if self.remainingRecs > 0:
            # prepare to get the next record
            self.setInitialState()
        else:
            # We've got them all. Let ZPublisher do its thang.
            DebugLogger.log('I', id(self), self.stdin.tell())
            # But first, fixup the auth header if using newest mod_fastcgi.
            if self.env.has_key('Authorization'):
                self.env['HTTP_AUTHORIZATION'] = self.env['Authorization']
                del self.env['Authorization']
            self.stdin.seek(0)
            self.send_response()

    def send_response(self):
        """
        Create output pipes, request, and response objects. Give them
        to ZPublisher for processing.
        """
        response = FCGIResponse(stdout=FCGIPipe(self, FCGI_STDOUT),
                                stderr=StringIO())
        response.setChannel(self)
        request = HTTPRequest(self.stdin, self.env, response)
        handle(self.server.module, request, response)

    def log_request(self, bytes):
        DebugLogger.log('E', id(self))
        if self.env.has_key('HTTP_USER_AGENT'):
            user_agent = self.env['HTTP_USER_AGENT']
        else:
            user_agent = ''
        if self.env.has_key('HTTP_REFERER'):
            referer = self.env['HTTP_REFERER']
        else:
            referer = ''
        if self.env.has_key('PATH_INFO'):
            path = self.env['PATH_INFO']
        else:
            path = ''
        if self.env.has_key('REQUEST_METHOD'):
            method = self.env['REQUEST_METHOD']
        else:
            method = "GET"

        if self.addr:
            self.server.logger.log(
                self.addr[0],
                '%s - - [%s] "%s %s" %d %d "%s" "%s"' % (
                    self.addr[1],
                    time.strftime('%d/%b/%Y:%H:%M:%S ',
                                  time.localtime(time.time())) + tz_for_log,
                    method, path, self.reply_code, bytes,
                    referer, user_agent
                    )
                )
        else:
            self.server.logger.log(
                '127.0.0.1 ',
                '- - [%s] "%s %s" %d %d "%s" "%s"' % (
                    time.strftime('%d/%b/%Y:%H:%M:%S ',
                                  time.localtime(time.time())) + tz_for_log,
                    method, path, self.reply_code, bytes,
                    referer, user_agent
                    )
                )

    def handleManagementTypes(self, rec):
        """
        The web server has asked us what features we support...
        """
        if rec.recType == FCGI_GET_VALUES:
            rec.recType = FCGI_GET_VALUES_RESULT
            vars = {'FCGI_MAX_CONNS': FCGI_MAX_CONNS,
                    'FCGI_MAX_REQS': FCGI_MAX_REQS,
                    'FCGI_MPXS_CONNS': FCGI_MPXS_CONNS}
            rec.values = vars
            self.push(rec.getRecordAsString(), 0)

    def sendDataRecord(self, data, recType):
        rec = FCGIRecord()
        rec.recType = recType
        rec.reqId = self.requestId
        # Can't send more than 64K minus header size. 8K seems about right.
        if type(data) == type(''):
            # send some string data
            while data:
                chunk = data[:8192]
                data = data[8192:]
                rec.content = chunk
                self.push(rec.getRecordAsString(), 0)
        else:
            # send a producer
            p, cLen = data
            eLen = (cLen + 7) & (0xFFFF - 7)    # align to an 8-byte boundary
            padLen = eLen - cLen
            hdr = [rec.version,
                   rec.recType,
                   rec.reqId >> 8,
                   rec.reqId & 0xFF,
                   cLen >> 8,
                   cLen & 0xFF,
                   padLen,
                   0]
            hdr = string.join(map(chr, hdr), '')
            self.push(hdr, 0)
            self.push(p, 0)
            self.push(padLen * '\000', 0)

    def sendStreamTerminator(self, recType):
        rec = FCGIRecord()
        rec.recType = recType
        rec.reqId = self.requestId
        rec.content = ""
        self.push(rec.getRecordAsString(), 0)

    def sendEndRecord(self, appStatus=0):
        rec = FCGIRecord()
        rec.recType = FCGI_END_REQUEST
        rec.reqId = self.requestId
        rec.protocolStatus = FCGI_REQUEST_COMPLETE
        rec.appStatus = appStatus
        self.push(rec.getRecordAsString(), 0)
        self.requestId = 0

    def push(self, producer, send=1):
        # this is thread-safe when send is false
        # note, that strings are not wrapped in
        # producers by default
        if self.closed:
            return
        self.producer_fifo.push(producer)
        if send:
            self.initiate_send()

    push_with_producer = push

    def close(self):
        self.closed = 1
        while self.producer_fifo:
            p = self.producer_fifo.first()
            if p is not None and type(p) != StringType:
                p.more()    # free up resources held by producer
            self.producer_fifo.pop()
        asyncore.dispatcher.close(self)

#----------------------------------------------------------------------

class FCGIServer(asyncore.dispatcher):
    """
    Listens for and accepts FastCGI requests and hands them off to a
    FCGIChannel for handling.

    FCGIServer can be configured to listen on either a specific port
    (for inet sockets) or socket_file (for unix domain sockets.)

    For inet sockets, the ip argument specifies the address from which
    the server will accept connections, '' indicates all addresses. If
    you only want to accept connections from the localhost, set ip to
    '127.0.0.1'.
    """
    channel_class = FCGIChannel

    def __init__(self, module='Main', ip='127.0.0.1', port=None,
                 socket_file=None, resolver=None, logger_object=None):
        self.ip = ip
        self.count = counter()
        asyncore.dispatcher.__init__(self)
        if not logger_object:
            logger_object = logger.file_logger(sys.stdout)
        if resolver:
            self.logger = logger.resolving_logger(resolver, logger_object)
        else:
            self.logger = logger.unresolving_logger(logger_object)

        # get configuration
        self.module = module
        self.port = port
        self.socket_file = socket_file

        # setup sockets
        if self.port:
            self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
            self.set_reuse_addr()
            self.bind((self.ip, self.port))
        else:
            try:
                os.unlink(self.socket_file)
            except os.error:
                pass
            self.create_socket(socket.AF_UNIX, socket.SOCK_STREAM)
            self.set_reuse_addr()
            self.bind(self.socket_file)
            try:
                os.chmod(self.socket_file, 0777)
            except os.error:
                pass
        self.listen(256)
        self.log_info('FastCGI Server (V%s) started at %s\n'
                      '\tIP          : %s\n'
                      '\tPort        : %s\n'
                      '\tSocket path : %s\n' % (
                          __version__, time.ctime(time.time()),
                          self.ip, self.port, self.socket_file))

    def handle_accept(self):
        self.count.increment()
        try:
            conn, addr = self.accept()
        except socket.error:
            self.log_info('Server accept() threw an exception', 'warning')
            return
        self.channel_class(self, conn, addr)

    def readable(self):
        return len(asyncore.socket_map) < CONNECTION_LIMIT

    def writable(self):
        return 0

    def create_socket(self, family, type):
        asyncore.dispatcher.create_socket(self, family, type)
        requestCloseOnExec(self.socket)

    def listen(self, num):
        # override asyncore limits for nt's listen queue size
        self.accepting = 1
        return self.socket.listen(num)

#----------------------------------------------------------------------

class FCGIResponse(HTTPResponse):

    _tempfile = None
    _templock = None
    _tempstart = 0

    def setChannel(self, channel):
        self.channel = channel

    def write(self, data):
        stdout = self.stdout
        if not self._wrote:
            l = self.headers.get('content-length', None)
            if l is not None:
                try:
                    if type(l) is type(''):
                        l = string.atoi(l)
                    if l > 128000:
                        self._tempfile = TemporaryFile()
                        self._templock = thread.allocate_lock()
                except:
                    pass
            stdout.write(str(self))
            self._wrote = 1

        if not data:
            return

        t = self._tempfile
        if t is None:
            stdout.write(data)
        else:
            while data:
                # write file producers
                # each producer holds 32K data
                chunk = data[:32768]
                data = data[32768:]
                l = len(chunk)
                b = self._tempstart
                e = b + l
                self._templock.acquire()
                try:
                    t.seek(b)
                    t.write(chunk)
                finally:
                    self._templock.release()
                self._tempstart = e
                stdout.write((file_part_producer(t, self._templock, b, e), l))

    def _finish(self):
        self.channel.reply_code = self.status
        DebugLogger.log('A', id(self.channel),
                        '%d %d' % (self.status, self.stdout.length))
        t = self._tempfile
        if t is not None:
            self.stdout.write((file_close_producer(t), 0))
            self._tempfile = None

        self.channel.sendStreamTerminator(FCGI_STDOUT)
        self.channel.sendEndRecord()
        self.stdout.close()
        self.stderr.close()

        if not self.channel.closed:
            self.channel.push_with_producer(
                LoggingProducer(self.channel,
                                self.stdout.length,
                                'log_request'), 0)
        if self._shutdownRequested():
            self.channel.push(ShutdownProducer(), 0)
            Wakeup(lambda: asyncore.close_all())
        else:
            self.channel.push(None, 0)
            Wakeup()
        self.channel = None

#----------------------------------------------------------------------

class FCGIPipe:
    """
    This class acts like a file and is used to catch stdout/stderr
    from ZPublisher and create FCGI records out of the data stream to
    send back to the web server.
    """
    def __init__(self, channel, recType):
        self.channel = channel
        self.recType = recType
        self.length = 0

    def write(self, data):
        if type(data) == type(''):
            datalen = len(data)
        else:
            p, datalen = data
        if data:
            self.channel.sendDataRecord(data, self.recType)
        self.length = self.length + datalen

    def close(self):
        self.channel = None

#----------------------------------------------------------------------
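
A side note on the name-value encoding handled by readPair and writePair above: the FastCGI spec stores each length as a single byte when it is below 0x80, and otherwise as four bytes with the high bit of the first byte set. A minimal standalone sketch of the encoding half (illustration only, not part of the module above):

    def encode_length(l):
        # One byte if l < 0x80, else four bytes with the high bit set.
        if l < 0x80:
            return chr(l)
        return chr(0x80 | (l >> 24) & 0xFF) + chr((l >> 16) & 0xFF) + \
               chr((l >> 8) & 0xFF) + chr(l & 0xFF)

    assert encode_length(11) == '\x0b'                  # len('SCRIPT_NAME')
    assert encode_length(200000) == '\x80\x03\x0d\x40'  # 200000 == 0x30d40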
lib/python/ZServer/FTPRequest.py  (new file, 0 → 100644)

##############################################################################
#
# Copyright (c) 2001 Zope Corporation and Contributors. All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""
FTP Request class for FTP server.

The FTP Request does the dirty work of turning an FTP request into something
that ZPublisher can understand.
"""

from ZPublisher.HTTPRequest import HTTPRequest

from cStringIO import StringIO
import os
from base64 import encodestring
import re


class FTPRequest(HTTPRequest):

    def __init__(self, path, command, channel, response,
                 stdin=None, environ=None, globbing=None, recursive=0):
        # we need to store the globbing information to pass it
        # to the ZPublisher and the manage_FTPlist function
        # (ajung)
        self.globbing = globbing
        self.recursive = recursive

        if stdin is None:
            stdin = StringIO()
        if environ is None:
            environ = self._get_env(path, command, channel, stdin)
        self._orig_env = environ
        HTTPRequest.__init__(self, stdin, environ, response, clean=1)

        # support for cookies and cookie authentication
        self.cookies = channel.cookies
        if not self.cookies.has_key('__ac') and channel.userid != 'anonymous':
            self.other['__ac_name'] = channel.userid
            self.other['__ac_password'] = channel.password
        for k, v in self.cookies.items():
            if not self.other.has_key(k):
                self.other[k] = v

    def retry(self):
        self.retry_count = self.retry_count + 1
        r = self.__class__(stdin=self.stdin,
                           environ=self._orig_env,
                           response=self.response.retry(),
                           channel=self,    # For my cookies
                           )
        return r

    def _get_env(self, path, command, channel, stdin):
        "Returns a CGI style environment"
        env = {}
        env['SCRIPT_NAME'] = '/%s' % channel.module
        env['REQUEST_METHOD'] = 'GET'    # XXX what should this be?
        env['SERVER_SOFTWARE'] = channel.server.SERVER_IDENT
        if channel.userid != 'anonymous':
            env['HTTP_AUTHORIZATION'] = 'Basic %s' % re.sub(
                '\012', '',
                encodestring('%s:%s' % (channel.userid, channel.password)))
        env['SERVER_NAME'] = channel.server.hostname
        env['SERVER_PORT'] = str(channel.server.port)
        env['REMOTE_ADDR'] = channel.client_addr[0]
        env['GATEWAY_INTERFACE'] = 'CGI/1.1'    # that's stretching it ;-)

        # FTP commands
        #
        if type(command) == type(()):
            args = command[1:]
            command = command[0]
        if command in ('LST', 'CWD', 'PASS'):
            env['PATH_INFO'] = self._join_paths(channel.path,
                                                path, 'manage_FTPlist')
        elif command in ('MDTM', 'SIZE'):
            env['PATH_INFO'] = self._join_paths(channel.path,
                                                path, 'manage_FTPstat')
        elif command == 'RETR':
            env['PATH_INFO'] = self._join_paths(channel.path,
                                                path, 'manage_FTPget')
        elif command in ('RMD', 'DELE'):
            env['PATH_INFO'] = self._join_paths(channel.path,
                                                path, 'manage_delObjects')
            env['QUERY_STRING'] = 'ids=%s' % args[0]
        elif command == 'MKD':
            env['PATH_INFO'] = self._join_paths(channel.path,
                                                path, 'manage_addFolder')
            env['QUERY_STRING'] = 'id=%s' % args[0]
        elif command == 'RNTO':
            env['PATH_INFO'] = self._join_paths(channel.path,
                                                path, 'manage_renameObject')
            env['QUERY_STRING'] = 'id=%s&new_id=%s' % (args[0], args[1])
        elif command == 'STOR':
            env['PATH_INFO'] = self._join_paths(channel.path, path)
            env['REQUEST_METHOD'] = 'PUT'
            env['CONTENT_LENGTH'] = len(stdin.getvalue())
        else:
            env['PATH_INFO'] = self._join_paths(channel.path, path, command)

        # Fake in globbing information
        env['GLOBBING'] = self.globbing
        env['FTP_RECURSIVE'] = self.recursive
        return env

    def _join_paths(self, *args):
        path = apply(os.path.join, args)
        path = os.path.normpath(path)
        if os.sep != '/':
            path = path.replace(os.sep, '/')
        return path
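
To make the command mapping in _get_env concrete: a hypothetical RETR of '/Foo/doc.txt' over an anonymous connection with the default 'Main' module would yield an environment along these lines (the server and address fields come from the channel and are made up here):

    env = {
        'SCRIPT_NAME': '/Main',                     # '/%s' % channel.module
        'REQUEST_METHOD': 'GET',
        'PATH_INFO': '/Foo/doc.txt/manage_FTPget',  # RETR maps to manage_FTPget
        'SERVER_SOFTWARE': '...',                   # channel.server.SERVER_IDENT
        'SERVER_NAME': 'localhost',
        'SERVER_PORT': '8021',
        'REMOTE_ADDR': '127.0.0.1',
        'GATEWAY_INTERFACE': 'CGI/1.1',
        'GLOBBING': None,
        'FTP_RECURSIVE': 0,
    }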
lib/python/ZServer/FTPResponse.py  (new file, 0 → 100644)

##############################################################################
#
# Copyright (c) 2001 Zope Corporation and Contributors. All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""
Response class for the FTP Server.
"""

from ZServer.HTTPResponse import ZServerHTTPResponse
from PubCore.ZEvent import Wakeup
from cStringIO import StringIO
import marshal


class FTPResponse(ZServerHTTPResponse):
    """
    Response to an FTP command
    """
    def __str__(self):
        # return ZServerHTTPResponse.__str__(self)
        # ZServerHTTPResponse.__str__(self) returns HTTP headers.
        # Why should we send them to the FTP client??? (ajung)
        return ''

    def outputBody(self):
        pass

    def setCookie(self, name, value, **kw):
        self.cookies[name] = value

    def appendCookie(self, name, value):
        self.cookies[name] = self.cookies[name] + value

    def expireCookie(self, name, **kw):
        if self.cookies.has_key(name):
            del self.cookies[name]

    def _cookie_list(self):
        return []

    def _marshalledBody(self):
        return marshal.loads(self.body)

    def setMessage(self, message):
        self._message = message

    def getMessage(self):
        return getattr(self, '_message', '')


class CallbackPipe:
    """
    Sends response object to a callback. Doesn't write anything.
    The callback takes place in Medusa's thread, not the request thread.
    """
    def __init__(self, callback, args):
        self._callback = callback
        self._args = args
        self._producers = []

    def close(self):
        pass

    def write(self, text, l=None):
        if text:
            self._producers.append(text)

    def finish(self, response):
        self._response = response
        Wakeup(self.apply)    # move callback to medusa's thread

    def apply(self):
        result = apply(self._callback, self._args + (self._response,))

        # break cycles
        self._callback = None
        self._response = None
        self._args = None

        return result


def make_response(channel, callback, *args):
    # XXX should this be the FTPResponse constructor instead?
    r = FTPResponse(stdout=CallbackPipe(callback, args), stderr=StringIO())
    r.setHeader('content-type', 'text/plain')
    r.cookies = channel.cookies
    return r
lib/python/ZServer/FTPServer.py  (new file, 0 → 100644)

##############################################################################
#
# Copyright (c) 2001 Zope Corporation and Contributors. All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""ZServer FTP Channel for use with medusa's ftp server.

FTP Service for Zope.

  This server allows FTP connections to Zope. In general FTP is used
  to manage content. You can:

    * Create and delete Folders, Documents, Files, and Images

    * Edit the contents of Documents, Files, Images

  In the future, FTP may be used to edit object properties.

FTP Protocol

  The FTP protocol for Zope gives Zope objects a way to make themselves
  available to FTP services. See the 'lib/python/OFS/FTPInterface.py' for
  more details.

FTP Permissions

  FTP access is controlled by one permission, 'FTP access': if bound to a
  role, users of that role will be able to list directories, and cd to
  them. Creating and deleting and changing objects are all governed by
  existing Zope permissions.

  Permissions are to a certain extent reflected in the permission bits
  listed in FTP file listings.

FTP Authorization

  Zope supports both normal and anonymous logins. It can be difficult
  to authorize Zope users since they are defined in distributed user
  databases. Normally, all logins will be accepted and then the user must
  proceed to 'cd' to a directory in which they are authorized. In this
  case, for the purpose of FTP limits, the user is considered anonymous
  until they cd to an authorized directory.

  Optionally, users can login with a special username which indicates
  where they are defined. Their login will then be authenticated in
  the indicated directory, and they will not be considered anonymous.
  The form of the name is '<username>@<path>' where path takes the form
  '<folder id>[/<folder id>...]'. For example, 'amos@Foo/Bar' will
  authenticate the user 'amos' in the directory '/Foo/Bar'. In addition
  the user's FTP session will be rooted in the authenticated directory,
  i.e. they will not be able to cd out of the directory.

  The main reason to use the rooted FTP login is to allow non-anonymous
  logins. This may be handy if, for example, you disallow anonymous logins,
  or if you set the limit for simultaneous anonymous logins very low.
"""

from PubCore import handle
from medusa.ftp_server import ftp_channel, ftp_server, recv_channel
import asyncore, asynchat
from medusa import filesys

from FTPResponse import make_response
from FTPRequest import FTPRequest

from ZServer import CONNECTION_LIMIT, requestCloseOnExec

from cStringIO import StringIO
import os
from mimetypes import guess_type
import marshal
import stat
import time


class zope_ftp_channel(ftp_channel):
    "Passes its commands to Zope, not a filesystem"

    read_only = 0
    anonymous = 1

    def __init__(self, server, conn, addr, module):
        ftp_channel.__init__(self, server, conn, addr)
        requestCloseOnExec(conn)
        self.module = module
        self.userid = ''
        self.password = ''
        self.path = '/'
        self.cookies = {}

    def _join_paths(self, *args):
        path = apply(os.path.join, args)
        path = os.path.normpath(path)
        if os.sep != '/':
            path = path.replace(os.sep, '/')
        return path

    # Overridden async_chat methods

    def push(self, producer, send=1):
        # this is thread-safe when send is false
        # note, that strings are not wrapped in
        # producers by default
        self.producer_fifo.push(producer)
        if send:
            self.initiate_send()

    push_with_producer = push

    # Overridden ftp_channel methods

    def cmd_nlst(self, line):
        'give name list of files in directory'
        self.get_dir_list(line, 0)

    def cmd_list(self, line):
        'give list files in a directory'
        # handles files as well as directories.
        # XXX also should maybe handle globbing, yuck.
        self.get_dir_list(line, 1)

    def get_dir_list(self, line, long=0):
        self.globbing = None
        self.recursive = 0
        # we need to scan the command line for arguments to '/bin/ls'...
        # XXX clean this up, maybe with getopts
        if len(line) > 1:
            args = line[1].split()
        else:
            args = []
        path_args = []
        # Extract globbing information
        for i in range(len(args)):
            x = args[i]
            if x.find('*') != -1 or x.find('?') != -1:
                self.globbing = x
                args[i] = '.'
        for arg in args:
            if arg[0] != '-':
                path_args.append(arg)
            else:
                if 'l' in arg:
                    long = 1
                if 'R' in arg:
                    self.recursive = 1
        if len(path_args) < 1:
            dir = '.'
        else:
            dir = path_args[0]
        self.listdir(dir, long)

    def listdir(self, path, long=0):
        response = make_response(self, self.listdir_completion, long)
        request = FTPRequest(path, 'LST', self, response,
                             globbing=self.globbing,
                             recursive=self.recursive)
        handle(self.module, request, response)

    def listdir_completion(self, long, response):
        status = response.getStatus()
        if status == 200:
            if self.anonymous and not self.userid == 'anonymous':
                self.anonymous = None
            dir_list = ''
            file_infos = response._marshalledBody()
            if type(file_infos[0]) == type(''):
                file_infos = (file_infos,)
            if long:
                for id, stat_info in file_infos:
                    dir_list = dir_list + filesys.unix_longify(
                        id, stat_info) + '\r\n'
            else:
                for id, stat_info in file_infos:
                    dir_list = dir_list + id + '\r\n'
            self.make_xmit_channel()
            self.client_dc.push(dir_list)
            self.client_dc.close_when_done()
            self.respond(
                '150 Opening %s mode data connection for file list' % (
                    self.type_map[self.current_mode]
                    )
                )
        elif status == 401:
            self.respond('530 Unauthorized.')
        else:
            self.respond('550 Could not list directory.')

    def cmd_cwd(self, line):
        'change working directory'
        response = make_response(self, self.cwd_completion,
                                 self._join_paths(self.path, line[1]))
        request = FTPRequest(line[1], 'CWD', self, response)
        handle(self.module, request, response)

    def cwd_completion(self, path, response):
        'cwd completion callback'
        status = response.getStatus()
        if status == 200:
            listing = response._marshalledBody()
            # check to see if we are cding to a non-foldoid object
            if type(listing[0]) == type(''):
                self.respond('550 No such directory.')
                return
            else:
                self.path = path or '/'
                self.respond('250 CWD command successful.')
                # now that we've successfully cd'd perhaps we are no
                # longer anonymous
                if self.anonymous and not self.userid == 'anonymous':
                    self.anonymous = None
        elif status == 401:
            self.respond('530 Unauthorized.')
        else:
            self.respond('550 No such directory.')

    def cmd_cdup(self, line):
        'change to parent of current working directory'
        self.cmd_cwd((None, '..'))

    def cmd_pwd(self, line):
        'print the current working directory'
        self.respond(
            '257 "%s" is the current directory.' % (self.path)
            )

    cmd_xpwd = cmd_pwd

    def cmd_mdtm(self, line):
        'show last modification time of file'
        if len(line) != 2:
            self.command.not_understood(' '.join(line))
            return
        response = make_response(self, self.mdtm_completion)
        request = FTPRequest(line[1], 'MDTM', self, response)
        handle(self.module, request, response)

    def mdtm_completion(self, response):
        status = response.getStatus()
        if status == 200:
            mtime = response._marshalledBody()[stat.ST_MTIME]
            mtime = time.gmtime(mtime)
            self.respond('213 %4d%02d%02d%02d%02d%02d' % (
                mtime[0], mtime[1], mtime[2], mtime[3], mtime[4], mtime[5]
                ))
        elif status == 401:
            self.respond('530 Unauthorized.')
        else:
            self.respond('550 Error getting file modification time.')

    def cmd_size(self, line):
        'return size of file'
        if len(line) != 2:
            self.command.not_understood(' '.join(line))
            return
        response = make_response(self, self.size_completion)
        request = FTPRequest(line[1], 'SIZE', self, response)
        handle(self.module, request, response)

    def size_completion(self, response):
        status = response.getStatus()
        if status == 200:
            self.respond('213 %d' % response._marshalledBody()[stat.ST_SIZE])
        elif status == 401:
            self.respond('530 Unauthorized.')
        else:
            self.respond('550 Error getting file size.')
            #self.client_dc.close_when_done()

    def cmd_retr(self, line):
        if len(line) < 2:
            self.command_not_understood(' '.join(line))
            return
        response = make_response(self, self.retr_completion, line[1])
        self._response_producers = response.stdout._producers
        request = FTPRequest(line[1], 'RETR', self, response)
        # Support download restarts if possible.
        if self.restart_position > 0:
            request.environ['HTTP_RANGE'] = 'bytes=%d-' % self.restart_position
        handle(self.module, request, response)

    def retr_completion(self, file, response):
        status = response.getStatus()
        if status == 200:
            self.make_xmit_channel()
            if not response._wrote:
                self.client_dc.push(response.body)
            else:
                for producer in self._response_producers:
                    self.client_dc.push_with_producer(producer)
            self._response_producers = None
            self.client_dc.close_when_done()
            self.respond(
                "150 Opening %s mode data connection for file '%s'" % (
                    self.type_map[self.current_mode], file
                    ))
        elif status == 401:
            self.respond('530 Unauthorized.')
        else:
            self.respond('550 Error opening file.')

    def cmd_stor(self, line, mode='wb'):
        'store a file'
        if len(line) < 2:
            self.command_not_understood(' '.join(line))
            return
        elif self.restart_position:
            restart_position = 0
            self.respond('553 restart on STOR not yet supported')
            return
        # XXX Check for possible problems first?
        # Right now we are limited in the errors we can issue, since
        # we agree to accept the file before checking authorization
        fd = ContentReceiver(self.stor_callback, line[1])
        self.respond(
            '150 Opening %s connection for %s' % (
                self.type_map[self.current_mode], line[1]
                )
            )
        self.make_recv_channel(fd)

    def stor_callback(self, path, data):
        'callback to do the STOR, after we have the input'
        response = make_response(self, self.stor_completion)
        request = FTPRequest(path, 'STOR', self, response, stdin=data)
        handle(self.module, request, response)

    def stor_completion(self, response):
        status = response.getStatus()
        message = response.getMessage()
        if status in (200, 201, 204, 302):
            self.client_dc.channel.respond(
                '226 ' + (message or 'Transfer complete.'))
        elif status == 401:
            self.client_dc.channel.respond(
                '426 ' + (message or 'Unauthorized.'))
        else:
            self.client_dc.channel.respond(
                '426 ' + (message or 'Error creating file.'))
        self.client_dc.close()

    def cmd_rnfr(self, line):
        'rename from'
        if len(line) != 2:
            self.command_not_understood(' '.join(line))
        else:
            self.fromfile = line[1]
            self.respond('350 RNFR command successful.')

    def cmd_rnto(self, line):
        if len(line) != 2:
            self.command_not_understood(' '.join(line))
            return
        pathf, idf = os.path.split(self.fromfile)
        patht, idt = os.path.split(line[1])
        response = make_response(self, self.rnto_completion)
        request = FTPRequest(pathf, ('RNTO', idf, idt), self, response)
        handle(self.module, request, response)

    def rnto_completion(self, response):
        status = response.getStatus()
        if status == 200:
            self.respond('250 RNTO command successful.')
        else:
            self.respond('550 error renaming file.')

    def cmd_dele(self, line):
        if len(line) != 2:
            self.command.not_understood(' '.join(line))
            return
        path, id = os.path.split(line[1])
        response = make_response(self, self.dele_completion)
        request = FTPRequest(path, ('DELE', id), self, response)
        handle(self.module, request, response)

    def dele_completion(self, response):
        status = response.getStatus()
        if status == 200 and response.body.find('Not Deletable') == -1:
            self.respond('250 DELE command successful.')
        elif status == 401:
            self.respond('530 Unauthorized.')
        else:
            self.respond('550 Error deleting file.')

    def cmd_mkd(self, line):
        if len(line) != 2:
            self.command.not_understood(' '.join(line))
            return
        path, id = os.path.split(line[1])
        response = make_response(self, self.mkd_completion)
        request = FTPRequest(path, ('MKD', id), self, response)
        handle(self.module, request, response)

    cmd_xmkd = cmd_mkd

    def mkd_completion(self, response):
        status = response.getStatus()
        if status == 200:
            self.respond('257 MKD command successful.')
        elif status == 401:
            self.respond('530 Unauthorized.')
        else:
            self.respond('550 Error creating directory.')

    def cmd_rmd(self, line):
        # XXX should object be checked to see if it's folderish
        # before we allow it to be RMD'd?
        if len(line) != 2:
            self.command.not_understood(' '.join(line))
            return
        path, id = os.path.split(line[1])
        response = make_response(self, self.rmd_completion)
        request = FTPRequest(path, ('RMD', id), self, response)
        handle(self.module, request, response)

    cmd_xrmd = cmd_rmd

    def rmd_completion(self, response):
        status = response.getStatus()
        if status == 200 and response.body.find('Not Deletable') == -1:
            self.respond('250 RMD command successful.')
        elif status == 401:
            self.respond('530 Unauthorized.')
        else:
            self.respond('550 Error removing directory.')

    def cmd_user(self, line):
        'specify user name'
        if len(line) > 1:
            self.userid = line[1]
            self.respond('331 Password required.')
        else:
            self.command_not_understood(' '.join(line))

    def cmd_pass(self, line):
        'specify password'
        if len(line) < 2:
            pw = ''
        else:
            pw = line[1]
        self.password = pw
        i = self.userid.find('@')
        if i == -1:
            if self.server.limiter.check_limit(self):
                self.respond('230 Login successful.')
                self.authorized = 1
                self.anonymous = 1
                self.log_info('Successful login.')
            else:
                self.respond('421 User limit reached. Closing connection.')
                self.close_when_done()
        else:
            path = self.userid[i + 1:]
            self.userid = self.userid[:i]
            self.anonymous = None
            response = make_response(self, self.pass_completion,
                                     self._join_paths('/', path))
            request = FTPRequest(path, 'PASS', self, response)
            handle(self.module, request, response)

    def pass_completion(self, path, response):
        status = response.getStatus()
        if status == 200:
            if not self.server.limiter.check_limit(self):
                self.close_when_done()
                self.respond('421 User limit reached. Closing connection.')
                return
            listing = response._marshalledBody()
            # check to see if we are cding to a non-foldoid object
            if type(listing[0]) == type(''):
                self.respond('530 Unauthorized.')
                return
            self.path = path or '/'
            self.authorized = 1
            if self.userid == 'anonymous':
                self.anonymous = 1
            self.log_info('Successful login.')
            self.respond('230 Login successful.')
        else:
            self.respond('530 Unauthorized.')

    def cmd_appe(self, line):
        self.respond('502 Command not implemented.')


# Override ftp server receive channel response mechanism
# XXX hack alert, this should probably be redone in a more OO way.

def handle_close(self):
    """response and closure of channel is delayed."""
    s = self.channel.server
    s.total_files_in.increment()
    s.total_bytes_in.increment(self.bytes_in.as_long())
    self.fd.close()
    self.readable = lambda: 0    # don't call close again

recv_channel.handle_close = handle_close


class ContentReceiver:
    "Write-only file object used to receive data from FTP"
    def __init__(self, callback, *args):
        self.data = StringIO()
        self.callback = callback
        self.args = args

    def write(self, data):
        self.data.write(data)

    def close(self):
        self.data.seek(0)
        args = self.args + (self.data,)
        c = self.callback
        self.callback = None
        self.args = None
        apply(c, args)


class FTPLimiter:
    """Rudimentary FTP limits. Helps prevent denial of service
    attacks. It works by limiting the number of simultaneous
    connections by userid. There are three limits, one for anonymous
    connections, and one for authenticated logins. The total number
    of simultaneous anonymous logins may be less than or equal to the
    anonymous limit. Each authenticated user can have up to the user
    limit number of simultaneous connections. The total limit is the
    maximum number of simultaneous connections of any sort. Do *not*
    set the total limit lower than or equal to the anonymous limit."""

    def __init__(self, anon_limit=10, user_limit=4, total_limit=25):
        self.anon_limit = anon_limit
        self.user_limit = user_limit
        self.total_limit = total_limit

    def check_limit(self, channel):
        """Check to see if the user has exhausted their limit or not.
        Check for existing channels with the same userid and the same
        ftp server."""
        total = 0
        class_total = 0
        if channel.anonymous:
            for existing_channel in asyncore.socket_map.values():
                if (hasattr(existing_channel, 'server') and
                        existing_channel.server is channel.server):
                    total = total + 1
                    if existing_channel.anonymous:
                        class_total = class_total + 1
            if class_total > self.anon_limit:
                return None
        else:
            for existing_channel in asyncore.socket_map.values():
                if (hasattr(existing_channel, 'server') and
                        existing_channel.server is channel.server):
                    total = total + 1
                    if channel.userid == existing_channel.userid:
                        class_total = class_total + 1
            if class_total > self.user_limit:
                return None
        if total <= self.total_limit:
            return 1


class FTPServer(ftp_server):
    """FTP server for Zope."""

    ftp_channel_class = zope_ftp_channel
    limiter = FTPLimiter(10, 1)
    shutup = 0

    def __init__(self, module, *args, **kw):
        self.shutup = 1
        apply(ftp_server.__init__, (self, None) + args, kw)
        self.shutup = 0
        self.module = module
        self.log_info('FTP server started at %s\n'
                      '\tHostname: %s\n\tPort: %d' % (
                          time.ctime(time.time()),
                          self.hostname,
                          self.port
                          ))

    def clean_shutdown_control(self, phase, time_in_this_phase):
        if phase == 2:
            self.log_info('closing FTP to new connections')
            self.close()

    def log_info(self, message, type='info'):
        if self.shutup:
            return
        asyncore.dispatcher.log_info(self, message, type)

    def create_socket(self, family, type):
        asyncore.dispatcher.create_socket(self, family, type)
        requestCloseOnExec(self.socket)

    def handle_accept(self):
        try:
            conn, addr = self.accept()
        except TypeError:
            # unpack non-sequence as result of accept
            # returning None (in case of EWOULDBLOCK)
            return
        self.total_sessions.increment()
        self.log_info('Incoming connection from %s:%d' % (addr[0], addr[1]))
        self.ftp_channel_class(self, conn, addr, self.module)

    def readable(self):
        return len(asyncore.socket_map) < CONNECTION_LIMIT

    def listen(self, num):
        # override asyncore limits for nt's listen queue size
        self.accepting = 1
        return self.socket.listen(num)
lib/python/ZServer/HTTPResponse.py
##############################################################################
#
# Copyright (c) 2001 Zope Corporation and Contributors. All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""
ZServer HTTPResponse
The HTTPResponse class takes care of server headers, response munging
and logging duties.
"""
import time, re, sys, tempfile
from cStringIO import StringIO
import thread
from ZPublisher.HTTPResponse import HTTPResponse
from medusa.http_date import build_http_date
from PubCore.ZEvent import Wakeup
from medusa.producers import hooked_producer
from medusa import http_server
import asyncore
from Producers import ShutdownProducer, LoggingProducer, CallbackProducer, \
     file_part_producer, file_close_producer
from types import LongType
import DebugLogger
class ZServerHTTPResponse(HTTPResponse):
    "Used to push data into a channel's producer fifo"

    # Set this value to 1 if streaming output in
    # HTTP/1.1 should use chunked encoding
    http_chunk = 1
    http_chunk_size = 1024

    # defaults
    _http_version = '1.0'
    _http_connection = 'close'
    _server_version = 'Zope/2.0 ZServer/2.0'

    # using streaming response
    _streaming = 0
    # using chunking transfer-encoding
    _chunking = 0
    def __str__(self,
                html_search=re.compile('<html>', re.I).search,
                ):
        if self._wrote:
            if self._chunking:
                return '0\r\n\r\n'
            else:
                return ''

        headers = self.headers
        body = self.body

        # set 204 (no content) status if 200 and response is empty
        # and not streaming
        if not headers.has_key('content-type') and \
                not headers.has_key('content-length') and \
                not self._streaming and \
                self.status == 200:
            self.setStatus('nocontent')

        # add content length if not streaming
        if not headers.has_key('content-length') and \
                not self._streaming:
            self.setHeader('content-length', len(body))

        content_length = headers.get('content-length', None)
        if content_length > 0:
            self.setHeader('content-length', content_length)

        headersl = []
        append = headersl.append

        status = headers.get('status', '200 OK')

        # status header must come first.
        append("HTTP/%s %s" % (self._http_version or '1.0', status))
        if headers.has_key('status'):
            del headers['status']

        if not headers.has_key("Etag"):
            self.setHeader('Etag', '')

        # add zserver headers
        append('Server: %s' % self._server_version)
        append('Date: %s' % build_http_date(time.time()))

        if self._http_version == '1.0':
            if self._http_connection == 'keep-alive' and \
                    self.headers.has_key('content-length'):
                self.setHeader('Connection', 'Keep-Alive')
            else:
                self.setHeader('Connection', 'close')

        # Close the connection if we have been asked to.
        # Use chunking if streaming output.
        if self._http_version == '1.1':
            if self._http_connection == 'close':
                self.setHeader('Connection', 'close')
            elif not self.headers.has_key('content-length'):
                if self.http_chunk and self._streaming:
                    self.setHeader('Transfer-Encoding', 'chunked')
                    self._chunking = 1
                else:
                    self.setHeader('Connection', 'close')

        for key, val in headers.items():
            if key.lower() == key:
                # only change non-literal header names
                key = "%s%s" % (key[:1].upper(), key[1:])
                start = 0
                l = key.find('-', start)
                while l >= start:
                    key = "%s-%s%s" % (key[:l],
                                       key[l + 1:l + 2].upper(),
                                       key[l + 2:])
                    start = l + 1
                    l = key.find('-', start)
            append("%s: %s" % (key, val))
        if self.cookies:
            headersl = headersl + self._cookie_list()

        headersl[len(headersl):] = [self.accumulated_headers, body]
        return "\r\n".join(headersl)
    _tempfile = None
    _templock = None
    _tempstart = 0

    def write(self, data):
        """\
        Return data as a stream

        HTML data may be returned using a stream-oriented interface.
        This allows the browser to display partial results while
        computation of a response proceeds.

        The published object should first set any output headers or
        cookies on the response object.

        Note that published objects must not generate any errors
        after beginning stream-oriented output.
        """
        stdout = self.stdout

        if not self._wrote:
            l = self.headers.get('content-length', None)
            if l is not None:
                try:
                    if type(l) is type(''):
                        l = int(l)
                    if l > 128000:
                        self._tempfile = tempfile.TemporaryFile()
                        self._templock = thread.allocate_lock()
                except:
                    pass

            self._streaming = 1
            stdout.write(str(self))
            self._wrote = 1

        if not data:
            return

        if self._chunking:
            data = '%x\r\n%s\r\n' % (len(data), data)

        l = len(data)

        t = self._tempfile
        if t is None or l < 200:
            stdout.write(data)
        else:
            b = self._tempstart
            e = b + l
            self._templock.acquire()
            try:
                t.seek(b)
                t.write(data)
            finally:
                self._templock.release()
            self._tempstart = e
            stdout.write(file_part_producer(t, self._templock, b, e), l)
    _retried_response = None

    def _finish(self):
        if self._retried_response:
            try:
                self._retried_response._finish()
            finally:
                self._retried_response = None
            return
        stdout = self.stdout

        t = self._tempfile
        if t is not None:
            stdout.write(file_close_producer(t), 0)
            self._tempfile = None

        stdout.finish(self)
        stdout.close()

        self.stdout = None  # need to break cycle?
        self._request = None

    def retry(self):
        """Return a request object to be used in a retry attempt
        """
        # This implementation is a bit lame, because it assumes that
        # only stdout stderr were passed to the constructor. OTOH, I
        # think that that's all that is ever passed.
        response = self.__class__(stdout=self.stdout, stderr=self.stderr)
        response.headers = self.headers
        response._http_version = self._http_version
        response._http_connection = self._http_connection
        response._server_version = self._server_version
        self._retried_response = response
        return response
class ChannelPipe:
    """Experimental pipe from ZPublisher to a ZServer Channel.
    Should only be used by one thread at a time. Note also that
    the channel will be handled by another thread, so restrict
    access to the channel to the push method only."""

    def __init__(self, request):
        self._channel = request.channel
        self._request = request
        self._shutdown = 0
        self._close = 0
        self._bytes = 0

    def write(self, text, l=None):
        if self._channel.closed:
            return
        if l is None:
            l = len(text)
        self._bytes = self._bytes + l
        self._channel.push(text, 0)
        Wakeup()

    def close(self):
        DebugLogger.log('A', id(self._request),
                        '%s %s' % (self._request.reply_code, self._bytes))
        if not self._channel.closed:
            self._channel.push(LoggingProducer(self._request, self._bytes), 0)
            self._channel.push(CallbackProducer(self._channel.done), 0)
            self._channel.push(CallbackProducer(
                lambda t=('E', id(self._request)): apply(DebugLogger.log, t)), 0)
            if self._shutdown:
                self._channel.push(ShutdownProducer(), 0)
                Wakeup()
            else:
                if self._close:
                    self._channel.push(None, 0)
                Wakeup()
        else:
            # channel closed too soon
            self._request.log(self._bytes)
            DebugLogger.log('E', id(self._request))
            if self._shutdown:
                Wakeup(lambda: asyncore.close_all())
            else:
                Wakeup()
        self._channel = None  # need to break cycles?
        self._request = None

    def flush(self):
        pass  # yeah, whatever

    def finish(self, response):
        if response._shutdownRequested():
            self._shutdown = 1
        if response.headers.get('connection', '') == 'close' or \
                response.headers.get('Connection', '') == 'close':
            self._close = 1
        self._request.reply_code = response.status
is_proxying_match = re.compile(r'[^ ]* [^ \\]*:').match
proxying_connection_re = re.compile('Proxy-Connection: (.*)', re.IGNORECASE)

def make_response(request, headers):
    "Simple http response factory"
    # should this be integrated into the HTTPResponse constructor?

    response = ZServerHTTPResponse(stdout=ChannelPipe(request),
                                   stderr=StringIO())
    response._http_version = request.version
    if request.version == '1.0' and is_proxying_match(request.request):
        # a request that was made as if this zope was an http 1.0 proxy.
        # that means we have to use some slightly different http
        # headers to manage persistent connections.
        connection_re = proxying_connection_re
    else:
        # a normal http request
        connection_re = http_server.CONNECTION

    response._http_connection = http_server.get_header(
        connection_re, request.header).lower()
    response._server_version = request.channel.server.SERVER_IDENT
    return response
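The chunked transfer-encoding that ZServerHTTPResponse.write applies when _chunking is set is standard HTTP/1.1 chunk framing: each write becomes a hex length line, the payload, and a CRLF, and __str__ emits the terminating zero chunk on the final call. A minimal standalone sketch of that framing, as an editor's illustration rather than part of this file:

def frame_chunks(pieces):
    # each chunk is '<hex length>\r\n<data>\r\n';
    # a zero-length chunk ends the body
    out = []
    for data in pieces:
        if data:
            out.append('%x\r\n%s\r\n' % (len(data), data))
    out.append('0\r\n\r\n')  # terminator, as returned by __str__
    return ''.join(out)

# frame_chunks(['Hello, ', 'world'])
# -> '7\r\nHello, \r\n5\r\nworld\r\n0\r\n\r\n'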
lib/python/ZServer/HTTPServer.py
##############################################################################
#
# Copyright (c) 2001 Zope Corporation and Contributors. All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""
Medusa HTTP server for Zope
changes from Medusa's http_server
Request Threads -- Requests are processed by threads from a thread
pool.
Output Handling -- Output is pushed directly into the producer
fifo by the request-handling thread. The HTTP server does not do
any post-processing such as chunking.
Pipelineable -- This is needed for protocols such as HTTP/1.1 in
which multiple requests come in on the same channel, before
responses are sent back. When requests are pipelined, the client
doesn't wait for the response before sending another request. The
server must ensure that responses are sent back in the same order
as requests are received.
"""
import sys
import re
import os
import posixpath
import types
import thread
import time
import socket
from cStringIO import StringIO

from PubCore import handle
from HTTPResponse import make_response
from ZPublisher.HTTPRequest import HTTPRequest

from medusa.http_server import http_server, get_header, http_channel, \
     VERSION_STRING
import asyncore
from medusa import counter, producers
from medusa.test import max_sockets
from medusa.default_handler import unquote
from asyncore import compact_traceback, dispatcher

from ZServer import CONNECTION_LIMIT, ZOPE_VERSION, ZSERVER_VERSION
from ZServer import requestCloseOnExec
from zLOG import LOG, register_subsystem, BLATHER, INFO, WARNING, ERROR
import DebugLogger
from medusa import logger

register_subsystem('ZServer HTTPServer')

CONTENT_LENGTH = re.compile('Content-Length: ([0-9]+)', re.I)
CONNECTION = re.compile('Connection: (.*)', re.I)
USER_AGENT = re.compile('User-Agent: (.*)', re.I)

# maps some request headers to environment variables.
# (those that don't start with 'HTTP_')
header2env = {'content-length': 'CONTENT_LENGTH',
              'content-type': 'CONTENT_TYPE',
              'connection': 'CONNECTION_TYPE',
              }
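# Editor's illustration (not part of the original file): the mapping rule
# get_environment() applies below, in isolation -- names found in
# header2env become bare CGI variables, everything else is prefixed with
# HTTP_ and has dashes folded to underscores.
def _header_to_env_key(name):
    name = name.lower()
    if header2env.has_key(name):
        return header2env[name]
    return 'HTTP_%s' % "_".join(name.split("-")).upper()

# _header_to_env_key('Content-Length') -> 'CONTENT_LENGTH'
# _header_to_env_key('User-Agent')     -> 'HTTP_USER_AGENT'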
class zhttp_collector:
    def __init__(self, handler, request, size):
        self.handler = handler
        self.request = request
        if size > 524288:
            # write large upload data to a file
            from tempfile import TemporaryFile
            self.data = TemporaryFile('w+b')
        else:
            self.data = StringIO()
        request.channel.set_terminator(size)
        request.collector = self

    # put and post collection methods
    #
    def collect_incoming_data(self, data):
        self.data.write(data)

    def found_terminator(self):
        # reset collector
        self.request.channel.set_terminator('\r\n\r\n')
        self.request.collector = None
        # finish request
        self.data.seek(0)
        r = self.request
        d = self.data
        del self.request
        del self.data
        self.handler.continue_request(d, r)
class zhttp_handler:
    "A medusa style handler for zhttp_server"

    _force_connection_close = 0

    def __init__(self, module, uri_base=None, env=None):
        """Creates a zope_handler

        module -- string, the name of the module to publish
        uri_base -- string, the base uri of the published module
                    defaults to '/<module name>' if not given.
        env -- dictionary, environment variables to be overridden.
               Replaces standard variables with supplied ones.
        """
        self.module_name = module
        self.env_override = env or {}
        self.hits = counter.counter()
        # if uri_base is unspecified, assume it
        # starts with the published module name
        #
        if uri_base is None:
            uri_base = '/%s' % module
        elif uri_base == '':
            uri_base = '/'
        else:
            if uri_base[0] != '/':
                uri_base = '/' + uri_base
            if uri_base[-1] == '/':
                uri_base = uri_base[:-1]
        self.uri_base = uri_base
        uri_regex = '%s.*' % self.uri_base
        self.uri_regex = re.compile(uri_regex)

    def match(self, request):
        uri = request.uri
        if self.uri_regex.match(uri):
            return 1
        else:
            return 0
    def handle_request(self, request):
        self.hits.increment()

        DebugLogger.log('B', id(request),
                        '%s %s' % (request.command.upper(), request.uri))

        size = get_header(CONTENT_LENGTH, request.header)
        if size and size != '0':
            size = int(size)
            zhttp_collector(self, request, size)
        else:
            sin = StringIO()
            self.continue_request(sin, request)
    def get_environment(self, request,
                        # These are strictly performance hackery...
                        h2ehas=header2env.has_key,
                        h2eget=header2env.get,
                        workdir=os.getcwd(),
                        ospath=os.path,
                        ):
        (path, params, query, fragment) = request.split_uri()

        if params:
            path = path + params  # undo medusa bug!

        while path and path[0] == '/':
            path = path[1:]
        if '%' in path:
            path = unquote(path)
        if query:
            # ZPublisher doesn't want the leading '?'
            query = query[1:]

        server = request.channel.server
        env = {}
        env['REQUEST_METHOD'] = request.command.upper()
        env['SERVER_PORT'] = str(server.port)
        env['SERVER_NAME'] = server.server_name
        env['SERVER_SOFTWARE'] = server.SERVER_IDENT
        env['SERVER_PROTOCOL'] = "HTTP/" + request.version
        env['channel.creation_time'] = request.channel.creation_time
        if self.uri_base == '/':
            env['SCRIPT_NAME'] = ''
            env['PATH_INFO'] = '/' + path
        else:
            env['SCRIPT_NAME'] = self.uri_base
            try:
                path_info = path.split(self.uri_base[1:], 1)[1]
            except:
                path_info = ''
            env['PATH_INFO'] = path_info
        env['PATH_TRANSLATED'] = ospath.normpath(
            ospath.join(workdir, env['PATH_INFO']))
        if query:
            env['QUERY_STRING'] = query
        env['GATEWAY_INTERFACE'] = 'CGI/1.1'
        env['REMOTE_ADDR'] = request.channel.addr[0]

        # This is a really bad hack to support WebDAV
        # clients accessing documents through GET
        # on the HTTP port. We check if the WebDAV magic
        # machinery is enabled and if the client is recognized
        # as a WebDAV client. If yes, we fake the environment
        # to make ZPublisher treat it as a WebDAV request.
        # This sucks like hell but it works pretty fine ;-)
        if env['REQUEST_METHOD'] == 'GET' and self._wdav_client_reg:
            self._munge_webdav_source_port(request, env)

        # If we're using a resolving logger, try to get the
        # remote host from the resolver's cache.
        if hasattr(server.logger, 'resolver'):
            dns_cache = server.logger.resolver.cache
            if dns_cache.has_key(env['REMOTE_ADDR']):
                remote_host = dns_cache[env['REMOTE_ADDR']][2]
                if remote_host is not None:
                    env['REMOTE_HOST'] = remote_host

        env_has = env.has_key
        for header in request.header:
            key, value = header.split(":", 1)
            key = key.lower()
            value = value.strip()
            if h2ehas(key) and value:
                env[h2eget(key)] = value
            else:
                key = 'HTTP_%s' % ("_".join(key.split("-"))).upper()
                if value and not env_has(key):
                    env[key] = value
        env.update(self.env_override)
        return env
    _wdav_client_reg = None

    def _munge_webdav_source_port(self, request, env):
        agent = get_header(USER_AGENT, request.header)
        if self._wdav_client_reg(agent):
            env['WEBDAV_SOURCE_PORT'] = 1
            path_info = env['PATH_INFO']
            path_info = posixpath.join(path_info, 'manage_FTPget')
            path_info = posixpath.normpath(path_info)
            env['PATH_INFO'] = path_info

    def set_webdav_source_clients(self, regex):
        self._wdav_client_reg = re.compile(regex).search
    def continue_request(self, sin, request):
        "continue handling request now that we have the stdin"
        s = get_header(CONTENT_LENGTH, request.header)
        if s:
            s = int(s)
        else:
            s = 0
        DebugLogger.log('I', id(request), s)
        env = self.get_environment(request)
        zresponse = make_response(request, env)
        if self._force_connection_close:
            zresponse._http_connection = 'close'
        zrequest = HTTPRequest(sin, env, zresponse)
        request.channel.current_request = None
        request.channel.queue.append((self.module_name, zrequest, zresponse))
        request.channel.work()

    def status(self):
        return producers.simple_producer("""
            <li>Zope Handler
            <ul>
            <li><b>Published Module:</b> %s
            <li><b>Hits:</b> %s
            </ul>""" % (self.module_name, self.hits)
        )
class zhttp_channel(http_channel):
    "http channel"

    closed = 0
    no_more_requests = 0
    zombie_timeout = 100 * 60  # 100 minutes
    max_header_len = 8196

    def __init__(self, server, conn, addr):
        http_channel.__init__(self, server, conn, addr)
        requestCloseOnExec(conn)
        self.queue = []
        self.working = 0

    def push(self, producer, send=1):
        # this is thread-safe when send is false
        # note, that strings are not wrapped in
        # producers by default
        if self.closed:
            return
        self.producer_fifo.push(producer)
        if send:
            self.initiate_send()

    push_with_producer = push

    def clean_shutdown_control(self, phase, time_in_this_phase):
        if phase == 3:
            # This is the shutdown phase where we are trying to finish
            # processing outstanding requests, and not accept any more
            self.no_more_requests = 1
            if self.working or self.writable():
                # We are busy working on an old request.
                # Try to stall shutdown
                return 1
            else:
                # We are no longer busy. Close ourself and allow
                # shutdown to proceed
                self.close()
                return 0

    def work(self):
        "try to handle a request"
        if not self.working:
            if self.queue and not self.no_more_requests:
                self.working = 1
                try:
                    module_name, request, response = self.queue.pop(0)
                except:
                    return
                handle(module_name, request, response)

    def close(self):
        self.closed = 1
        while self.queue:
            self.queue.pop()
        if self.current_request is not None:
            self.current_request.channel = None  # break circ refs
            self.current_request = None
        while self.producer_fifo:
            p = self.producer_fifo.first()
            if p is not None and type(p) != types.StringType:
                p.more()  # free up resources held by producer
            self.producer_fifo.pop()
        dispatcher.close(self)

    def done(self):
        "Called when a publishing request is finished"
        self.working = 0
        self.work()

    def kill_zombies(self):
        now = int(time.time())
        for channel in asyncore.socket_map.values():
            if channel.__class__ == self.__class__:
                if (now - channel.creation_time) > channel.zombie_timeout:
                    channel.close()

    def collect_incoming_data(self, data):
        # Override medusa http_channel implementation to prevent DOS attacks
        # that send never-ending HTTP headers.
        if self.current_request:
            # we are receiving data (probably POST data) for a request
            self.current_request.collect_incoming_data(data)
        else:
            # we are receiving header (request) data
            self.in_buffer = self.in_buffer + data
            if len(self.in_buffer) > self.max_header_len:
                raise ValueError('HTTP headers invalid (too long)')
class zhttp_server(http_server):
    "http server"

    SERVER_IDENT = 'Zope/%s ZServer/%s' % (ZOPE_VERSION, ZSERVER_VERSION)

    channel_class = zhttp_channel
    shutup = 0

    def __init__(self, ip, port, resolver=None, logger_object=None):
        self.shutup = 1
        http_server.__init__(self, ip, port, resolver, logger_object)
        self.shutup = 0
        self.log_info('HTTP server started at %s\n'
                      '\tHostname: %s\n\tPort: %d' % (
                          time.ctime(time.time()),
                          self.server_name,
                          self.server_port))

    def clean_shutdown_control(self, phase, time_in_this_phase):
        if phase == 2:
            self.log_info('closing HTTP to new connections')
            self.close()

    def log_info(self, message, type='info'):
        if self.shutup:
            return
        dispatcher.log_info(self, message, type)

    def create_socket(self, family, type):
        dispatcher.create_socket(self, family, type)
        requestCloseOnExec(self.socket)

    def readable(self):
        return self.accepting and \
               len(asyncore.socket_map) < CONNECTION_LIMIT

    def listen(self, num):
        # override asyncore limits for nt's listen queue size
        self.accepting = 1
        return self.socket.listen(num)
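For orientation, here is a rough, hypothetical sketch of how these pieces are wired together at startup. In the real distribution the z2.py start script performs the equivalent steps; the sketch assumes medusa's http_server.install_handler and the PubCore.setNumberOfThreads defined elsewhere in this commit:

import asyncore
from ZServer.HTTPServer import zhttp_server, zhttp_handler
from ZServer.PubCore import setNumberOfThreads

setNumberOfThreads(4)                  # size the ZPublisher pool first
hs = zhttp_server(ip='', port=8888)    # listen on all interfaces
hs.install_handler(zhttp_handler('Zope', ''))  # publish 'Zope' at '/'
asyncore.loop()                        # enter the medusa event loop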
lib/python/ZServer/ICPServer.py
##############################################################################
#
# Copyright (c) 2001 Zope Corporation and Contributors. All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
# Medusa ICP server
#
# Why would you want to use this?
# see http://www.zope.org/Members/htrd/icp/intro
import sys, string, os, socket, errno, struct
import asyncore
from medusa import counter

ICP_OP_QUERY = 1
ICP_OP_HIT = 2
ICP_OP_MISS = 3
ICP_OP_ERR = 4
ICP_OP_MISS_NOFETCH = 21
ICP_OP_DENIED = 22
class BaseICPServer(asyncore.dispatcher):

    REQUESTS_PER_LOOP = 4
    _shutdown = 0

    def __init__(self, ip, port):
        asyncore.dispatcher.__init__(self)
        self.create_socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.set_reuse_addr()
        self.bind((ip, port))
        if ip == '':
            addr = 'any'
        else:
            addr = ip
        self.log_info('ICP server started\n\tAddress: %s\n\tPort: %s'
                      % (addr, port))

    def clean_shutdown_control(self, phase, time_in_this_phase):
        if phase == 1:
            # Stop responding to requests.
            if not self._shutdown:
                self._shutdown = 1
                self.log_info('shutting down ICP')
            if time_in_this_phase < 2.0:
                # We have not yet been deaf long enough for our front
                # end proxies to notice. Do not allow shutdown to
                # proceed yet
                return 1
            else:
                # Shutdown can proceed. We don't need a socket any more
                self.close()
                return 0

    def handle_read(self):
        for i in range(self.REQUESTS_PER_LOOP):
            try:
                request, whence = self.socket.recvfrom(16384)
            except socket.error, e:
                if e[0] == errno.EWOULDBLOCK:
                    break
                else:
                    raise
            else:
                if self.check_whence(whence):
                    reply = self.calc_reply(request)
                    if reply:
                        self.socket.sendto(reply, whence)

    def readable(self):
        return not self._shutdown

    def writable(self):
        return 0

    def handle_write(self):
        self.log_info('unexpected write event', 'warning')

    def handle_error(self):
        # don't close the socket on error
        (file, fun, line), t, v, tbinfo = asyncore.compact_traceback()
        self.log_info('Problem in ICP (%s:%s %s)' % (t, v, tbinfo), 'error')

    def check_whence(self, whence):
        return 1

    def calc_reply(self, request):
        if len(request) > 20:
            opcode, version, length, number, options, opdata, junk = \
                struct.unpack('!BBHIIII', request[:20])
            if version == 2:
                if opcode == ICP_OP_QUERY:
                    if len(request) != length:
                        out_opcode = ICP_OP_ERR
                    else:
                        url = request[24:]
                        if url[-1:] == '\x00':
                            url = url[:-1]
                        out_opcode = self.check_url(url)
                    return struct.pack('!BBHIIII', out_opcode, 2, 20,
                                       number, 0, 0, 0)

    def check_url(self, url):
        # derived classes replace this with a more
        # useful policy
        return ICP_OP_MISS
class ICPServer(BaseICPServer):
    # Products that want to do special ICP handling should .append
    # their hooks into this list. Each hook is called in turn with
    # the URL as a parameter, and they must return an ICP_OP code
    # from above or None. The first non-None return is used as the
    # ICP response
    hooks = []

    def check_url(self, url):
        for hook in self.hooks:
            r = hook(url)
            if r is not None:
                return r
        return ICP_OP_MISS
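calc_reply() above reads a 20-byte network-order header ('!BBHIIII': opcode, version, message length, request number, options, option data, sender address) and takes the null-terminated URL from offset 24, i.e. after a further 4-byte requester-address field in the query payload. A hedged sketch, not part of this file, of composing a query that parser would accept:

import struct

def make_icp_query(url, number=1):
    # 4-byte requester address (zeroed here) + URL + terminating null
    payload = struct.pack('!I', 0) + url + '\x00'
    header = struct.pack('!BBHIIII', ICP_OP_QUERY, 2, 20 + len(payload),
                         number, 0, 0, 0)
    return header + payload

# With no hooks installed, calc_reply() answers such a packet with a
# 20-byte reply whose opcode is ICP_OP_MISS and which echoes `number`.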
lib/python/ZServer/INSTALL.txt
ZServer Installation
--------------------
Requirements
ZServer comes with Zope 2. Though ZServer can be used with earlier
versions of Zope, this is not supported and not covered by this
document.
Configuration
To run ZServer you simply execute the z2.py start script which is
located in your Zope directory. You can pass commandline arguments
to the start script in order to run Zope with different options. In
order to see a list of options use the '-h' help option.
Here's an example of use::
$ python2.1 z2.py -w 8888 -f "" -p "" -m "" &
This example starts Zope using a web server on port 8888. It does
not start an FTP server, or a PCGI server, or a Monitor server. It
also starts the server running in the background.
Shell scripts and batch files
You may also wish to create a shell script (or batch file under
win32) to set environment variables (such as ZOPE_DEBUG_MODE and
PYTHONHOME) and run the start script.
Here's an example shell script for a binary Zope release::
ZOPE_DEBUG_MODE=1
export ZOPE_DEBUG_MODE
PYTHONHOME=/home/Zope
export PYTHONHOME
/home/Zope/bin/python /home/Zope/z2.py -w 9673 &
Note: If ZServer fails because it can't find some standard Python
libraries there's a good bet that you need to set the PYTHONHOME as
shown above.
Here's an example batch file::
set ZOPE_DEBUG_MODE=1
"\Program Files\Zope\bin\python" "\Program Files\Zope\z2.py -w
8888 -f 8021"
Now you're ready to go.
Starting ZServer
To start ZServer run the start script::
$ python2.1 z2.py
To stop the server type 'control-c'.
Note: If you've created a shell script or batch file to run ZServer
use that instead.
You should see some Medusa information come up on the screen as Zope
starts.
A log file will be written to the 'var' directory, named
'Z2.log' by default.
Using ZServer
Once you start ZServer, it will publish Zope (or any Python module)
on HTTP and/or FTP. To access Zope via HTTP point your browser at
the server like so::
http://www.example.com:9673/
This assumes that you have chosen to put HTTP on port 9673 and that
you are publishing a module whose URL prefix is set to ''.
Note: to publish Zope normally you publish the 'lib/python/Zope.py'
module.
To access Zope via FTP you need to FTP to it at the port you set FTP
to run on. For example::
ftp www.example.com 9221
This opens an FTP session to your machine on port 9221, ZServer's
default FTP port. When you are prompted to log in you should supply
a Zope username and password. (Probably you should use an account
with the 'Manager' role, unless you have configured Zope to allow
FTP access to the 'Anonymous' role.) You can also enter 'anonymous'
and any password for anonymous FTP access. Once you have logged in
you can start issuing normal FTP commands.
Right now ZServer supports most basic FTP commands.
Note: When you log in your working directory is set to '/'. If you
do not have FTP permissions in this directory, you will need to 'cd'
to a directory where you have permissions before you can do
anything. See above for more information about logging into FTP.
Advanced Usage: zdaemon.py and the Zope NT service.
One problem you may notice with ZServer is that once the server is
shutdown, either through the web management interface, or by some
other means, it will not automatically be restarted.
On Unix you can use zdaemon.py to keep Zope running. Specifying
the '-Z' switch when starting Zope runs zdaemon.py. Zdaemon
will restart Zope when Zope is restarted through the web, and in
case of an unexpected error.
On NT, use the Zope service for the same functionality. See ZServer.py
for more information on running ZServer as a service.
Where to go from here
Check out the README.txt file. It contains information on what
ZServer can do, how it works, and what you can do if you run into
problems.
And don't forget to have fun!
lib/python/ZServer/PCGIServer.py
##############################################################################
#
# Copyright (c) 2001 Zope Corporation and Contributors. All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""
Medusa PCGI server.
This server functions as the PCGI publisher--it accepts the request
from the PCGI wrapper CGI program, services the request, and sends
back the response.
It should work with both inet and unix domain sockets.
Why would you want to use it? Using PCGI to connect to ZServer from
another webserver is similar to using the web server as a proxy,
with the difference that the web server gets to control the
environment and headers completely.
Note that ZServer can operate multiple PCGI servers.
"""
from medusa import logger
import asynchat, asyncore
from medusa.counter import counter
from medusa.http_server import compute_timezone_for_log
from asyncore import compact_traceback

import ZServer
from ZServer import CONNECTION_LIMIT, requestCloseOnExec

from PubCore import handle
from PubCore.ZEvent import Wakeup
from ZPublisher.HTTPResponse import HTTPResponse
from ZPublisher.HTTPRequest import HTTPRequest
from Producers import ShutdownProducer, LoggingProducer, CallbackProducer
import DebugLogger

from cStringIO import StringIO
from tempfile import TemporaryFile
import socket, string, os, sys, time
from types import StringType, TupleType

tz_for_log = compute_timezone_for_log()
class PCGIChannel(asynchat.async_chat):
    """Processes a PCGI request by collecting the env and stdin and
    then passing them to ZPublisher. The result is wrapped in a
    producer and sent back."""

    closed = 0

    def __init__(self, server, sock, addr):
        self.server = server
        self.addr = addr
        asynchat.async_chat.__init__(self, sock)
        requestCloseOnExec(sock)
        self.env = {}
        self.data = StringIO()
        self.set_terminator(10)
        self.size = None
        self.done = None

    def found_terminator(self):
        if self.size is None:
            # read the next size header
            # and prepare to read env or stdin
            self.data.seek(0)
            self.size = string.atoi(self.data.read())
            self.set_terminator(self.size)
            if self.size == 0:
                DebugLogger.log('I', id(self), 0)
                self.set_terminator('\r\n')
                self.data = StringIO()
                self.send_response()
            elif self.size > 1048576:
                self.data = TemporaryFile('w+b')
            else:
                self.data = StringIO()
        elif not self.env:
            # read env
            self.size = None
            self.data.seek(0)
            buff = self.data.read()
            for line in string.split(buff, '\000'):
                try:
                    k, v = string.split(line, '=', 1)
                    self.env[k] = v
                except:
                    pass
            # Hack around broken IIS PATH_INFO
            # maybe, this should go in ZPublisher...
            if self.env.has_key('SERVER_SOFTWARE') and \
                    string.find(self.env['SERVER_SOFTWARE'],
                                'Microsoft-IIS') != -1:
                script = filter(None, string.split(
                    string.strip(self.env['SCRIPT_NAME']), '/'))
                path = filter(None, string.split(
                    string.strip(self.env['PATH_INFO']), '/'))
                self.env['PATH_INFO'] = '/' + string.join(
                    path[len(script):], '/')
            self.data = StringIO()

            DebugLogger.log('B', id(self),
                            '%s %s' % (self.env['REQUEST_METHOD'],
                                       self.env.get('PATH_INFO', '/')))

            # now read the next size header
            self.set_terminator(10)
        else:
            DebugLogger.log('I', id(self), self.terminator)
            # we're done, we've got both env and stdin
            self.set_terminator('\r\n')
            self.data.seek(0)
            self.send_response()
    def send_response(self):
        # create an output pipe by passing request to ZPublisher,
        # and requesting a callback of self.log with the module
        # name and PATH_INFO as an argument.
        self.done = 1
        response = PCGIResponse(stdout=PCGIPipe(self), stderr=StringIO())
        request = HTTPRequest(self.data, self.env, response)
        handle(self.server.module, request, response)

    def collect_incoming_data(self, data):
        self.data.write(data)

    def readable(self):
        if not self.done:
            return 1
    def log_request(self, bytes):
        if self.env.has_key('HTTP_USER_AGENT'):
            user_agent = self.env['HTTP_USER_AGENT']
        else:
            user_agent = ''
        if self.env.has_key('HTTP_REFERER'):
            referer = self.env['HTTP_REFERER']
        else:
            referer = ''
        if self.env.has_key('PATH_INFO'):
            path = self.env['PATH_INFO']
        else:
            path = '%s/' % self.server.module
        if self.env.has_key('REQUEST_METHOD'):
            method = self.env['REQUEST_METHOD']
        else:
            method = "GET"
        addr = self.addr
        if addr and type(addr) is TupleType:
            self.server.logger.log(
                addr[0],
                '%d - - [%s] "%s %s" %d %d "%s" "%s"' % (
                    addr[1],
                    time.strftime('%d/%b/%Y:%H:%M:%S ',
                                  time.localtime(time.time())) + tz_for_log,
                    method, path, self.reply_code, bytes,
                    referer, user_agent
                )
            )
        else:
            self.server.logger.log(
                '127.0.0.1',
                ' - - [%s] "%s %s" %d %d "%s" "%s"' % (
                    time.strftime('%d/%b/%Y:%H:%M:%S ',
                                  time.gmtime(time.time())) + tz_for_log,
                    method, path, self.reply_code, bytes,
                    referer, user_agent
                )
            )
    def push(self, producer, send=1):
        # this is thread-safe when send is false
        # note, that strings are not wrapped in
        # producers by default
        self.producer_fifo.push(producer)
        if send:
            self.initiate_send()

    def __repr__(self):
        return "<PCGIChannel at %x>" % id(self)

    def close(self):
        self.closed = 1
        while self.producer_fifo:
            p = self.producer_fifo.first()
            if p is not None and type(p) != StringType:
                p.more()  # free up resources held by producer
            self.producer_fifo.pop()
        asyncore.dispatcher.close(self)
class PCGIServer(asyncore.dispatcher):
    """Accepts PCGI requests and hands them off to the PCGIChannel for
    handling.

    PCGIServer can be configured with either a PCGI info file or by
    directly specifying the module, pid_file, and either port (for
    inet sockets) or socket_file (for unix domain sockets.)

    For inet sockets, the ip argument specifies the address from which
    the server will accept connections, '' indicates all addresses. If
    you only want to accept connections from the localhost, set ip to
    '127.0.0.1'."""

    channel_class = PCGIChannel

    def __init__(self,
                 module='Main',
                 ip='127.0.0.1',
                 port=None,
                 socket_file=None,
                 pid_file=None,
                 pcgi_file=None,
                 resolver=None,
                 logger_object=None):
        self.ip = ip
        asyncore.dispatcher.__init__(self)
        self.count = counter()
        if not logger_object:
            logger_object = logger.file_logger(sys.stdout)
        if resolver:
            self.logger = logger.resolving_logger(resolver, logger_object)
        else:
            self.logger = logger.unresolving_logger(logger_object)

        # get configuration
        self.module = module
        self.port = port
        self.pid_file = pid_file
        self.socket_file = socket_file
        if pcgi_file is not None:
            self.read_info(pcgi_file)

        # write pid file
        try:
            f = open(self.pid_file, 'w')
            f.write(str(os.getpid()))
            f.close()
        except IOError:
            self.log_info("Cannot write PID file.", 'error')

        # setup sockets
        if self.port:
            self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
            self.set_reuse_addr()
            self.bind((self.ip, self.port))
            self.log_info(
                'PCGI Server started at %s\n'
                '\tInet socket port: %s' % (time.ctime(time.time()),
                                            self.port)
            )
        else:
            try:
                os.unlink(self.socket_file)
            except os.error:
                pass
            self.create_socket(socket.AF_UNIX, socket.SOCK_STREAM)
            self.set_reuse_addr()
            self.bind(self.socket_file)
            try:
                os.chmod(self.socket_file, 0777)
            except os.error:
                pass
            self.log_info(
                'PCGI Server started at %s\n'
                '\tUnix socket: %s' % (time.ctime(time.time()),
                                       self.socket_file)
            )
        self.listen(256)
    def create_socket(self, family, type):
        asyncore.dispatcher.create_socket(self, family, type)
        requestCloseOnExec(self.socket)

    def read_info(self, info_file):
        "read configuration information from a PCGI info file"
        lines = open(info_file).readlines()
        directives = {}
        try:
            for line in lines:
                line = string.strip(line)
                if not len(line) or line[0] == '#':
                    continue
                k, v = string.split(line, '=', 1)
                directives[string.strip(k)] = string.strip(v)
        except:
            raise 'ParseError', 'Error parsing PCGI info file'

        self.pid_file = directives.get('PCGI_PID_FILE', None)
        self.socket_file = directives.get('PCGI_SOCKET_FILE', None)
        if directives.has_key('PCGI_PORT'):
            self.port = string.atoi(directives['PCGI_PORT'])
        if directives.has_key('PCGI_MODULE'):
            self.module = directives['PCGI_MODULE']
        elif directives.has_key('PCGI_MODULE_PATH'):
            path = directives['PCGI_MODULE_PATH']
            path, module = os.path.split(path)
            module, ext = os.path.splitext(module)
            self.module = module

    def handle_accept(self):
        self.count.increment()
        try:
            conn, addr = self.accept()
        except socket.error:
            self.log_info('Server accept() threw an exception', 'warning')
            return
        self.channel_class(self, conn, addr)

    def readable(self):
        return len(asyncore.socket_map) < CONNECTION_LIMIT

    def writable(self):
        return 0

    def listen(self, num):
        # override asyncore limits for nt's listen queue size
        self.accepting = 1
        return self.socket.listen(num)
class PCGIResponse(HTTPResponse):

    def write(self, data):
        if not self._wrote:
            self.stdout.write(str(self))
            self._wrote = 1
        self.stdout.write(data)

    def _finish(self):
        self.stdout.finish(self)
        self.stdout.close()
        self.stdout = None
        self._request = None
class PCGIPipe:
    """
    Formats an HTTP response in PCGI format:

        10 digits indicating len of STDOUT
        STDOUT
        10 digits indicating len of STDERR
        STDERR

    Note that this implementation never sends STDERR
    """

    def __init__(self, channel):
        self._channel = channel
        self._data = StringIO()
        self._shutdown = 0

    def write(self, text):
        self._data.write(text)

    def close(self):
        if not self._channel.closed:
            data = self._data.getvalue()
            l = len(data)

            DebugLogger.log('A', id(self._channel),
                            '%s %s' % (self._channel.reply_code, l))

            self._channel.push('%010d%s%010d' % (l, data, 0), 0)
            self._channel.push(
                LoggingProducer(self._channel, l, 'log_request'), 0)
            self._channel.push(CallbackProducer(
                lambda t=('E', id(self._channel)):
                    apply(DebugLogger.log, t)), 0)

            if self._shutdown:
                try:
                    r = self._shutdown[0]
                except:
                    r = 0
                ZServer.exit_code = r
                self._channel.push(ShutdownProducer(), 0)
                Wakeup(lambda: asyncore.close_all())
            else:
                self._channel.push(None, 0)
                Wakeup()
        self._data = None
        self._channel = None

    def finish(self, response):
        if response._shutdownRequested():
            self._shutdown = 1
        self._channel.reply_code = response.status
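PCGIPipe.close() frames the reply exactly as the docstring describes: a 10-digit ASCII length, the stdout block, then a 10-digit length for the (always empty) stderr block. A standalone sketch of that framing and its inverse, as an editor's illustration:

def pcgi_frame(stdout_data, stderr_data=''):
    # ten ASCII digits give the size of the block that follows
    return '%010d%s%010d%s' % (len(stdout_data), stdout_data,
                               len(stderr_data), stderr_data)

def pcgi_unframe(message):
    size = int(message[:10])
    stdout_data, rest = message[10:10 + size], message[10 + size:]
    return stdout_data, rest[10:10 + int(rest[:10])]

# pcgi_unframe(pcgi_frame('Status: 200 OK\r\n\r\nhello'))
# -> ('Status: 200 OK\r\n\r\nhello', '')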
lib/python/ZServer/Producers.py
##############################################################################
#
# Copyright (c) 2001 Zope Corporation and Contributors. All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""
ZServer pipe utils. These producers basically function as callbacks.
"""
import asyncore
import sys

class ShutdownProducer:
    "shuts down medusa"

    def more(self):
        asyncore.close_all()

class LoggingProducer:
    "logs request"

    def __init__(self, logger, bytes, method='log'):
        self.logger = logger
        self.bytes = bytes
        self.method = method

    def more(self):
        getattr(self.logger, self.method)(self.bytes)
        self.logger = None
        return ''

class CallbackProducer:
    "Performs a callback in the channel's thread"

    def __init__(self, callback):
        self.callback = callback

    def more(self):
        self.callback()
        self.callback = None
        return ''
class file_part_producer:
    "producer wrapper for part of a file[-like] object"

    # match http_channel's outgoing buffer size
    out_buffer_size = 1 << 16

    def __init__(self, file, lock, start, end):
        self.file = file
        self.lock = lock
        self.start = start
        self.end = end

    def more(self):
        end = self.end
        if not end:
            return ''
        start = self.start
        if start >= end:
            return ''
        file = self.file
        size = end - start
        bsize = self.out_buffer_size
        if size > bsize:
            size = bsize

        self.lock.acquire()
        try:
            file.seek(start)
            data = file.read(size)
        finally:
            self.lock.release()

        if data:
            start = start + len(data)
            if start < end:
                self.start = start
                return data

        self.end = 0
        del self.file
        return data

class file_close_producer:
    def __init__(self, file):
        self.file = file

    def more(self):
        file = self.file
        if file is not None:
            file.close()
            self.file = None
        return ''
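All of these classes implement medusa's producer protocol: a channel keeps calling more() and sending the result until it gets back an empty string. A small illustration (not part of this file) of that consumption loop, driving file_part_producer directly:

import thread
from cStringIO import StringIO

f = StringIO('x' * 100000)
p = file_part_producer(f, thread.allocate_lock(), 0, 100000)

chunks = []
while 1:
    data = p.more()
    if not data:
        break
    chunks.append(data)
# two chunks: one 64K buffer (out_buffer_size) and the 34464-byte rest
assert len(''.join(chunks)) == 100000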
lib/python/ZServer/PubCore/ZEvent.py
##############################################################################
#
# Copyright (c) 2001 Zope Corporation and Contributors. All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Simple Event Manager Based on Pipes
"""
from ZServer.medusa.thread.select_trigger import trigger
from asyncore import socket_map

class simple_trigger(trigger):
    def handle_close(self):
        pass

the_trigger = simple_trigger()

def Wakeup(thunk=None):
    global the_trigger
    try:
        the_trigger.pull_trigger(thunk)
    except OSError, why:
        # this is a broken pipe, perhaps as the result of a signal.
        # we want to handle this gracefully, so we get rid of the old
        # trigger and install a new one.
        if why[0] == 32:
            del socket_map[the_trigger._fileno]
            the_trigger = simple_trigger()  # adds itself back into socket_map
            the_trigger.pull_trigger(thunk)
lib/python/ZServer/PubCore/ZRendezvous.py
##############################################################################
#
# Copyright (c) 2001 Zope Corporation and Contributors. All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
import thread
from ZServerPublisher import ZServerPublisher

class ZRendevous:

    def __init__(self, n=1):
        sync = thread.allocate_lock()
        self._a = sync.acquire
        self._r = sync.release
        pool = []
        self._lists = pool, [], []
        self._a()
        try:
            while n > 0:
                l = thread.allocate_lock()
                l.acquire()
                pool.append(l)
                thread.start_new_thread(ZServerPublisher, (self.accept,))
                n = n - 1
        finally:
            self._r()

    def accept(self):
        self._a()
        try:
            pool, requests, ready = self._lists
            while not requests:
                l = pool[-1]
                del pool[-1]
                ready.append(l)
                self._r()
                l.acquire()
                self._a()
                pool.append(l)
            r = requests[0]
            del requests[0]
            return r
        finally:
            self._r()

    def handle(self, name, request, response):
        self._a()
        try:
            pool, requests, ready = self._lists
            requests.append((name, request, response))
            if ready:
                l = ready[-1]
                del ready[-1]
                l.release()
        finally:
            self._r()
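ZRendevous hands requests from asyncore's thread to a pool of worker threads using nothing but two lock-protected lists: handle() enqueues a request triple and releases a waiting worker, while each worker loops in accept(). A stand-in sketch of the handoff (EchoWorker replaces ZServerPublisher so the sketch runs without ZPublisher; the lock choreography mirrors __init__ above):

import thread, time

def EchoWorker(accept):
    # stand-in for ZServerPublisher: just print each request triple
    while 1:
        name, request, response = accept()
        print 'worker got:', name, request

class EchoRendezvous(ZRendevous):
    def __init__(self, n=1):
        sync = thread.allocate_lock()
        self._a, self._r = sync.acquire, sync.release
        pool = []
        self._lists = pool, [], []
        self._a()
        try:
            while n > 0:
                l = thread.allocate_lock()
                l.acquire()
                pool.append(l)
                thread.start_new_thread(EchoWorker, (self.accept,))
                n = n - 1
        finally:
            self._r()

rv = EchoRendezvous()
rv.handle('Zope', '/index_html', None)  # a worker prints the triple
time.sleep(1)                           # let it run before the demo exits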
lib/python/ZServer/PubCore/ZServerPublisher.py
##############################################################################
#
# Copyright (c) 2001 Zope Corporation and Contributors. All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
from ZPublisher import publish_module

class ZServerPublisher:
    def __init__(self, accept):
        while 1:
            try:
                name, request, response = accept()
                publish_module(name,
                               request=request,
                               response=response)
            finally:
                response._finish()
                request = response = None
lib/python/ZServer/PubCore/__init__.py
##############################################################################
#
# Copyright (c) 2001 Zope Corporation and Contributors. All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
import ZRendezvous

_handle = None
_n = 1

def handle(*args, **kw):
    global _handle
    if _handle is None:
        _handle = ZRendezvous.ZRendevous(_n).handle
    return apply(_handle, args, kw)

def setNumberOfThreads(n):
    global _n
    _n = n
    global setNumberOfThreads
    del setNumberOfThreads
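Because handle() creates the rendezvous lazily on its first call, the pool size must be fixed beforehand; setNumberOfThreads then deletes itself so it cannot be called after the pool exists (a second call raises an AttributeError). A usage sketch, with the handle() arguments left illustrative:

import ZServer.PubCore

ZServer.PubCore.setNumberOfThreads(7)  # before the first handle() call
# ...later, a server queues a request for a worker thread, e.g.:
# ZServer.PubCore.handle(module_name, zrequest, zresponse)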
lib/python/ZServer/README.txt
ZServer README
--------------
What is ZServer?
ZServer is an integration of the Zope application server and the
Medusa information server. See the ZServer architecture document for
more information::
http://www.zope.org/Documentation/Reference/ZServer
ZServer gives you HTTP, FTP, WebDAV, PCGI, and remote interactive
Python access. In later releases it will probably offer more
protocols such as FastCGI, etc.
What is Medusa?
Medusa is a Python server framework which uses a single-threaded
asynchronous sockets approach. For more information see::
http://www.nightmare.com/medusa
There's also an interesting Medusa tutorial at::
http://www.nightmare.com:8080/nm/apps/medusa/docs/programming.html
ZServer HTTP support
ZServer offers HTTP 1.1 publishing for Zope. It does not support
publishing files from the file system. You can specify the HTTP port
using the -w command line argument for the z2.py start script. You
can also specify CGI environment variables on the command line using
z2.py.
ZServer FTP support
What you can do with FTP
FTP access to Zope allows you to FTP to the Zope object hierarchy
in order to perform managerial tasks. You can:
* Navigate the object hierarchy with 'cd'
* Replace the content of Documents, Images, and Files
* Create Documents, Images, Files, Folders
* Delete objects and Folders.
So basically you can do more than is possible with HTTP PUT. Also,
unlike PUT, FTP gives you access to Document content. So when you
download a Document you are getting its content, not what it looks
like when it is rendered.
Using FTP
To FTP into Zope, ZServer must be configured to serve FTP. By
default ZServer serves FTP on port 9221. So to connect to Zope you
would issue a command like so::
$ ftp localhost 9221
When logging in to FTP, you have some choices. You can connect
anonymously by using a username of 'anonymous' and any password.
Or you can login as a Zope user. Since Zope users are defined at
different locations in the object hierarchy, authentication can be
problematic. There are two solutions:
* login and then cd to the directory where you are defined.
* login with a special name that indicates where you are
defined.
The format of the special name is '<username>@<path>'. For
example::
joe@Marketing/Projects
FTP permissions
FTP support is provided for Folders, Documents, Images, and Files.
You can control access to FTP via the new 'FTP access' permission.
This permission controls the ability to 'cd' to a Folder and to
download objects. Uploading and deleting and creating objects are
controlled by existing permissions.
FTP limits
You can set limits for the number of simultaneous FTP connections.
You can separately configure the number of anonymous and
authenticated connections. Right now this setting is set in
'ZServerFTP.py'. In the future, it may be easier to configure.
Properties and FTP: The next step
The next phase of FTP support will allow you to edit properties of
all Zope objects. Probably properties will be exposed via special
files which will contain an XML representation of the object's
properties. You could then download the file, edit the XML and
upload it to change the object's properties.
We do not currently have a target date for FTP property support.
How does FTP work?
The ZServer's FTP channel object translates FTP requests into
ZPublisher requests. The FTP channel then analyses the response
and formulates an appropriate FTP response. The FTP channel
stores some state such as the current working directory and the
username and password.
On the Zope side of things, the 'lib/python/OFS/FTPInterface.py'
module defines the Zope FTP interface, for listing sub-items,
stat-ing, and getting content. The interface is implemented in
'SimpleItem', and in other Zope classes. Programmers will not
need to implement the entire interface if they inherit from
'SimpleItem'. All the other FTP functions are handled by
existing methods like 'manage_delObjects', and 'PUT', etc.
ZServer PCGI support
ZServer will service PCGI requests with both inet and unix domain
sockets. This means you can use ZServer instead of
'pcgi_publisher.py' as your long running PCGI server process. In the
future, PCGI may be able to activate ZServer.
Using PCGI instead of HTTP allows you to forward requests from
another web server to ZServer. The CGI environment and HTTP headers
are controlled by the web server, so you don't need to worry about
managing the ZServer environment. However, this configuration will
impose a larger overhead than simply using the web server as an HTTP
proxy for ZServer.
To use PCGI, configure your PCGI info files to communicate with
ZServer by setting the PCGI_PORT, PCGI_SOCKET_FILE, and PCGI_NAME.
The other PCGI settings are currently ignored by ZServer.
ZServer's PCGI support will work with mod_pcgi.
ZServer monitor server
ZServer now includes the Medusa monitor server. This basically gives
you a remote, secure Python prompt. You can interactively access Zope.
This is a very powerful, but dangerous tool. Be careful.
To use the monitor server specify a monitor port number using the -m
option with the z2.py start script. The default port is 9999.
To connect to the monitor server use the 'ZServer/medusa/monitor_client.py'
or 'ZServer/medusa/monitor_client_win32.py' script. For example::
$ python2.1 ZServer/medusa/monitor_client.py localhost 9999
You will then be asked to enter a password. This is the Zope super manager
password which is stored in the 'access' file.
Then you will be greeted with a Python prompt. To access Zope import
the Zope module::
>>> import Zope
The top-level Zope object is available via the 'Zope.app' function::
>>> a=Zope.app()
From this object you can reach all other Zope objects as subobjects.
Remember if you make changes to Zope objects and want those changes to be
saved you need to commit the transaction::
>>> get_transaction().commit()
ZServer WebDAV support
WebDAV is a new protocol for managing web resources. WebDAV operates
over HTTP. Since WebDAV uses HTTP, ZServer doesn't really have to do
anything special, except stay out of Zope's way when handling WebDAV
requests.
The only major WebDAV client at this time is Internet Explorer 5. It
works with Zope.
Differences between ZopeHTTPServer and ZServer
ZopeHTTPServer is old and no longer being actively maintained.
Both ZopeHTTPServer and ZServer are Python HTTP servers.
ZopeHTTPServer is built on the standard Python SimpleHTTPServer
framework. ZServer is built on Medusa.
ZopeHTTPServer is very limited. It can only publish one module at a
time. It can only publish via HTTP. It has no support for thread
pools.
ZServer on the other hand is more complex and supports publishing
multiple modules, thread pools, and it uses a new threaded
architecture for accessing ZPublisher.
Running ZServer as nobody
Normally ZServer will run with the userid of the user who starts
it. However, if ZServer is started by root, it will attempt to
become nobody or any userid you specify with the -u argument to the
z2.py start script.
ZServer is similar to ZopeHTTPServer in these respects.
If you run Zope with different userids you must be aware of
permission issues. Zope must be able to read and write to the 'var'
directory. If you change the userid Zope is running under you will
probably need to change the permissions on the 'var' directory
and the files in it in order for Zope to run under a different
userid.
Support
Questions and comments should go to 'support@digicool.com'.
You can report bugs and check on the status of bugs using the Zope
bug collector::
http://www.zope.org/Resources/Collector/
License
ZServer is covered by the ZPL despite the fact that it comes with
much of the Medusa source code. The portions of Medusa that come
with ZServer are licensed under the ZPL.
Outstanding issues
The FTP interface for Zope objects may be changed.
HTTP 1.1 support in ZServer is incomplete, though it should work for
most HTTP 1.1 clients.
lib/python/ZServer/WebDAVSrcHandler.py
##############################################################################
#
# Copyright (c) 2001 Zope Corporation and Contributors. All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""HTTP handler which forces GET requests to return the document source.
Works around current WebDAV clients' failure to implement the
'source-link' feature of the specification. Uses manage_FTPget().
"""
import os
import posixpath

from ZServer.HTTPServer import zhttp_handler

__version__ = "1.0"

class WebDAVSrcHandler(zhttp_handler):

    def get_environment(self, request):
        """Munge the request to ensure that we call manage_FTPGet."""
        env = zhttp_handler.get_environment(self, request)

        # Set a flag to indicate this request came through the WebDAV
        # source port server.
        env['WEBDAV_SOURCE_PORT'] = 1

        if env['REQUEST_METHOD'] == 'GET':
            path_info = env['PATH_INFO']
            if os.sep != '/':
                path_info = path_info.replace(os.sep, '/')
            path_info = posixpath.join(path_info, 'manage_FTPget')
            path_info = posixpath.normpath(path_info)
            env['PATH_INFO'] = path_info

        # Workaround for lousy WebDAV implementation of M$ Office 2K.
        # Requests for "index_html" are *sometimes* sent as "index_html."
        # We check the user-agent and remove a trailing dot for PATH_INFO
        # and PATH_TRANSLATED
        if env.get("HTTP_USER_AGENT", "").find(
                "Microsoft Data Access Internet Publishing Provider") > -1:
            if env["PATH_INFO"][-1] == '.':
                env["PATH_INFO"] = env["PATH_INFO"][:-1]
            if env["PATH_TRANSLATED"][-1] == '.':
                env["PATH_TRANSLATED"] = env["PATH_TRANSLATED"][:-1]

        return env
lib/python/ZServer/ZService.py
0 → 100644
##############################################################################
#
# Copyright (c) 2001 Zope Corporation and Contributors. All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""
ZServer as a NT service.
The serice starts up and monitors a ZServer process.
Features:
* When you start the service it starts ZServer
* When you stop the serivice it stops ZServer
* It monitors ZServer and restarts it if it exits abnormally
* If ZServer is shutdown from the web, the service stops.
* If ZServer cannot be restarted, the service stops.
Usage:
Installation
The ZServer service should be installed by the Zope Windows
installer. You can manually install, uninstall the service from
the commandline.
ZService.py [options] install|update|remove|start [...]
|stop|restart [...]|debug [...]
Options for 'install' and 'update' commands only:
--username domain
\
use
r
name : The Username the service is to run
under
--password password : The password for the username
--startup [manual|auto|disabled] : How the service starts,
default = manual
Commands
install : Installs the service
update : Updates the service, use this when you change
ZServer.py
remove : Removes the service
start : Starts the service, this can also be done from the
services control panel
stop : Stops the service, this can also be done from the
services control panel
restart : Restarts the service
debug : Runs the service in debug mode
You can view the usage options by running ZServer.py without any
arguments.
Note: you may have to register the Python service program first,
win32
\
py
t
honservice.exe /register
Starting Zope
Start Zope by clicking the 'start' button in the services control
panel. You can set Zope to automatically start at boot time by
choosing 'Auto' startup by clicking the 'statup' button.
Stopping Zope
Stop Zope by clicking the 'stop' button in the services control
panel. You can also stop Zope through the web by going to the
Zope control panel and by clicking 'Shutdown'.
Event logging
Zope events are logged to the NT application event log. Use the
event viewer to keep track of Zope events.
Registry Settings
You can change how the service starts ZServer by editing a registry
key.
HKEY_LOCAL_MACHINE
\
SYSTEM
\
CurrentControlSet
\
Se
r
vices
\
<Service Name>
\
P
a
rameters
\
s
t
art
The value of this key is the command which the service uses to
start ZServer. For example:
"C:
\
P
r
ogram Files
\
Zope
\
bin
\
py
t
hon.exe"
"C:
\
P
r
ogram Files
\
Zope
\
z2.py" -w 8888
TODO:
* Integrate it into the Windows installer.
* Add ZLOG logging in addition to event log logging.
* Make it easier to run multiple Zope services with one Zope install
This script does for NT the same sort of thing zdaemon.py does for UNIX.
Requires Python win32api extensions.
"""
import
sys
,
os
,
time
,
imp
# Some fancy path footwork is required here because we
# may be run from python.exe or lib/win32/PythonService.exe
home
=
os
.
path
.
split
(
os
.
path
.
split
(
sys
.
executable
)[
0
])[
0
]
if
sys
.
executable
[
-
10
:]
!=
'python.exe'
:
home
=
os
.
path
.
split
(
home
)[
0
]
home
=
os
.
path
.
split
(
home
)[
0
]
sys
.
path
.
append
(
os
.
path
.
join
(
home
,
'bin'
))
sys
.
path
.
append
(
os
.
path
.
join
(
home
,
'ZServer'
))
sys
.
path
.
append
(
os
.
path
.
join
(
home
,
'bin'
,
'lib'
,
'win32'
))
sys
.
path
.
append
(
os
.
path
.
join
(
home
,
'bin'
,
'lib'
,
'win32'
,
'lib'
))
# pythoncom and pywintypes are special, and require these hacks when
# we dont have a standard Python installation around.
import
win32api
def
magic_import
(
modulename
,
filename
):
# by Mark Hammond
try
:
# See if it does import first!
return
__import__
(
modulename
)
except
ImportError
:
pass
# win32 can find the DLL name.
h
=
win32api
.
LoadLibrary
(
filename
)
found
=
win32api
.
GetModuleFileName
(
h
)
# Python can load the module
mod
=
imp
.
load_module
(
modulename
,
None
,
found
,
(
'.dll'
,
'rb'
,
imp
.
C_EXTENSION
))
# inject it into the global module list.
sys
.
modules
[
modulename
]
=
mod
# And finally inject it into the namespace.
globals
()[
modulename
]
=
mod
win32api
.
FreeLibrary
(
h
)
magic_import
(
'pywintypes'
,
'pywintypes21.dll'
)
import
win32serviceutil
,
win32service
,
win32event
,
win32process
try
:
import
servicemanager
except
:
pass
class
ZServerService
(
win32serviceutil
.
ServiceFramework
):
# Some trickery to determine the service name. The WISE
# installer will write an svcname.txt to the ZServer dir
# that we can use to figure out our service name.
path
=
os
.
path
.
join
(
home
,
'ZServer'
,
'svcname.txt'
)
file
=
open
(
path
,
'r'
)
_svc_name_
=
file
.
readline
().
strip
()
file
.
close
()
_svc_display_name_
=
"Zope (%s)"
%
_svc_name_
restart_min_time
=
5
# if ZServer restarts before this many
# seconds then we have a problem, and
# need to stop the service.
def
__init__
(
self
,
args
):
win32serviceutil
.
ServiceFramework
.
__init__
(
self
,
args
)
self
.
hWaitStop
=
win32event
.
CreateEvent
(
None
,
0
,
0
,
None
)
def
SvcDoRun
(
self
):
self
.
start_zserver
()
while
1
:
rc
=
win32event
.
WaitForMultipleObjects
(
(
self
.
hWaitStop
,
self
.
hZServer
),
0
,
win32event
.
INFINITE
)
if
rc
-
win32event
.
WAIT_OBJECT_0
==
0
:
break
else
:
self
.
restart_zserver
()
self
.
ReportServiceStatus
(
win32service
.
SERVICE_STOP_PENDING
,
5000
)
def
SvcStop
(
self
):
servicemanager
.
LogInfoMsg
(
'Stopping Zope.'
)
try
:
self
.
stop_zserver
()
except
:
pass
self
.
ReportServiceStatus
(
win32service
.
SERVICE_STOP_PENDING
)
win32event
.
SetEvent
(
self
.
hWaitStop
)
def
start_zserver
(
self
):
sc
=
self
.
get_start_command
()
result
=
win32process
.
CreateProcess
(
None
,
self
.
get_start_command
(),
None
,
None
,
0
,
0
,
None
,
None
,
win32process
.
STARTUPINFO
())
self
.
hZServer
=
result
[
0
]
self
.
last_start_time
=
time
.
time
()
servicemanager
.
LogInfoMsg
(
'Starting Zope.'
)
def
stop_zserver
(
self
):
win32process
.
TerminateProcess
(
self
.
hZServer
,
0
)
def
restart_zserver
(
self
):
if
time
.
time
()
-
self
.
last_start_time
<
self
.
restart_min_time
:
servicemanager
.
LogErrorMsg
(
'Zope died and could not be restarted.'
)
self
.
SvcStop
()
code
=
win32process
.
GetExitCodeProcess
(
self
.
hZServer
)
if
code
==
0
:
# Exited with a normal status code,
# assume that shutdown is intentional.
self
.
SvcStop
()
else
:
servicemanager
.
LogWarningMsg
(
'Restarting Zope.'
)
self
.
start_zserver
()
def
get_start_command
(
self
):
return
win32serviceutil
.
GetServiceCustomOption
(
self
,
'start'
)
def
set_start_command
(
value
):
"sets the ZServer start command if the start command is not already set"
current
=
win32serviceutil
.
GetServiceCustomOption
(
ZServerService
,
'start'
,
None
)
if
current
is
None
:
win32serviceutil
.
SetServiceCustomOption
(
ZServerService
,
'start'
,
value
)
if
__name__
==
'__main__'
:
win32serviceutil
.
HandleCommandLine
(
ZServerService
)
if
'install'
in
sys
.
argv
:
command
=
'"%s" "%s" -S'
%
(
sys
.
executable
,
os
.
path
.
join
(
home
,
'z2.py'
))
set_start_command
(
command
)
print
"Setting ZServer start command to:"
,
command
lib/python/ZServer/__init__.py
0 → 100644
##############################################################################
#
# Copyright (c) 2001 Zope Corporation and Contributors. All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
import sys

from medusa.test import max_sockets
CONNECTION_LIMIT = max_sockets.max_select_sockets()

ZSERVER_VERSION = '1.1'

try:
    import App.version_txt
    ZOPE_VERSION = App.version_txt.version_txt()
except:
    ZOPE_VERSION = 'experimental'

exit_code = 0

# Try to poke zLOG default logging into asyncore
# XXX We should probably do a better job of this,
# however that would mean that ZServer required zLOG.
# (Is that really a bad thing?)
try:
    from zLOG import LOG, register_subsystem, BLATHER, INFO, WARNING, ERROR
except ImportError:
    pass
else:
    register_subsystem('ZServer')
    severity = {'info': INFO, 'warning': WARNING, 'error': ERROR}

    def log_info(self, message, type='info'):
        if message[:14] == 'adding channel' or \
           message[:15] == 'closing channel' or \
           message == 'Computing default hostname':
            LOG('ZServer', BLATHER, message)
        else:
            LOG('ZServer', severity[type], message)

    import asyncore
    asyncore.dispatcher.log_info = log_info

# A routine to try to arrange for request sockets to be closed
# on exec. This makes it easier for folks who spawn long running
# processes from Zope code. Thanks to Dieter Maurer for this.
try:
    import fcntl

    def requestCloseOnExec(sock):
        try:
            fcntl.fcntl(sock.fileno(), fcntl.F_SETFD, fcntl.FD_CLOEXEC)
        except:
            # XXX What was this supposed to catch?
            pass

except (ImportError, AttributeError):

    def requestCloseOnExec(sock):
        pass

import asyncore
from medusa import resolver, logger
from HTTPServer import zhttp_server, zhttp_handler
from PCGIServer import PCGIServer
from FCGIServer import FCGIServer
from FTPServer import FTPServer
from PubCore import setNumberOfThreads
from medusa.monitor import secure_monitor_server

# override the service name in logger.syslog_logger
logger.syslog_logger.svc_name = 'ZServer'
lib/python/ZServer/component.xml
0 → 100644
<component prefix="ZServer.datatypes">

  <abstracttype name="server">
    <description>
      The "server" type is used to describe a single type of server
      instance.  The value for a server section is an object with the
      ServerFactory interface.
    </description>
  </abstracttype>

  <sectiontype name="http-server" datatype=".HTTPServerFactory"
               implements="server">
    <key name="address" datatype="inet-address"/>
    <key name="force-connection-close" datatype="boolean" default="off"/>
    <key name="webdav-source-clients">
      <description>
        Regular expression used to identify clients who should
        receive WebDAV source responses to GET requests.
      </description>
    </key>
  </sectiontype>

  <sectiontype name="webdav-source-server"
               datatype=".WebDAVSourceServerFactory"
               implements="server">
    <key name="address" datatype="inet-address"/>
    <key name="force-connection-close" datatype="boolean" default="off"/>
  </sectiontype>

  <sectiontype name="persistent-cgi" datatype=".PCGIServerFactory"
               implements="server">
    <key name="path" datatype="existing-file"/>
  </sectiontype>

  <sectiontype name="fast-cgi" datatype=".FCGIServerFactory"
               implements="server">
    <key name="address" datatype="socket-address"/>
  </sectiontype>

  <sectiontype name="ftp-server" datatype=".FTPServerFactory"
               implements="server">
    <key name="address" datatype="inet-address"/>
  </sectiontype>

  <sectiontype name="monitor-server" datatype=".MonitorServerFactory"
               implements="server">
    <key name="address" datatype="inet-address"/>
  </sectiontype>

  <sectiontype name="icp-server" datatype=".ICPServerFactory"
               implements="server">
    <key name="address" datatype="inet-address"/>
  </sectiontype>

</component>
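As a rough illustration of how these section types are used (the section
and key names come from the schema above, but the surrounding zope.conf
layout is an assumption and may differ between Zope versions), an HTTP
server and an FTP server might be configured like this:

  <http-server>
    address 8080
    force-connection-close off
  </http-server>

  <ftp-server>
    address 8021
  </ftp-server>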
lib/python/ZServer/datatypes.py
0 → 100644
##############################################################################
#
# Copyright (c) 2003 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""ZConfig datatype support for ZServer.
Each server type is represented by a ServerFactory instance.
"""
class ServerFactory:

    def __init__(self, address=None):
        if address is None:
            self.host = None
            self.port = None
        else:
            self.host, self.port = address

    def prepare(self, defaulthost=None, dnsresolver=None,
                module=None, env=None, portbase=None):
        if defaulthost is not None:
            self._set_default_host(defaulthost)
        self.dnsresolver = dnsresolver
        self.module = module
        self.cgienv = env
        if portbase and self.port is not None:
            self.port += portbase

    def _set_default_host(self, host):
        if not self.host:
            self.host = host

    def servertype(self):
        s = self.__class__.__name__
        if s.endswith("Factory"):
            s = s[:-7]
        return s

    def create(self):
        raise NotImplementedError(
            "Concrete ServerFactory classes must implement create().")


class HTTPServerFactory(ServerFactory):

    def __init__(self, section):
        ServerFactory.__init__(self, section.address)
        self.force_connection_close = section.force_connection_close
        # webdav-source-server sections won't have webdav_source_clients:
        webdav_clients = getattr(section, "webdav_source_clients", None)
        self.webdav_source_clients = webdav_clients

    def create(self):
        from ZServer import HTTPServer
        from ZServer.AccessLogger import access_logger
        handler = self.createHandler()
        handler._force_connection_close = self.force_connection_close
        if self.webdav_source_clients:
            handler.set_webdav_source_clients(self.webdav_source_clients)
        server = HTTPServer.zhttp_server(ip=self.host, port=self.port,
                                         resolver=self.dnsresolver,
                                         logger_object=access_logger)
        server.install_handler(handler)
        return server

    def createHandler(self):
        from ZServer import HTTPServer
        return HTTPServer.zhttp_handler(self.module, '', self.cgienv)


class WebDAVSourceServerFactory(HTTPServerFactory):

    def createHandler(self):
        from ZServer.WebDAVSrcHandler import WebDAVSrcHandler
        return WebDAVSrcHandler(self.module, '', self.cgienv)


class FTPServerFactory(ServerFactory):

    def __init__(self, section):
        ServerFactory.__init__(self, section.address)

    def create(self):
        from ZServer.AccessLogger import access_logger
        from ZServer.FTPServer import FTPServer
        return FTPServer(ip=self.host, port=self.port, module=self.module,
                         resolver=self.dnsresolver,
                         logger_object=access_logger)


class PCGIServerFactory(ServerFactory):

    def __init__(self, section):
        ServerFactory.__init__(self)
        self.path = section.path

    def create(self):
        from ZServer.AccessLogger import access_logger
        from ZServer.PCGIServer import PCGIServer
        return PCGIServer(ip=self.host, port=self.port, module=self.module,
                          resolver=self.dnsresolver, pcgi_file=self.path,
                          logger_object=access_logger)


class FCGIServerFactory(ServerFactory):

    def __init__(self, section):
        import socket
        if section.address.family == socket.AF_INET:
            address = section.address.address
            path = None
        else:
            address = None
            path = section.address.address
        ServerFactory.__init__(self, address)
        self.path = path

    def _set_default_host(self, host):
        if self.path is None:
            ServerFactory._set_default_host(self, host)

    def create(self):
        from ZServer.AccessLogger import access_logger
        from ZServer.FCGIServer import FCGIServer
        return FCGIServer(ip=self.host, port=self.port,
                          socket_file=self.path, module=self.module,
                          resolver=self.dnsresolver,
                          logger_object=access_logger)


class MonitorServerFactory(ServerFactory):

    def __init__(self, section):
        ServerFactory.__init__(self, section.address)

    def create(self):
        from ZServer.medusa.monitor import secure_monitor_server
        return secure_monitor_server(hostname=self.host, port=self.port,
                                     password=self.getPassword())

    def getPassword(self):
        # XXX This is really out of place; there should be a better
        # way.  For now, at least we can make it a separate method.
        import ZODB  # :-( required to import user
        from AccessControl.User import emergency_user
        if hasattr(emergency_user, '__null_user__'):
            pw = None
        else:
            pw = emergency_user._getPassword()
        if pw is None:
            import zLOG
            zLOG.LOG("Zope", zLOG.WARNING, 'Monitor server not started'
                     ' because no emergency user exists.')
        return pw


class ICPServerFactory(ServerFactory):

    def __init__(self, section):
        ServerFactory.__init__(self, section.address)

    def create(self):
        from ZServer.ICPServer import ICPServer
        return ICPServer(self.host, self.port)
lib/python/ZServer/medusa/__init__.py
0 → 100644
# Make medusa into a package

__version__ = '$Revision: 1.9 $'[11:-2]
lib/python/ZServer/medusa/chat_server.py
0 → 100644
# -*- Mode: Python; tab-width: 4 -*-
#
# Author: Sam Rushing <rushing@nightmare.com>
# Copyright 1997-2000 by Sam Rushing
# All Rights Reserved.
#
RCS_ID = '$Id: chat_server.py,v 1.5 2003/03/18 21:15:16 fdrake Exp $'

import string

VERSION = string.split(RCS_ID)[2]

import socket
import asyncore
import asynchat
import status_handler


class chat_channel(asynchat.async_chat):

    def __init__(self, server, sock, addr):
        asynchat.async_chat.__init__(self, sock)
        self.server = server
        self.addr = addr
        self.set_terminator('\r\n')
        self.data = ''
        self.nick = None
        self.push('nickname?: ')

    def collect_incoming_data(self, data):
        self.data = self.data + data

    def found_terminator(self):
        line = self.data
        self.data = ''
        if self.nick is None:
            self.nick = string.split(line)[0]
            if not self.nick:
                self.nick = None
                self.push('huh? gimmee a nickname: ')
            else:
                self.greet()
        else:
            if not line:
                pass
            elif line[0] != '/':
                self.server.push_line(self, line)
            else:
                self.handle_command(line)

    def greet(self):
        self.push('Hello, %s\r\n' % self.nick)
        num_channels = len(self.server.channels) - 1
        if num_channels == 0:
            self.push('[Kinda lonely in here... you\'re the only caller!]\r\n')
        else:
            self.push('[There are %d other callers]\r\n'
                      % (len(self.server.channels) - 1))
        nicks = map(lambda x: x.get_nick(), self.server.channels.keys())
        self.push(string.join(nicks, '\r\n') + '\r\n')
        self.server.push_line(self, '[joined]')

    def handle_command(self, command):
        import types
        command_line = string.split(command)
        name = 'cmd_%s' % command_line[0][1:]
        if hasattr(self, name):
            # make sure it's a method...
            method = getattr(self, name)
            if type(method) == type(self.handle_command):
                method(command_line[1:])
        else:
            self.push('unknown command: %s' % command_line[0])

    def cmd_quit(self, args):
        self.server.push_line(self, '[left]')
        self.push('Goodbye!\r\n')
        self.close_when_done()

    # alias for '/quit' - '/q'
    cmd_q = cmd_quit

    def push_line(self, nick, line):
        self.push('%s: %s\r\n' % (nick, line))

    def handle_close(self):
        self.close()

    def close(self):
        del self.server.channels[self]
        asynchat.async_chat.close(self)

    def get_nick(self):
        if self.nick is not None:
            return self.nick
        else:
            return 'Unknown'


class chat_server(asyncore.dispatcher):

    SERVER_IDENT = 'Chat Server (V%s)' % VERSION

    channel_class = chat_channel

    spy = 1

    def __init__(self, ip='', port=8518):
        self.port = port
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        self.bind((ip, port))
        print '%s started on port %d' % (self.SERVER_IDENT, port)
        self.listen(5)
        self.channels = {}
        self.count = 0

    def handle_accept(self):
        conn, addr = self.accept()
        self.count = self.count + 1
        print 'client #%d - %s:%d' % (self.count, addr[0], addr[1])
        self.channels[self.channel_class(self, conn, addr)] = 1

    def push_line(self, from_channel, line):
        nick = from_channel.get_nick()
        if self.spy:
            print '%s: %s' % (nick, line)
        for c in self.channels.keys():
            if c is not from_channel:
                c.push('%s: %s\r\n' % (nick, line))

    def status(self):
        lines = [
            '<h2>%s</h2>' % self.SERVER_IDENT,
            '<br>Listening on Port: %d' % self.port,
            '<br><b>Total Sessions:</b> %d' % self.count,
            '<br><b>Current Sessions:</b> %d' % (len(self.channels))
        ]
        return status_handler.lines_producer(lines)

    def writable(self):
        return 0


if __name__ == '__main__':
    import sys
    if len(sys.argv) > 1:
        port = string.atoi(sys.argv[1])
    else:
        port = 8518
    s = chat_server('', port)
    asyncore.loop()
lib/python/ZServer/medusa/counter.py
0 → 100644
# -*- Mode: Python; tab-width: 4 -*-
# It is tempting to add an __int__ method to this class, but it's not
# a good idea. This class tries to gracefully handle integer
# overflow, and to hide this detail from both the programmer and the
# user. Note that the __str__ method can be relied on for printing out
# the value of a counter:
#
# >>> print 'Total Client: %s' % self.total_clients
#
# If you need to do arithmetic with the value, then use the 'as_long'
# method, the use of long arithmetic is a reminder that the counter
# will overflow.
class counter:
    "general-purpose counter"

    def __init__(self, initial_value=0):
        self.value = initial_value

    def increment(self, delta=1):
        result = self.value
        try:
            self.value = self.value + delta
        except OverflowError:
            self.value = long(self.value) + delta
        return result

    def decrement(self, delta=1):
        result = self.value
        try:
            self.value = self.value - delta
        except OverflowError:
            self.value = long(self.value) - delta
        return result

    def as_long(self):
        return long(self.value)

    def __nonzero__(self):
        return self.value != 0

    def __repr__(self):
        return '<counter value=%s at %x>' % (self.value, id(self))

    def __str__(self):
        # strip the trailing 'L' from the long representation
        return str(long(self.value))[:-1]
lib/python/ZServer/medusa/default_handler.py
0 → 100644
# -*- Mode: Python; tab-width: 4 -*-
#
# Author: Sam Rushing <rushing@nightmare.com>
# Copyright 1997 by Sam Rushing
# All Rights Reserved.
#
RCS_ID = '$Id: default_handler.py,v 1.9 2003/03/18 21:15:16 fdrake Exp $'

# standard python modules
import os
import re
import posixpath
import stat
import string
import time

# medusa modules
import http_date
import http_server
import mime_type_table
import status_handler
import producers

unquote = http_server.unquote

# This is the 'default' handler.  it implements the base set of
# features expected of a simple file-delivering HTTP server.  file
# services are provided through a 'filesystem' object, the very same
# one used by the FTP server.
#
# You can replace or modify this handler if you want a non-standard
# HTTP server.  You can also derive your own handler classes from
# it.
#
# support for handling POST requests is available in the derived
# class <default_with_post_handler>, defined below.
#

from counter import counter


class default_handler:

    valid_commands = ['get', 'head']

    IDENT = 'Default HTTP Request Handler'

    # Pathnames that are tried when a URI resolves to a directory name
    directory_defaults = ['index.html', 'default.html']

    default_file_producer = producers.file_producer

    def __init__(self, filesystem):
        self.filesystem = filesystem
        # count total hits
        self.hit_counter = counter()
        # count file deliveries
        self.file_counter = counter()
        # count cache hits
        self.cache_counter = counter()

    hit_counter = 0

    def __repr__(self):
        return '<%s (%s hits) at %x>' % (
            self.IDENT, self.hit_counter, id(self))

    # always match, since this is a default
    def match(self, request):
        return 1

    # handle a file request, with caching.
    def handle_request(self, request):
        if request.command not in self.valid_commands:
            request.error(400)  # bad request
            return

        self.hit_counter.increment()

        path, params, query, fragment = request.split_uri()

        if '%' in path:
            path = unquote(path)

        # strip off all leading slashes
        while path and path[0] == '/':
            path = path[1:]

        if self.filesystem.isdir(path):
            if path and path[-1] != '/':
                request['Location'] = 'http://%s/%s/' % (
                    request.channel.server.server_name, path)
                request.error(301)
                return

            # we could also generate a directory listing here,
            # may want to move this into another method for that
            # purpose
            found = 0
            if path and path[-1] != '/':
                path = path + '/'
            for default in self.directory_defaults:
                p = path + default
                if self.filesystem.isfile(p):
                    path = p
                    found = 1
                    break
            if not found:
                request.error(404)  # Not Found
                return

        elif not self.filesystem.isfile(path):
            request.error(404)  # Not Found
            return

        file_length = self.filesystem.stat(path)[stat.ST_SIZE]

        ims = get_header_match(IF_MODIFIED_SINCE, request.header)

        length_match = 1
        if ims:
            length = ims.group(4)
            if length:
                try:
                    length = string.atoi(length)
                    if length != file_length:
                        length_match = 0
                except:
                    pass

        ims_date = 0
        if ims:
            ims_date = http_date.parse_http_date(ims.group(1))

        try:
            mtime = self.filesystem.stat(path)[stat.ST_MTIME]
        except:
            request.error(404)
            return

        if length_match and ims_date:
            if mtime <= ims_date:
                request.reply_code = 304
                request.done()
                self.cache_counter.increment()
                return
        try:
            file = self.filesystem.open(path, 'rb')
        except IOError:
            request.error(404)
            return

        request['Last-Modified'] = http_date.build_http_date(mtime)
        request['Content-Length'] = file_length
        self.set_content_type(path, request)

        if request.command == 'get':
            request.push(self.default_file_producer(file))

        self.file_counter.increment()
        request.done()

    def set_content_type(self, path, request):
        ext = string.lower(get_extension(path))
        if mime_type_table.content_type_map.has_key(ext):
            request['Content-Type'] = mime_type_table.content_type_map[ext]
        else:
            # TODO: test a chunk off the front of the file for 8-bit
            # characters, and use application/octet-stream instead.
            request['Content-Type'] = 'text/plain'

    def status(self):
        return producers.simple_producer(
            '<li>%s' % status_handler.html_repr(self)
            + '<ul>'
            + '  <li><b>Total Hits:</b> %s' % self.hit_counter
            + '  <li><b>Files Delivered:</b> %s' % self.file_counter
            + '  <li><b>Cache Hits:</b> %s' % self.cache_counter
            + '</ul>'
        )


# HTTP/1.0 doesn't say anything about the "; length=nnnn" addition
# to this header.  I suppose its purpose is to avoid the overhead
# of parsing dates...

IF_MODIFIED_SINCE = re.compile(
    'If-Modified-Since: ([^;]+)((; length=([0-9]+)$)|$)',
    re.IGNORECASE
)

USER_AGENT = re.compile('User-Agent: (.*)', re.IGNORECASE)

CONTENT_TYPE = re.compile(
    r'Content-Type: ([^;]+)((; boundary=([A-Za-z0-9\'\(\)+_,./:=?-]+)$)|$)',
    re.IGNORECASE
)

get_header = http_server.get_header
get_header_match = http_server.get_header_match


def get_extension(path):
    dirsep = string.rfind(path, '/')
    dotsep = string.rfind(path, '.')
    if dotsep > dirsep:
        return path[dotsep + 1:]
    else:
        return ''
lib/python/ZServer/medusa/dist/license.html
0 → 100644
<html>
<head>
<title>Licensing terms for Medusa</title>
</head>
<body>
<h1>Medusa is now Free!</h1>
<p>
Medusa was previously distributed under a 'free for
non-commercial use' license.  In May of 2000 I changed the
license to be identical to the standard Python license.  The
standard Python license has always applied to the core
components of Medusa; this change just frees up the rest of the
system, including the http server, ftp server, utilities, etc.
</p>
<p>
I would like to take this opportunity to thank all of the folks
who supported Medusa over the years by purchasing commercial
licenses.
</p>
</body>
</html>
lib/python/ZServer/medusa/docs/README.html
0 → 100644
<html>
<body>
Medusa is Copyright 1996-1997, Sam Rushing (rushing@nightmare.com)
<hr>
<pre>
Medusa is provided free for all non-commercial use.  If you are using
Medusa to make money, or you would like to distribute Medusa or any
derivative of Medusa commercially, then you must arrange a license
with me.  Extension authors may either negotiate with me to include
their extension in the main distribution, or may distribute under
their own terms.

You may modify or extend Medusa, but you may not redistribute the
modified versions without permission.

<b>
NIGHTMARE SOFTWARE AND SAM RUSHING DISCLAIM ALL WARRANTIES WITH REGARD
TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS, IN NO EVENT SHALL NIGHTMARE SOFTWARE OR SAM RUSHING BE
LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,
ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
SOFTWARE.
</b>
</pre>

For more information please contact me at
<a href="mailto:rushing@nightmare.com">rushing@nightmare.com</a>

<h1>What is Medusa?</h1>
<hr>

<p>
Medusa is an architecture for very-high-performance TCP/IP servers
(like HTTP, FTP, and NNTP).  Medusa is different from most other
servers because it runs as a single process, multiplexing I/O with its
various client and server connections within a single process/thread.

<p>
It is capable of smoother and higher performance than most other
servers, while placing a dramatically reduced load on the server
machine.  The single-process, single-thread model simplifies design
and enables some new persistence capabilities that are otherwise
difficult or impossible to implement.

<p>
Medusa is supported on any platform that can run Python and includes a
functional implementation of the &lt;socket&gt; and &lt;select&gt;
modules.  This includes the majority of Unix implementations.

<p>
During development, it is constantly tested on Linux and Win32
[Win95/WinNT], but the core asynchronous capability has been shown to
work on several other platforms, including the Macintosh.  It might
even work on VMS.

<h2>The Power of Python</h2>

<p>
A distinguishing feature of Medusa is that it is written entirely in
Python.  Python (<a href="http://www.python.org/">http://www.python.org/</a>)
is a 'very-high-level' object-oriented language developed by Guido van
Rossum (currently at CNRI).  It is easy to learn, and includes many
modern programming features such as storage management, dynamic
typing, and an extremely flexible object system.  It also provides
convenient interfaces to C and C++.

<p>
The rapid prototyping and delivery capabilities are hard to exaggerate;
for example

<ul>
  <li>It took me longer to read the documentation for persistent HTTP
  connections (the 'Keep-Alive' connection token) than to add the
  feature to Medusa.
  <li>A simple IRC-like chat server system was written in about 90
  minutes.
</ul>

<p>
I've heard similar stories from alpha test sites, and other users of
the core async library.

<h2>Server Notes</h2>

<p>
Both the FTP and HTTP servers use an abstracted 'filesystem object' to
gain access to a given directory tree.  One possible server extension
technique would be to build behavior into this filesystem object,
rather than directly into the server: Then the extension could be
shared with both the FTP and HTTP servers.

<h3>HTTP</h3>

<p>
The core HTTP server itself is quite simple - all functionality is
provided through 'extensions'.  Extensions can be plugged in
dynamically.  [i.e., you could log in to the server via the monitor
service and add or remove an extension on the fly].  The basic
file-delivery service is provided by a 'default' extension, which
matches all URI's.  You can build more complex behavior by replacing
or extending this class.

<p>
The default extension includes support for the 'Connection: Keep-Alive'
token, and will re-use a client channel when requested by the client.

<h3>FTP</h3>

<p>
On Unix, the ftp server includes support for 'real' users, so that it
may be used as a drop-in replacement for the normal ftp server.  Since
most ftp servers on Unix use the 'forking' model, each child process
changes its user/group persona after a successful login.  This appears
to be a secure design.

<p>
Medusa takes a different approach - whenever Medusa performs an
operation for a particular user [listing a directory, opening a file],
it temporarily switches to that user's persona _only_ for the duration
of the operation.  [and each such operation is protected by a
try/finally exception handler].

<p>
To do this Medusa MUST run with super-user privileges.  This is a
HIGHLY experimental approach, and although it has been thoroughly
tested on Linux, security problems may still exist.  If you are
concerned about the security of your server machine, AND YOU SHOULD
BE, I suggest running Medusa's ftp server in anonymous-only mode,
under an account with limited privileges ('nobody' is usually used for
this purpose).

<p>
I am very interested in any feedback on this feature, most
especially information on how the server behaves on different
implementations of Unix, and of course any security problems that are
found.

<hr>

<h3>Monitor</h3>

<p>
The monitor server gives you remote, 'back-door' access to your server
while it is running.  It implements a remote python interpreter.  Once
connected to the monitor, you can do just about anything you can do from
the normal python interpreter.  You can examine data structures, servers,
connection objects.  You can enable or disable extensions, restart the
server, reload modules, etc...

<p>
The monitor server is protected with an MD5-based authentication
similar to that proposed in RFC1725 for the POP3 protocol.  The server
sends the client a timestamp, which is then appended to a secret
password.  The resulting md5 digest is sent back to the server, which
then compares this to the expected result.  Failed login attempts are
logged and immediately disconnected.  The password itself is not sent
over the network (unless you have foolishly transmitted it yourself
through an insecure telnet or X11 session. 8^)
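<p>
For illustration, the challenge/response just described can be sketched
in a few lines of Python.  This is only a minimal sketch of the scheme,
not the exact message framing Medusa uses on the wire:

<pre>
import md5

def challenge_response(timestamp, password):
    # both sides compute md5(timestamp + secret password);
    # only the digest ever crosses the network
    return md5.new(timestamp + password).hexdigest()

# server: send timestamp, remember expected = challenge_response(ts, pw)
# client: reply with challenge_response(ts, pw); server compares digests
</pre>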
<p>
For this reason telnet cannot be used to connect to the monitor
server when it is in a secure mode (the default).  A client program is
provided for this purpose.  You will be prompted for a password when
starting up the server, and by the monitor client.

<p>
For extra added security on Unix, the monitor server will
eventually be able to use a Unix-domain socket, which can be protected
behind a 'firewall' directory (similar to the InterNet News server).

<hr>

<h2>Performance Notes</h2>

<h3>The <code>select()</code> function</h3>

<p>
At the heart of Medusa is a single <code>select()</code> loop.
This loop handles all open socket connections, both servers and
clients.  It is in effect constantly asking the system: 'which of
these sockets has activity?'.  Performance of this system call can
vary widely between operating systems.

<p>
There are also often builtin limitations to the number of sockets
('file descriptors') that a single process, or a whole system, can
manipulate at the same time.  Early versions of Linux placed draconian
limits (256) that have since been raised.  Windows 95 has a limit of
64, while OSF/1 seems to allow up to 4096.

<p>
These limits don't affect only Medusa, you will find them described
in the documentation for other web and ftp servers, too.

<p>
The documentation for the Apache web server has some excellent
notes on tweaking performance for various Unix implementations.  See
<a href="http://www.apache.org/docs/misc/perf.html">http://www.apache.org/docs/misc/perf.html</a>
for more information.

<h3>Buffer sizes</h3>

<p>
The default buffer sizes used by Medusa are set with a bias toward
Internet-based servers: They are relatively small, so that the buffer
overhead for each connection is low.  The assumption is that Medusa
will be talking to a large number of low-bandwidth connections, rather
than a smaller number of high bandwidth.

<p>
This choice trades run-time memory use for efficiency - the down
side of this is that high-speed local connections (i.e., over a local
ethernet) will transfer data at a slower rate than necessary.

<p>
This parameter can easily be tweaked by the site designer, and can
in fact be adjusted on a per-server or even per-client basis.  For
example, you could have the FTP server use larger buffer sizes for
connections from certain domains.

<p>
If there's enough interest, I have some rough ideas for how to make
these buffer sizes automatically adjust to an optimal setting.  Send
email if you'd like to see this feature.

<hr>

<p>
See <a href="medusa.html">./medusa.html</a> for a brief overview of
some of the ideas behind Medusa's design, and for a description of
current and upcoming features.

<p><h3>Enjoy!</h3>

<hr>
<br>-Sam Rushing
<br><a href="mailto:rushing@nightmare.com">rushing@nightmare.com</a>

<!--
  Local Variables:
  indent-use-tabs: nil
  end:
  -->

</body>
</html>
lib/python/ZServer/medusa/docs/composing_producers.gif
0 → 100755
2.64 KB
lib/python/ZServer/medusa/docs/data_flow.gif
0 → 100755
4 KB
lib/python/ZServer/medusa/docs/data_flow.html
0 → 100644
<h1>Data Flow in Medusa</h1>

<img src="data_flow.gif">

<p>
Data flow, both input and output, is asynchronous.  This is
signified by the <i>request</i> and <i>reply</i> queues in the above
diagram.  This means that both requests and replies can get 'backed
up', and are still handled correctly.  For instance, HTTP/1.1 supports
the concept of <i>pipelined requests</i>, where a series of requests
are sent immediately to a server, and the replies are sent as they are
processed.  With a <i>synchronous</i> request, the client would have
to wait for a reply to each request before sending the next.
</p>

<p>
The input data is partitioned into requests by looking for a
<i>terminator</i>.  A terminator is simply a protocol-specific
delimiter - often simply CRLF (carriage-return line-feed), though it
can be longer (for example, MIME multi-part boundaries can be
specified as terminators).  The protocol handler is notified whenever
a complete request has been received.
</p>

<p>
The protocol handler then generates a reply, which is enqueued for
output back to the client.  Sometimes, instead of queuing the actual
data, an object that will generate this data is used, called a
<i>producer</i>.
</p>

<img src="producers.gif">

<p>
The use of <code>producers</code> gives the programmer
extraordinary control over how output is generated and inserted into
the output queue.  Though they are simple objects (requiring only a
single method, <i>more()</i>, to be defined), they can be
<i>composed</i> - simple producers can be wrapped around each other to
create arbitrarily complex behaviors.  [now would be a good time to
browse through some of the producer classes in
<code>producers.py</code>.]
</p>
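<p>
For illustration, a minimal producer and a trivial composing producer
might look like the following sketch (these are hypothetical classes
for this document, not taken from <code>producers.py</code>):

<pre>
class chunk_producer:
    "produce the strings in 'chunks', one per call to more()"
    def __init__(self, chunks):
        self.chunks = chunks
    def more(self):
        if self.chunks:
            return self.chunks.pop(0)   # hand back the next chunk
        return ''                       # empty string means 'done'

class upper_producer:
    "compose: wrap another producer and transform its output"
    def __init__(self, producer):
        self.producer = producer
    def more(self):
        # '' stays '', so termination still propagates
        return self.producer.more().upper()
</pre>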
<p>
The HTTP/1.1 producers make an excellent example.  HTTP allows
replies to be encoded in various ways - for example a reply consisting
of dynamically-generated output might use the 'chunked' transfer
encoding to send data that is compressed on-the-fly.
</p>

<img src="composing_producers.gif">

<p>
In the diagram, green producers actually generate output, and grey
ones transform it in some manner.  This producer might generate output
looking like this:

<pre>
HTTP/1.1 200 OK
Content-Encoding: gzip
Transfer-Encoding: chunked
Header      ==>  Date: Mon, 04 Aug 1997 21:31:44 GMT
                 Content-Type: text/html
                 Server: Medusa/3.0
Chunking    ==>  0x200
Compression ==>  &lt;512 bytes of compressed html&gt;
                 0x200
                 &lt;512 bytes of compressed html&gt;
                 ...
                 0
</pre>

<p>
Still more can be done with this output stream: For the purpose of
efficiency, it makes sense to send output in large, fixed-size chunks:
This transformation can be applied by wrapping a 'globbing' producer
around the whole thing.
</p>

<p>
An important feature of Medusa's producers is that they are
actually rather small objects that do not expand into actual output
data until the moment they are needed: The <code>async_chat</code>
class will only call on a producer for output when the outgoing socket
has indicated that it is ready for data.  Thus Medusa is extremely
efficient when faced with network delays, 'hiccups', and low bandwidth
clients.

<p>
One final note: The mechanisms described above are completely
general - although the examples given demonstrate application to the
<code>http</code> protocol, Medusa's asynchronous core has been
applied to many different protocols, including <code>smtp</code>,
<code>pop3</code>, <code>ftp</code>, and even <code>dns</code>.
lib/python/ZServer/medusa/docs/producers.gif
0 → 100755
5.68 KB
lib/python/ZServer/medusa/docs/proxy_notes.txt
0 → 100644
# we can build 'promises' to produce external data. Each producer
# contains a 'promise' to fetch external data (or an error
# message). writable() for that channel will only return true if the
# top-most producer is ready. This state can be flagged by the dns
# client making a callback.
# So, say 5 proxy requests come in, we can send out DNS queries for
# them immediately. If the replies to these come back before the
# promises get to the front of the queue, so much the better: no
# resolve delay. 8^)
#
# ok, there's still another complication:
# how to maintain replies in order?
# say three requests come in, (to different hosts? can this happen?)
# yet the connections happen third, second, and first. We can't buffer
# the entire request! We need to be able to specify how much to buffer.
#
# ===========================================================================
#
# the current setup is a 'pull' model: whenever the channel fires FD_WRITE,
# we 'pull' data from the producer fifo. what we need is a 'push' option/mode,
# where
# 1) we only check for FD_WRITE when data is in the buffer
# 2) whoever is 'pushing' is responsible for calling 'refill_buffer()'
#
# what is necessary to support this 'mode'?
# 1) writable() only fires when data is in the buffer
# 2) refill_buffer() is only called by the 'pusher'.
#
# how would such a mode affect things? with this mode could we support
# a true http/1.1 proxy? [i.e, support <n> pipelined proxy requests, possibly
# to different hosts, possibly even mixed in with non-proxy requests?] For
# example, it would be nice if we could have the proxy automatically apply the
# 1.1 chunking for 1.0 close-on-eof replies when feeding it to the client. This
# would let us keep our persistent connection.
lib/python/ZServer/medusa/event_loop.py
0 → 100644
# -*- Mode: Python; tab-width: 4 -*-
# This is an alternative event loop that supports 'schedulable events'.
# You can specify an event callback to take place after <n> seconds.
# Important usage note: The granularity of the time-check is limited
# by the <timeout> argument to 'go()'; if there is little or no
# activity and you specify a 30-second timeout interval, then the
# schedule of events may only be checked at those 30-second intervals.
# In other words, if you need 1-second resolution, you will have to
# poll at 1-second intervals. This facility is more useful for longer
# timeouts ("if the channel doesn't close in 5 minutes, then forcibly
# close it" would be a typical usage).
import asyncore
import bisect
import time

socket_map = asyncore.socket_map


class event_loop:

    def __init__(self):
        self.events = []
        self.num_channels = 0
        self.max_channels = 0

    def go(self, timeout=30.0, granularity=15):
        global socket_map
        last_event_check = 0
        while socket_map:
            now = int(time.time())
            if (now - last_event_check) >= granularity:
                last_event_check = now
                fired = []
                # yuck. i want my lisp.
                i = j = 0
                while i < len(self.events):
                    when, what = self.events[i]
                    if now >= when:
                        fired.append(what)
                        j = i + 1
                    else:
                        break
                    i = i + 1
                if fired:
                    self.events = self.events[j:]
                    for what in fired:
                        what(self, now)
            # sample the number of channels
            n = len(asyncore.socket_map)
            self.num_channels = n
            if n > self.max_channels:
                self.max_channels = n
            asyncore.poll(timeout)

    def schedule(self, delta, callback):
        now = int(time.time())
        bisect.insort(self.events, (now + delta, callback))

    def __len__(self):
        return len(self.events)


class test(asyncore.dispatcher):

    def __init__(self):
        asyncore.dispatcher.__init__(self)

    def handle_connect(self):
        print 'Connected!'

    def writable(self):
        return not self.connected

    def connect_timeout_callback(self, event_loop, when):
        if not self.connected:
            print 'Timeout on connect'
            self.close()

    def periodic_thing_callback(self, event_loop, when):
        print 'A Periodic Event has Occurred!'
        # re-schedule it.
        event_loop.schedule(15, self.periodic_thing_callback)


if __name__ == '__main__':
    import socket
    el = event_loop()
    t = test()
    t.create_socket(socket.AF_INET, socket.SOCK_STREAM)
    el.schedule(10, t.connect_timeout_callback)
    el.schedule(15, t.periodic_thing_callback)
    t.connect(('squirl', 80))
    el.go(1.0)
lib/python/ZServer/medusa/fifo.py
0 → 100644
# -*- Mode: Python; tab-width: 4 -*-
# fifo, implemented with lisp-style pairs.
# [quick translation of scheme48/big/queue.scm]
class fifo:

    def __init__(self):
        self.head, self.tail = None, None
        self.length = 0
        self.node_cache = None

    def __len__(self):
        return self.length

    def push(self, v):
        self.node_cache = None
        self.length = self.length + 1
        p = [v, None]
        if self.head is None:
            self.head = p
        else:
            self.tail[1] = p
        self.tail = p

    def pop(self):
        self.node_cache = None
        pair = self.head
        if pair is None:
            raise ValueError, "pop() from an empty queue"
        else:
            self.length = self.length - 1
            [value, next] = pair
            self.head = next
            if next is None:
                self.tail = None
            return value

    def first(self):
        if self.head is None:
            raise ValueError, "first() of an empty queue"
        else:
            return self.head[0]

    def push_front(self, thing):
        self.node_cache = None
        self.length = self.length + 1
        old_head = self.head
        new_head = [thing, old_head]
        self.head = new_head
        if old_head is None:
            self.tail = new_head

    def _nth(self, n):
        i = n
        h = self.head
        while i:
            h = h[1]
            i = i - 1
        self.node_cache = n, h[1]
        return h[0]

    def __getitem__(self, index):
        if (index < 0) or (index >= self.length):
            raise IndexError, "index out of range"
        else:
            if self.node_cache:
                j, h = self.node_cache
                if j == index - 1:
                    result = h[0]
                    self.node_cache = index, h[1]
                    return result
                else:
                    return self._nth(index)
            else:
                return self._nth(index)


class protected_fifo:

    def __init__(self, lock=None):
        if lock is None:
            import thread
            self.lock = thread.allocate_lock()
        else:
            self.lock = lock
        self.fifo = fifo()  # the unprotected fifo defined above

    def push(self, item):
        try:
            self.lock.acquire()
            self.fifo.push(item)
        finally:
            self.lock.release()

    enqueue = push

    def pop(self):
        try:
            self.lock.acquire()
            return self.fifo.pop()
        finally:
            self.lock.release()

    dequeue = pop

    def __len__(self):
        try:
            self.lock.acquire()
            return len(self.fifo)
        finally:
            self.lock.release()


class output_fifo:

    EMBEDDED = 'embedded'
    EOF = 'eof'
    TRIGGER = 'trigger'

    def __init__(self):
        # containment, not inheritance
        self.fifo = fifo()
        self._embedded = None

    def push_embedded(self, fifo):
        # push embedded fifo
        fifo.parent = self  # CYCLE
        self.fifo.push((self.EMBEDDED, fifo))

    def push_eof(self):
        # push end-of-fifo
        self.fifo.push((self.EOF, None))

    def push_trigger(self, thunk):
        self.fifo.push((self.TRIGGER, thunk))

    def push(self, item):
        # item should be a producer or string
        self.fifo.push(item)

    # 'length' is an inaccurate term.  we should
    # probably use an 'empty' method instead.
    def __len__(self):
        if self._embedded is None:
            return len(self.fifo)
        else:
            return len(self._embedded)

    def empty(self):
        return len(self) == 0

    def first(self):
        if self._embedded is None:
            return self.fifo.first()
        else:
            return self._embedded.first()

    def pop(self):
        if self._embedded is not None:
            return self._embedded.pop()
        else:
            result = self.fifo.pop()
            # unset self._embedded
            self._embedded = None
            # check for special items in the front
            if len(self.fifo):
                front = self.fifo.first()
                if type(front) is type(()):
                    # special
                    kind, value = front
                    if kind is self.EMBEDDED:
                        self._embedded = value
                    elif kind is self.EOF:
                        # break the cycle
                        parent = self.parent
                        self.parent = None
                        # pop from parent
                        parent._embedded = None
                    elif kind is self.TRIGGER:
                        # call the trigger thunk
                        value()
                    # remove the special
                    self.fifo.pop()
            # return the originally popped result
            return result


def test_embedded():
    of = output_fifo()
    f2 = output_fifo()
    f3 = output_fifo()
    of.push('one')
    of.push_embedded(f2)
    f2.push('two')
    f3.push('three')
    f3.push('four')
    f2.push_embedded(f3)
    f3.push_eof()
    f2.push('five')
    f2.push_eof()
    of.push('six')
    of.push('seven')
    while 1:
        print of.pop()
lib/python/ZServer/medusa/filesys.py
0 → 100644
# -*- Mode: Python; tab-width: 4 -*-
# $Id: filesys.py,v 1.13 2003/03/18 21:15:16 fdrake Exp $
# Author: Sam Rushing <rushing@nightmare.com>
#
# Generic filesystem interface.
#
# We want to provide a complete wrapper around any and all
# filesystem operations.
# this class is really just for documentation,
# identifying the API for a filesystem object.
# opening files for reading, and listing directories, should
# return a producer.
class abstract_filesystem:

    def __init__(self):
        pass

    def current_directory(self):
        "Return a string representing the current directory."
        pass

    def listdir(self, path, long=0):
        """Return a listing of the directory at 'path'.  The empty string
        indicates the current directory.  If 'long' is set, instead
        return a list of (name, stat_info) tuples.
        """
        pass

    def open(self, path, mode):
        "Return an open file object"
        pass

    def stat(self, path):
        "Return the equivalent of os.stat() on the given path."
        pass

    def isdir(self, path):
        "Does the path represent a directory?"
        pass

    def isfile(self, path):
        "Does the path represent a plain file?"
        pass

    def cwd(self, path):
        "Change the working directory."
        pass

    def cdup(self):
        "Change to the parent of the current directory."
        pass

    def longify(self, path):
        """Return a 'long' representation of the filename
        [for the output of the LIST command]"""
        pass


# standard wrapper around a unix-like filesystem, with a 'false root'
# capability.

# security considerations: can symbolic links be used to 'escape' the
# root?  should we allow it?  if not, then we could scan the
# filesystem on startup, but that would not help if they were added
# later.  We will probably need to check for symlinks in the cwd method.

# what to do if wd is an invalid directory?

import os, re
import stat
import string
import time  # needed by msdos_date() below


def safe_stat(path):
    try:
        return (path, os.stat(path))
    except:
        return None


import glob


class os_filesystem:

    path_module = os.path

    # set this to zero if you want to disable pathname globbing.
    # [we currently don't glob, anyway]
    do_globbing = 1

    def __init__(self, root, wd='/'):
        self.root = root
        self.wd = wd

    def current_directory(self):
        return self.wd

    def isfile(self, path):
        p = self.normalize(self.path_module.join(self.wd, path))
        return self.path_module.isfile(self.translate(p))

    def isdir(self, path):
        p = self.normalize(self.path_module.join(self.wd, path))
        return self.path_module.isdir(self.translate(p))

    def cwd(self, path):
        p = self.normalize(self.path_module.join(self.wd, path))
        translated_path = self.translate(p)
        if not self.path_module.isdir(translated_path):
            return 0
        else:
            old_dir = os.getcwd()
            # temporarily change to that directory, in order
            # to see if we have permission to do so.
            try:
                can = 0
                try:
                    os.chdir(translated_path)
                    can = 1
                    self.wd = p
                except:
                    pass
            finally:
                if can:
                    os.chdir(old_dir)
            return can

    def cdup(self):
        return self.cwd('..')

    def listdir(self, path, long=0):
        p = self.translate(path)
        # I think we should glob, but limit it to the current
        # directory only.
        ld = os.listdir(p)
        if not long:
            return list_producer(ld, 0, None)
        else:
            old_dir = os.getcwd()
            try:
                os.chdir(p)
                # if os.stat fails we ignore that file.
                result = filter(None, map(safe_stat, ld))
            finally:
                os.chdir(old_dir)
            return list_producer(result, 1, self.longify)

    # TODO: implement a cache w/timeout for stat()
    def stat(self, path):
        p = self.translate(path)
        return os.stat(p)

    def open(self, path, mode):
        p = self.translate(path)
        return open(p, mode)

    def unlink(self, path):
        p = self.translate(path)
        return os.unlink(p)

    def mkdir(self, path):
        p = self.translate(path)
        return os.mkdir(p)

    def rmdir(self, path):
        p = self.translate(path)
        return os.rmdir(p)

    # utility methods

    def normalize(self, path):
        # watch for the ever-sneaky '/+' path element
        path = re.sub('/+', '/', path)
        p = self.path_module.normpath(path)
        # remove 'dangling' cdup's.
        if len(p) > 2 and p[:3] == '/..':
            p = '/'
        return p

    def translate(self, path):
        # we need to join together three separate
        # path components, and do it safely.
        # <real_root>/<current_directory>/<path>
        # use the operating system's path separator.
        path = string.join(string.split(path, '/'), os.sep)
        p = self.normalize(self.path_module.join(self.wd, path))
        p = self.normalize(self.path_module.join(self.root, p[1:]))
        return p

    def longify(self, (path, stat_info)):
        return unix_longify(path, stat_info)

    def __repr__(self):
        return '<unix-style fs root:%s wd:%s>' % (self.root, self.wd)


if os.name == 'posix':

    class unix_filesystem(os_filesystem):
        pass

    class schizophrenic_unix_filesystem(os_filesystem):

        PROCESS_UID = os.getuid()
        PROCESS_EUID = os.geteuid()
        PROCESS_GID = os.getgid()
        PROCESS_EGID = os.getegid()

        def __init__(self, root, wd='/', persona=(None, None)):
            os_filesystem.__init__(self, root, wd)
            self.persona = persona

        def become_persona(self):
            if self.persona is not (None, None):
                uid, gid = self.persona
                # the order of these is important!
                os.setegid(gid)
                os.seteuid(uid)

        def become_nobody(self):
            if self.persona is not (None, None):
                os.seteuid(self.PROCESS_UID)
                os.setegid(self.PROCESS_GID)

        # cwd, cdup, open, listdir
        def cwd(self, path):
            try:
                self.become_persona()
                return os_filesystem.cwd(self, path)
            finally:
                self.become_nobody()

        def cdup(self, path):
            try:
                self.become_persona()
                return os_filesystem.cdup(self)
            finally:
                self.become_nobody()

        def open(self, filename, mode):
            try:
                self.become_persona()
                return os_filesystem.open(self, filename, mode)
            finally:
                self.become_nobody()

        def listdir(self, path, long=0):
            try:
                self.become_persona()
                return os_filesystem.listdir(self, path, long)
            finally:
                self.become_nobody()
# This hasn't been very reliable across different platforms.
# maybe think about a separate 'directory server'.
#
# import posixpath
# import fcntl
# import FCNTL
# import select
# import asyncore
#
# # pipes /bin/ls for directory listings.
# class unix_filesystem (os_filesystem):
# pass
# path_module = posixpath
#
# def listdir (self, path, long=0):
# p = self.translate (path)
# if not long:
# return list_producer (os.listdir (p), 0, None)
# else:
# command = '/bin/ls -l %s' % p
# print 'opening pipe to "%s"' % command
# fd = os.popen (command, 'rt')
# return pipe_channel (fd)
#
# # this is both a dispatcher, _and_ a producer
# class pipe_channel (asyncore.file_dispatcher):
# buffer_size = 4096
#
# def __init__ (self, fd):
# asyncore.file_dispatcher.__init__ (self, fd)
# self.fd = fd
# self.done = 0
# self.data = ''
#
# def handle_read (self):
# if len (self.data) < self.buffer_size:
# self.data = self.data + self.fd.read (self.buffer_size)
# #print '%s.handle_read() => len(self.data) == %d' % (self, len(self.data))
#
# def handle_expt (self):
# #print '%s.handle_expt()' % self
# self.done = 1
#
# def ready (self):
# #print '%s.ready() => %d' % (self, len(self.data))
# return ((len (self.data) > 0) or self.done)
#
# def more (self):
# if self.data:
# r = self.data
# self.data = ''
# elif self.done:
# self.close()
# self.downstream.finished()
# r = ''
# else:
# r = None
# #print '%s.more() => %s' % (self, (r and len(r)))
# return r
# For the 'real' root, we could obtain a list of drives, and then
# use that. Doesn't win32 provide such a 'real' filesystem?
# [yes, I think something like this "\\.\c\windows"]
class msdos_filesystem (os_filesystem):
    def longify (self, (path, stat_info)):
        return msdos_longify (path, stat_info)
# A merged filesystem will let you plug other filesystems together.
# We really need the equivalent of a 'mount' capability - this seems
# to be the most general idea. So you'd use a 'mount' method to place
# another filesystem somewhere in the hierarchy.
# Note: this is most likely how I will handle ~user directories
# with the http server.
class merged_filesystem:
    def __init__ (self, *fsys):
        pass
# this matches the output of NT's ftp server (when in
# MSDOS mode) exactly.
def msdos_longify (file, stat_info):
    if stat.S_ISDIR (stat_info[stat.ST_MODE]):
        dir = '<DIR>'
    else:
        dir = '     '
    date = msdos_date (stat_info[stat.ST_MTIME])
    return '%s       %s %8d %s' % (
        date,
        dir,
        stat_info[stat.ST_SIZE],
        file
        )
def msdos_date (t):
    try:
        info = time.gmtime (t)
    except:
        info = time.gmtime (0)
    # year, month, day, hour, minute, second, ...
    info = list (info)      # copy: time tuples don't support item assignment
    if info[3] > 11:
        merid = 'PM'
        info[3] = info[3] - 12
    else:
        merid = 'AM'
    return '%02d-%02d-%02d  %02d:%02d%s' % (
        info[1],
        info[2],
        info[0]%100,
        info[3],
        info[4],
        merid
        )
months = ['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec']

mode_table = {
    '0':'---',
    '1':'--x',
    '2':'-w-',
    '3':'-wx',
    '4':'r--',
    '5':'r-x',
    '6':'rw-',
    '7':'rwx'
    }
import time

def unix_longify (file, stat_info):
    # for now, only pay attention to the lower bits
    mode = ('%o' % stat_info[stat.ST_MODE])[-3:]
    mode = string.join (map (lambda x: mode_table[x], mode), '')
    if stat.S_ISDIR (stat_info[stat.ST_MODE]):
        dirchar = 'd'
    else:
        dirchar = '-'
    date = ls_date (long(time.time()), stat_info[stat.ST_MTIME])
    user = str(stat_info[stat.ST_UID].replace(' ', '_'))
    group = str(stat_info[stat.ST_GID].replace(' ', '_'))
    if user == 'System_Processes':
        user = 'Sysproc'
    if group == 'System_Processes':
        group = 'Sysproc'
    return '%s%s %3d %-8s %-8s %8d %s %s' % (
        dirchar,
        mode,
        stat_info[stat.ST_NLINK],
        user,
        group,
        stat_info[stat.ST_SIZE],
        date,
        file
        )
# Emulate the unix 'ls' command's date field.
# it has two formats - if the date is more than 180
# days in the past, then it's like this:
# Oct 19 1995
# otherwise, it looks like this:
# Oct 19 17:33
def ls_date (now, t):
    try:
        info = time.gmtime (t)
    except:
        info = time.gmtime (0)
    # 15,600,000 == 86,400 * 180
    if (now - t) > 15600000:
        return '%s %2d  %d' % (
            months[info[1]-1],
            info[2],
            info[0]
            )
    else:
        return '%s %2d %02d:%02d' % (
            months[info[1]-1],
            info[2],
            info[3],
            info[4]
            )
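
# A minimal sketch (not part of the original module) of the two date
# formats ls_date() emulates; the timestamps are arbitrary examples.
def _demo_ls_date():
    now = int (time.time())
    print(ls_date (now, now))              # recent file, e.g. 'Oct 19 17:33'
    print(ls_date (now, now - 16000000))   # >180 days old, e.g. 'Oct 19  1995'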
# ===========================================================================
# Producers
# ===========================================================================
class list_producer:
    def __init__ (self, file_list, long, longify):
        self.file_list = file_list
        self.long = long
        self.longify = longify
        self.done = 0

    def ready (self):
        if len(self.file_list):
            return 1
        else:
            if not self.done:
                self.done = 1
                return 0
            return (len(self.file_list) > 0)

    # this should do a pushd/popd
    def more (self):
        if not self.file_list:
            return ''
        else:
            # do a few at a time
            bunch = self.file_list[:50]
            if self.long:
                bunch = map (self.longify, bunch)
            self.file_list = self.file_list[50:]
            return string.joinfields (bunch, '\r\n') + '\r\n'
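
# A minimal sketch (not part of the original module) of the chunking
# behavior of list_producer.more(): at most 50 names per call, each
# batch terminated with CRLF.  The file list here is illustrative.
def _demo_list_producer():
    lp = list_producer (['a', 'b', 'c'], 0, None)
    while lp.ready():
        data = lp.more()
        if not data:
            break
        print(repr (data))     # -> 'a\r\nb\r\nc\r\n'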
lib/python/ZServer/medusa/ftp_server.py
# -*- Mode: Python; tab-width: 4 -*-
# Author: Sam Rushing <rushing@nightmare.com>
# Copyright 1996-2000 by Sam Rushing
# All Rights Reserved.
#
RCS_ID = '$Id: ftp_server.py,v 1.23 2003/03/18 21:15:17 fdrake Exp $'
# An extensible, configurable, asynchronous FTP server.
#
# All socket I/O is non-blocking, however file I/O is currently
# blocking. Eventually file I/O may be made non-blocking, too, if it
# seems necessary. Currently the only CPU-intensive operation is
# getting and formatting a directory listing. [this could be moved
# into another process/directory server, or another thread?]
#
# Only a subset of RFC 959 is implemented, but much of that RFC is
# vestigial anyway. I've attempted to include the most commonly-used
# commands, using the feature set of wu-ftpd as a guide.
import asyncore
import asynchat

import os
import socket
import stat
import string
import sys
import time
# TODO: implement a directory listing cache. On very-high-load
# servers this could save a lot of disk abuse, and possibly the
# work of computing emulated unix ls output.
# Potential security problem with the FTP protocol? I don't think
# there's any verification of the origin of a data connection. Not
# really a problem for the server (since it doesn't send the port
# command, except when in PASV mode) But I think a data connection
# could be spoofed by a program with access to a sniffer - it could
# watch for a PORT command to go over a command channel, and then
# connect to that port before the server does.
# Unix user id's:
# In order to support assuming the id of a particular user,
# it seems there are two options:
# 1) fork, and seteuid in the child
# 2) carefully control the effective uid around filesystem accessing
# methods, using try/finally. [this seems to work]
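
# A minimal sketch (not part of the original file) of option 2 above:
# briefly assume another effective uid/gid around a filesystem call.
# The uid/gid would come from an authorizer; 'path' is illustrative.
def _seteuid_sketch (uid, gid, path):
    os.setegid (gid)    # group first, while still privileged
    os.seteuid (uid)
    try:
        return os.listdir (path)
    finally:
        os.seteuid (os.getuid())    # restore the server's own identity
        os.setegid (os.getgid())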
if RCS_ID.startswith('$Id: '):
    VERSION = string.split(RCS_ID)[2]
else:
    VERSION = '0.0'

from counter import counter
import producers
import status_handler
import logger
import string
class ftp_channel (asynchat.async_chat):

    # defaults for a reliable __repr__
    addr = ('unknown','0')

    # unset this in a derived class in order
    # to enable the commands in 'self.write_commands'
    read_only = 1
    write_commands = ['appe','dele','mkd','rmd','rnfr','rnto','stor','stou']

    restart_position = 0

    # comply with (possibly troublesome) RFC959 requirements
    # This is necessary to correctly run an active data connection
    # through a firewall that triggers on the source port (expected
    # to be 'L-1', or 20 in the normal case).
    bind_local_minus_one = 0

    def __init__ (self, server, conn, addr):
        self.server = server
        self.current_mode = 'a'
        self.addr = addr
        asynchat.async_chat.__init__ (self, conn)
        self.set_terminator ('\r\n')

        # client data port.  Defaults to 'the same as the control connection'.
        self.client_addr = (addr[0], 21)

        self.client_dc = None
        self.in_buffer = ''
        self.closing = 0
        self.passive_acceptor = None
        self.passive_connection = None
        self.filesystem = None
        self.authorized = 0
        # send the greeting
        self.respond (
            '220 %s FTP server (Medusa Async V%s [experimental]) ready.' % (
                self.server.hostname,
                VERSION
                )
            )

#   def __del__ (self):
#       print 'ftp_channel.__del__()'

    # --------------------------------------------------
    # async-library methods
    # --------------------------------------------------

    def handle_expt (self):
        # this is handled below.  not sure what I could
        # do here to make that code less kludgish.
        pass

    def collect_incoming_data (self, data):
        self.in_buffer = self.in_buffer + data
        if len(self.in_buffer) > 4096:
            # silently truncate really long lines
            # (possible denial-of-service attack)
            self.in_buffer = ''

    def found_terminator (self):

        line = self.in_buffer

        if not len(line):
            return

        sp = string.find (line, ' ')
        if sp != -1:
            line = [line[:sp], line[sp+1:]]
        else:
            line = [line]

        command = string.lower (line[0])
        # watch especially for 'urgent' abort commands.
        if string.find (command, 'abor') != -1:
            # strip off telnet sync chars and the like...
            while command and command[0] not in string.letters:
                command = command[1:]
        fun_name = 'cmd_%s' % command
        if command != 'pass':
            self.log ('<== %s' % repr(self.in_buffer)[1:-1])
        else:
            self.log ('<== %s' % line[0] + ' <password>')
        self.in_buffer = ''
        if not hasattr (self, fun_name):
            self.command_not_understood (line[0])
            return
        fun = getattr (self, fun_name)
        if (not self.authorized) and (command not in ('user', 'pass', 'help', 'quit')):
            self.respond ('530 Please log in with USER and PASS')
        elif (not self.check_command_authorization (command)):
            self.command_not_authorized (command)
        else:
            try:
                result = apply (fun, (line,))
            except:
                self.server.total_exceptions.increment()
                (file, fun, line), t, v, tbinfo = asyncore.compact_traceback()
                if self.client_dc:
                    try:
                        self.client_dc.close()
                    except:
                        pass
                self.respond (
                    '451 Server Error: %s, %s: file: %s line: %s' % (
                        t, v, file, line,
                        )
                    )

    closed = 0
    def close (self):
        if not self.closed:
            self.closed = 1
            if self.passive_acceptor:
                self.passive_acceptor.close()
            if self.client_dc:
                self.client_dc.close()
            self.server.closed_sessions.increment()
            asynchat.async_chat.close (self)
    # --------------------------------------------------
    # filesystem interface functions.
    # override these to provide access control or perform
    # other functions.
    # --------------------------------------------------

    def cwd (self, line):
        return self.filesystem.cwd (line[1])

    def cdup (self, line):
        return self.filesystem.cdup()

    def open (self, path, mode):
        return self.filesystem.open (path, mode)

    # returns a producer
    def listdir (self, path, long=0):
        return self.filesystem.listdir (path, long)

    def get_dir_list (self, line, long=0):
        # we need to scan the command line for arguments to '/bin/ls'...
        args = line[1:]
        path_args = []
        for arg in args:
            if arg[0] != '-':
                path_args.append (arg)
            else:
                # ignore arguments
                pass
        if len(path_args) < 1:
            dir = '.'
        else:
            dir = path_args[0]
        return self.listdir (dir, long)

    # --------------------------------------------------
    # authorization methods
    # --------------------------------------------------

    def check_command_authorization (self, command):
        if command in self.write_commands and self.read_only:
            return 0
        else:
            return 1

    # --------------------------------------------------
    # utility methods
    # --------------------------------------------------

    def log (self, message):
        self.server.logger.log (
            self.addr[0],
            '%d %s' % (
                self.addr[1], message
                )
            )

    def respond (self, resp):
        self.log ('==> %s' % resp)
        self.push (resp + '\r\n')

    def command_not_understood (self, command):
        self.respond ("500 '%s': command not understood." % command)

    def command_not_authorized (self, command):
        self.respond (
            "530 You are not authorized to perform the '%s' command" % (
                command
                )
            )

    def make_xmit_channel (self):
        # In PASV mode, the connection may or may _not_ have been made
        # yet.  [although in most cases it is... FTP Explorer being
        # the only exception I've yet seen].  This gets somewhat confusing
        # because things may happen in any order...
        pa = self.passive_acceptor
        if pa:
            if pa.ready:
                # a connection has already been made.
                conn, addr = self.passive_acceptor.ready
                cdc = xmit_channel (self, addr)
                cdc.set_socket (conn)
                cdc.connected = 1
                self.passive_acceptor.close()
                self.passive_acceptor = None
            else:
                # we're still waiting for a connect to the PASV port.
                cdc = xmit_channel (self)
        else:
            # not in PASV mode.
            ip, port = self.client_addr
            cdc = xmit_channel (self, self.client_addr)
            cdc.create_socket (socket.AF_INET, socket.SOCK_STREAM)
            if self.bind_local_minus_one:
                cdc.bind (('', self.server.port - 1))
            try:
                cdc.connect ((ip, port))
            except socket.error, why:
                self.respond ("425 Can't build data connection")
        self.client_dc = cdc

    # pretty much the same as xmit, but only right on the verge of
    # being worth a merge.
    def make_recv_channel (self, fd):
        pa = self.passive_acceptor
        if pa:
            if pa.ready:
                # a connection has already been made.
                conn, addr = pa.ready
                cdc = recv_channel (self, addr, fd)
                cdc.set_socket (conn)
                cdc.connected = 1
                self.passive_acceptor.close()
                self.passive_acceptor = None
            else:
                # we're still waiting for a connect to the PASV port.
                cdc = recv_channel (self, None, fd)
        else:
            # not in PASV mode.
            ip, port = self.client_addr
            cdc = recv_channel (self, self.client_addr, fd)
            cdc.create_socket (socket.AF_INET, socket.SOCK_STREAM)
            try:
                cdc.connect ((ip, port))
            except socket.error, why:
                self.respond ("425 Can't build data connection")
        self.client_dc = cdc

    type_map = {
        'a':'ASCII',
        'i':'Binary',
        'e':'EBCDIC',
        'l':'Binary'
        }

    type_mode_map = {
        'a':'t',
        'i':'b',
        'e':'b',
        'l':'b'
        }
    # --------------------------------------------------
    # command methods
    # --------------------------------------------------

    def cmd_type (self, line):
        'specify data transfer type'
        # ascii, ebcdic, image, local <byte size>
        t = string.lower (line[1])
        # no support for EBCDIC
        # if t not in ['a','e','i','l']:
        if t not in ['a','i','l']:
            self.command_not_understood (string.join (line))
        elif t == 'l' and (len(line) > 2 and line[2] != '8'):
            self.respond ('504 Byte size must be 8')
        else:
            self.current_mode = t
            self.respond ('200 Type set to %s.' % self.type_map[t])

    def cmd_quit (self, line):
        'terminate session'
        self.respond ('221 Goodbye.')
        self.close_when_done()

    def cmd_port (self, line):
        'specify data connection port'
        info = string.split (line[1], ',')
        ip = string.join (info[:4], '.')
        port = string.atoi(info[4])*256 + string.atoi(info[5])
        # how many data connections at a time?
        # I'm assuming one for now...
        # TODO: we should (optionally) verify that the
        # ip number belongs to the client.  [wu-ftpd does this?]
        self.client_addr = (ip, port)
        self.respond ('200 PORT command successful.')

    def new_passive_acceptor (self):
        # ensure that only one of these exists at a time.
        if self.passive_acceptor is not None:
            self.passive_acceptor.close()
            self.passive_acceptor = None
        self.passive_acceptor = passive_acceptor (self)
        return self.passive_acceptor

    def cmd_pasv (self, line):
        'prepare for server-to-server transfer'
        pc = self.new_passive_acceptor()
        port = pc.addr[1]
        ip_addr = pc.control_channel.getsockname()[0]
        self.respond (
            '227 Entering Passive Mode (%s,%d,%d)' % (
                string.join (string.split (ip_addr, '.'), ','),
                port/256,
                port%256
                )
            )
        self.client_dc = None

    def cmd_nlst (self, line):
        'give name list of files in directory'
        # ncftp adds the -FC argument for the user-visible 'nlist'
        # command.  We could try to emulate ls flags, but not just yet.
        if '-FC' in line:
            line.remove ('-FC')
        try:
            dir_list_producer = self.get_dir_list (line, 0)
        except os.error, why:
            self.respond ('550 Could not list directory: %s' % repr(why))
            return
        self.respond (
            '150 Opening %s mode data connection for file list' % (
                self.type_map[self.current_mode]
                )
            )
        self.make_xmit_channel()
        self.client_dc.push_with_producer (dir_list_producer)
        self.client_dc.close_when_done()

    def cmd_list (self, line):
        'give list files in a directory'
        try:
            dir_list_producer = self.get_dir_list (line, 1)
        except os.error, why:
            self.respond ('550 Could not list directory: %s' % repr(why))
            return
        self.respond (
            '150 Opening %s mode data connection for file list' % (
                self.type_map[self.current_mode]
                )
            )
        self.make_xmit_channel()
        self.client_dc.push_with_producer (dir_list_producer)
        self.client_dc.close_when_done()

    def cmd_cwd (self, line):
        'change working directory'
        if self.cwd (line):
            self.respond ('250 CWD command successful.')
        else:
            self.respond ('550 No such directory.')

    def cmd_cdup (self, line):
        'change to parent of current working directory'
        if self.cdup(line):
            self.respond ('250 CDUP command successful.')
        else:
            self.respond ('550 No such directory.')

    def cmd_pwd (self, line):
        'print the current working directory'
        self.respond (
            '257 "%s" is the current directory.' % (
                self.filesystem.current_directory()
                )
            )

    # modification time
    # example output:
    # 213 19960301204320
    def cmd_mdtm (self, line):
        'show last modification time of file'
        filename = line[1]
        if not self.filesystem.isfile (filename):
            self.respond ('550 "%s" is not a file' % filename)
        else:
            mtime = time.gmtime (self.filesystem.stat(filename)[stat.ST_MTIME])
            self.respond (
                '213 %4d%02d%02d%02d%02d%02d' % (
                    mtime[0],
                    mtime[1],
                    mtime[2],
                    mtime[3],
                    mtime[4],
                    mtime[5]
                    )
                )

    def cmd_noop (self, line):
        'do nothing'
        self.respond ('200 NOOP command successful.')

    def cmd_size (self, line):
        'return size of file'
        filename = line[1]
        if not self.filesystem.isfile (filename):
            self.respond ('550 "%s" is not a file' % filename)
        else:
            self.respond (
                '213 %d' % (self.filesystem.stat(filename)[stat.ST_SIZE])
                )

    def cmd_retr (self, line):
        'retrieve a file'
        if len(line) < 2:
            self.command_not_understood (string.join (line))
        else:
            file = line[1]
            if not self.filesystem.isfile (file):
                self.log_info ('checking %s' % file)
                self.respond ('550 No such file')
            else:
                try:
                    # FIXME: for some reason, 'rt' isn't working on win95
                    mode = 'r' + self.type_mode_map[self.current_mode]
                    fd = self.open (file, mode)
                except IOError, why:
                    self.respond ('553 could not open file for reading: %s' % (repr(why)))
                    return
                self.respond (
                    "150 Opening %s mode data connection for file '%s'" % (
                        self.type_map[self.current_mode],
                        file
                        )
                    )
                self.make_xmit_channel()

                if self.restart_position:
                    # try to position the file as requested, but
                    # give up silently on failure (the 'file object'
                    # may not support seek())
                    try:
                        fd.seek (self.restart_position)
                    except:
                        pass
                    self.restart_position = 0

                self.client_dc.push_with_producer (
                    file_producer (self, self.client_dc, fd)
                    )
                self.client_dc.close_when_done()

    def cmd_stor (self, line, mode='wb'):
        'store a file'
        if len (line) < 2:
            self.command_not_understood (string.join (line))
        else:
            if self.restart_position:
                restart_position = 0
                self.respond ('553 restart on STOR not yet supported')
                return
            file = line[1]
            # todo: handle that type flag
            try:
                fd = self.open (file, mode)
            except IOError, why:
                self.respond ('553 could not open file for writing: %s' % (repr(why)))
                return
            self.respond (
                '150 Opening %s connection for %s' % (
                    self.type_map[self.current_mode],
                    file
                    )
                )
            self.make_recv_channel (fd)

    def cmd_abor (self, line):
        'abort operation'
        if self.client_dc:
            self.client_dc.close()
        self.respond ('226 ABOR command successful.')

    def cmd_appe (self, line):
        'append to a file'
        return self.cmd_stor (line, 'ab')

    def cmd_dele (self, line):
        if len (line) != 2:
            self.command_not_understood (string.join (line))
        else:
            file = line[1]
            if self.filesystem.isfile (file):
                try:
                    self.filesystem.unlink (file)
                    self.respond ('250 DELE command successful.')
                except:
                    self.respond ('550 error deleting file.')
            else:
                self.respond ('550 %s: No such file.' % file)

    def cmd_mkd (self, line):
        if len (line) != 2:
            self.command_not_understood (string.join (line))
        else:
            path = line[1]
            try:
                self.filesystem.mkdir (path)
                self.respond ('257 MKD command successful.')
            except:
                self.respond ('550 error creating directory.')

    def cmd_rmd (self, line):
        if len (line) != 2:
            self.command_not_understood (string.join (line))
        else:
            path = line[1]
            try:
                self.filesystem.rmdir (path)
                self.respond ('250 RMD command successful.')
            except:
                self.respond ('550 error removing directory.')

    def cmd_user (self, line):
        'specify user name'
        if len(line) > 1:
            self.user = line[1]
            self.respond ('331 Password required.')
        else:
            self.command_not_understood (string.join (line))

    def cmd_pass (self, line):
        'specify password'
        if len(line) < 2:
            pw = ''
        else:
            pw = line[1]
        result, message, fs = self.server.authorizer.authorize (self, self.user, pw)
        if result:
            self.respond ('230 %s' % message)
            self.filesystem = fs
            self.authorized = 1
            self.log_info ('Successful login: Filesystem=%s' % repr(fs))
        else:
            self.respond ('530 %s' % message)

    def cmd_rest (self, line):
        'restart incomplete transfer'
        try:
            pos = string.atoi (line[1])
        except ValueError:
            self.command_not_understood (string.join (line))
            return      # 'pos' is not bound if the argument failed to parse
        self.restart_position = pos
        self.respond (
            '350 Restarting at %d. Send STORE or RETRIEVE to initiate transfer.' % pos
            )

    def cmd_stru (self, line):
        'obsolete - set file transfer structure'
        if line[1] in 'fF':
            # f == 'file'
            self.respond ('200 STRU F Ok')
        else:
            self.respond ('504 Unimplemented STRU type')

    def cmd_mode (self, line):
        'obsolete - set file transfer mode'
        if line[1] in 'sS':
            # f == 'file'
            self.respond ('200 MODE S Ok')
        else:
            self.respond ('502 Unimplemented MODE type')

    # The stat command has two personalities.  Normally it returns status
    # information about the current connection.  But if given an argument,
    # it is equivalent to the LIST command, with the data sent over the
    # control connection.  Strange.  But wuftpd, ftpd, and nt's ftp server
    # all support it.
    #
    ## def cmd_stat (self, line):
    ##     'return status of server'
    ##     pass

    def cmd_syst (self, line):
        'show operating system type of server system'
        # Replying to this command is of questionable utility, because
        # this server does not behave in a predictable way w.r.t. the
        # output of the LIST command.  We emulate Unix ls output, but
        # on win32 the pathname can contain drive information at the front
        # Currently, the combination of ensuring that os.sep == '/'
        # and removing the leading slash when necessary seems to work.
        # [cd'ing to another drive also works]
        #
        # This is how wuftpd responds, and is probably
        # the most expected.  The main purpose of this reply is so that
        # the client knows to expect Unix ls-style LIST output.
        self.respond ('215 UNIX Type: L8')
        # one disadvantage to this is that some client programs
        # assume they can pass args to /bin/ls.
        # a few typical responses:
        # 215 UNIX Type: L8 (wuftpd)
        # 215 Windows_NT version 3.51
        # 215 VMS MultiNet V3.3
        # 500 'SYST': command not understood. (SVR4)

    def cmd_help (self, line):
        'give help information'
        # find all the methods that match 'cmd_xxxx',
        # use their docstrings for the help response.
        attrs = dir(self.__class__)
        help_lines = []
        for attr in attrs:
            if attr[:4] == 'cmd_':
                x = getattr (self, attr)
                if type(x) == type(self.cmd_help):
                    if x.__doc__:
                        help_lines.append ('\t%s\t%s' % (attr[4:], x.__doc__))
        if help_lines:
            self.push ('214-The following commands are recognized\r\n')
            self.push_with_producer (producers.lines_producer (help_lines))
            self.push ('214\r\n')
        else:
            self.push ('214-\r\n\tHelp Unavailable\r\n214\r\n')
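
# A minimal sketch (not part of the original file) of the PORT/PASV
# address arithmetic used by cmd_port() and cmd_pasv() above: six
# comma-separated bytes h1,h2,h3,h4,p1,p2, with port = p1*256 + p2.
# The address used here is illustrative only.
def _demo_port_args():
    info = string.split ('10,0,0,1,4,1', ',')
    ip = string.join (info[:4], '.')
    port = string.atoi (info[4]) * 256 + string.atoi (info[5])
    print('%s:%d' % (ip, port))    # -> 10.0.0.1:1025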
class ftp_server (asyncore.dispatcher):
    # override this to spawn a different FTP channel class.
    ftp_channel_class = ftp_channel

    SERVER_IDENT = 'FTP Server (V%s)' % VERSION

    def __init__ (
            self,
            authorizer,
            hostname = None,
            ip = '',
            port = 21,
            resolver = None,
            logger_object = logger.file_logger (sys.stdout)
            ):
        self.ip = ip
        self.port = port
        self.authorizer = authorizer

        if hostname is None:
            self.hostname = socket.gethostname()
        else:
            self.hostname = hostname

        # statistics
        self.total_sessions = counter()
        self.closed_sessions = counter()
        self.total_files_out = counter()
        self.total_files_in = counter()
        self.total_bytes_out = counter()
        self.total_bytes_in = counter()
        self.total_exceptions = counter()
        #
        asyncore.dispatcher.__init__ (self)
        self.create_socket (socket.AF_INET, socket.SOCK_STREAM)

        self.set_reuse_addr()
        self.bind ((self.ip, self.port))
        self.listen (5)

        if not logger_object:
            logger_object = sys.stdout

        if resolver:
            self.logger = logger.resolving_logger (resolver, logger_object)
        else:
            self.logger = logger.unresolving_logger (logger_object)

        self.log_info (
            'FTP server started at %s\n\tAuthorizer:%s\n\tHostname: %s\n\tPort: %d' % (
                time.ctime(time.time()),
                repr (self.authorizer),
                self.hostname,
                self.port
                )
            )

    def writable (self):
        return 0

    def handle_read (self):
        pass

    def handle_connect (self):
        pass

    def handle_accept (self):
        conn, addr = self.accept()
        self.total_sessions.increment()
        self.log_info ('Incoming connection from %s:%d' % (addr[0], addr[1]))
        self.ftp_channel_class (self, conn, addr)

    # return a producer describing the state of the server
    def status (self):

        def nice_bytes (n):
            return string.join (status_handler.english_bytes (n))

        return producers.lines_producer (
            ['<h2>%s</h2>'                  % self.SERVER_IDENT,
             '<br>Listening on <b>Host:</b> %s' % self.hostname,
             '<b>Port:</b> %d'              % self.port,
             '<br>Sessions',
             '<b>Total:</b> %s'             % self.total_sessions,
             '<b>Current:</b> %d'           % (self.total_sessions.as_long() - self.closed_sessions.as_long()),
             '<br>Files',
             '<b>Sent:</b> %s'              % self.total_files_out,
             '<b>Received:</b> %s'          % self.total_files_in,
             '<br>Bytes',
             '<b>Sent:</b> %s'              % nice_bytes (self.total_bytes_out.as_long()),
             '<b>Received:</b> %s'          % nice_bytes (self.total_bytes_in.as_long()),
             '<br>Exceptions: %s'           % self.total_exceptions,
             ]
            )
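
# A minimal usage sketch (not part of the original file): serve a
# directory read-only via the dummy_authorizer defined further down,
# on an unprivileged port.  The root path is illustrative only.
def _demo_run_server():
    server = ftp_server (dummy_authorizer ('/tmp'), port=8021)
    asyncore.loop()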
# ======================================================================
# Data Channel Classes
# ======================================================================
# This socket accepts a data connection, used when the server has been
# placed in passive mode. Although the RFC implies that we ought to
# be able to use the same acceptor over and over again, this presents
# a problem: how do we shut it off, so that we are accepting
# connections only when we expect them? [we can't]
#
# wuftpd, and probably all the other servers, solve this by allowing
# only one connection to hit this acceptor. They then close it. Any
# subsequent data-connection command will then try for the default
# port on the client side [which is of course never there]. So the
# 'always-send-PORT/PASV' behavior seems required.
#
# Another note: wuftpd will also be listening on the channel as soon
# as the PASV command is sent. It does not wait for a data command
# first.
# --- we need to queue up a particular behavior:
# 1) xmit : queue up producer[s]
# 2) recv : the file object
#
# It would be nice if we could make both channels the same. Hmmm..
#
class passive_acceptor (asyncore.dispatcher):
    ready = None

    def __init__ (self, control_channel):
        # connect_fun (conn, addr)
        asyncore.dispatcher.__init__ (self)
        self.control_channel = control_channel
        self.create_socket (socket.AF_INET, socket.SOCK_STREAM)
        # bind to an address on the interface that the
        # control connection is coming from.
        self.bind ((
            self.control_channel.getsockname()[0],
            0
            ))
        self.addr = self.getsockname()
        self.listen (1)

#   def __del__ (self):
#       print 'passive_acceptor.__del__()'

    def log (self, *ignore):
        pass

    def handle_accept (self):
        conn, addr = self.accept()
        conn.setblocking (0)
        dc = self.control_channel.client_dc
        if dc is not None:
            dc.set_socket (conn)
            dc.addr = addr
            dc.connected = 1
            self.control_channel.passive_acceptor = None
        else:
            self.ready = conn, addr
        self.close()
class xmit_channel (asynchat.async_chat):

    # for an ethernet, you want this to be fairly large, in fact, it
    # _must_ be large for performance comparable to an ftpd.  [64k] we
    # ought to investigate automatically-sized buffers...

    ac_out_buffer_size = 16384
    bytes_out = 0

    def __init__ (self, channel, client_addr=None):
        self.channel = channel
        self.client_addr = client_addr
        asynchat.async_chat.__init__ (self)

#   def __del__ (self):
#       print 'xmit_channel.__del__()'

    def log (*args):
        pass

    def readable (self):
        return not self.connected

    def writable (self):
        return 1

    def send (self, data):
        result = asynchat.async_chat.send (self, data)
        self.bytes_out = self.bytes_out + result
        return result

    def handle_error (self):
        # usually this is to catch an unexpected disconnect.
        self.log_info ('unexpected disconnect on data xmit channel', 'error')
        try:
            self.close()
        except:
            pass

    # TODO: there's a better way to do this.  we need to be able to
    # put 'events' in the producer fifo.  to do this cleanly we need
    # to reposition the 'producer' fifo as an 'event' fifo.

    # dummy function to suppress warnings caused by some FTP clients
    def handle_connect (self):
        pass

    def close (self):
        c = self.channel
        s = c.server
        c.client_dc = None
        s.total_files_out.increment()
        s.total_bytes_out.increment (self.bytes_out)
        if not len(self.producer_fifo):
            c.respond ('226 Transfer complete')
        elif not c.closed:
            c.respond ('426 Connection closed; transfer aborted')
        del c
        del s
        del self.channel
        asynchat.async_chat.close (self)
class recv_channel (asyncore.dispatcher):
    def __init__ (self, channel, client_addr, fd):
        self.channel = channel
        self.client_addr = client_addr
        self.fd = fd
        asyncore.dispatcher.__init__ (self)
        self.bytes_in = counter()

    def log (self, *ignore):
        pass

    def handle_connect (self):
        pass

    def writable (self):
        return 0

    def recv (*args):
        result = apply (asyncore.dispatcher.recv, args)
        self = args[0]
        self.bytes_in.increment (len (result))
        return result

    buffer_size = 8192

    def handle_read (self):
        block = self.recv (self.buffer_size)
        if block:
            try:
                self.fd.write (block)
            except IOError:
                self.log_info ('got exception writing block...', 'error')

    def handle_close (self):
        s = self.channel.server
        s.total_files_in.increment()
        s.total_bytes_in.increment (self.bytes_in.as_long())
        self.fd.close()
        self.channel.respond ('226 Transfer complete.')
        self.close()
import filesys

# not much of a doorman! 8^)
class dummy_authorizer:
    def __init__ (self, root='/'):
        self.root = root
    def authorize (self, channel, username, password):
        channel.persona = -1, -1
        channel.read_only = 1
        return 1, 'Ok.', filesys.os_filesystem (self.root)

class anon_authorizer:
    def __init__ (self, root='/'):
        self.root = root

    def authorize (self, channel, username, password):
        if username in ('ftp', 'anonymous'):
            channel.persona = -1, -1
            channel.read_only = 1
            return 1, 'Ok.', filesys.os_filesystem (self.root)
        else:
            return 0, 'Password invalid.', None
# ===========================================================================
# Unix-specific improvements
# ===========================================================================
if os.name == 'posix':

    class unix_authorizer:
        # return a trio of (success, reply_string, filesystem)
        def authorize (self, channel, username, password):
            import crypt
            import pwd
            try:
                info = pwd.getpwnam (username)
            except KeyError:
                return 0, 'No such user.', None
            mangled = info[1]
            if crypt.crypt (password, mangled[:2]) == mangled:
                channel.read_only = 0
                fs = filesys.schizophrenic_unix_filesystem (
                    '/',
                    info[5],
                    persona = (info[2], info[3])
                    )
                return 1, 'Login successful.', fs
            else:
                return 0, 'Password invalid.', None

        def __repr__ (self):
            return '<standard unix authorizer>'
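
    # A minimal sketch (not part of the original file) of the crypt(3)
    # check performed above: re-crypt the presented password with the
    # stored hash's two-character salt and compare.  The values passed
    # in would come from the pwd database; nothing here is original.
    def _demo_crypt_check (password, mangled):
        import crypt
        return crypt.crypt (password, mangled[:2]) == mangled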
    # simple anonymous ftp support
    class unix_authorizer_with_anonymous (unix_authorizer):
        def __init__ (self, root=None, real_users=0):
            self.root = root
            self.real_users = real_users

        def authorize (self, channel, username, password):
            if string.lower(username) in ['anonymous', 'ftp']:
                import pwd
                try:
                    # ok, here we run into lots of confusion.
                    # on some os', anon runs under user 'nobody',
                    # on others as 'ftp'.  ownership is also critical.
                    # need to investigate.
                    # linux: new linuxen seem to have nobody's UID=-1,
                    #    which is an illegal value.  Use ftp.
                    ftp_user_info = pwd.getpwnam ('ftp')
                    if string.lower(os.uname()[0]) == 'linux':
                        nobody_user_info = pwd.getpwnam ('ftp')
                    else:
                        nobody_user_info = pwd.getpwnam ('nobody')
                    channel.read_only = 1
                    if self.root is None:
                        self.root = ftp_user_info[5]
                    fs = filesys.unix_filesystem (self.root, '/')
                    return 1, 'Anonymous Login Successful', fs
                except KeyError:
                    return 0, 'Anonymous account not set up', None
            elif self.real_users:
                return unix_authorizer.authorize (
                    self,
                    channel,
                    username,
                    password
                    )
            else:
                return 0, 'User logins not allowed', None
class file_producer:
    block_size = 16384
    def __init__ (self, server, dc, fd):
        self.fd = fd
        self.done = 0

    def more (self):
        if self.done:
            return ''
        else:
            block = self.fd.read (self.block_size)
            if not block:
                self.fd.close()
                self.done = 1
            return block
# usage: ftp_server /PATH/TO/FTP/ROOT PORT
# for example:
# $ ftp_server /home/users/ftp 8021
if os.name == 'posix':
    def test (port='8021'):
        import sys
        fs = ftp_server (
            unix_authorizer(),
            port=string.atoi (port)
            )
        try:
            asyncore.loop()
        except KeyboardInterrupt:
            fs.log_info ('FTP server shutting down. (received SIGINT)', 'warning')
            # close everything down on SIGINT.
            # of course this should be a cleaner shutdown.
            asyncore.close_all()

    if __name__ == '__main__':
        test (sys.argv[1])

# not unix
else:
    def test ():
        fs = ftp_server (dummy_authorizer())
    if __name__ == '__main__':
        test ()
# this is the command list from the wuftpd man page
# '*' means we've implemented it.
# '!' requires write access
#
command_documentation = {
    'abor': 'abort previous command',                           #*
    'acct': 'specify account (ignored)',
    'allo': 'allocate storage (vacuously)',
    'appe': 'append to a file',                                 #*!
    'cdup': 'change to parent of current working directory',    #*
    'cwd':  'change working directory',                         #*
    'dele': 'delete a file',                                    #!
    'help': 'give help information',                            #*
    'list': 'give list files in a directory',                   #*
    'mkd':  'make a directory',                                 #!
    'mdtm': 'show last modification time of file',              #*
    'mode': 'specify data transfer mode',
    'nlst': 'give name list of files in directory',             #*
    'noop': 'do nothing',                                       #*
    'pass': 'specify password',                                 #*
    'pasv': 'prepare for server-to-server transfer',            #*
    'port': 'specify data connection port',                     #*
    'pwd':  'print the current working directory',              #*
    'quit': 'terminate session',                                #*
    'rest': 'restart incomplete transfer',                      #*
    'retr': 'retrieve a file',                                  #*
    'rmd':  'remove a directory',                               #!
    'rnfr': 'specify rename-from file name',                    #!
    'rnto': 'specify rename-to file name',                      #!
    'site': 'non-standard commands (see next section)',
    'size': 'return size of file',                              #*
    'stat': 'return status of server',                          #*
    'stor': 'store a file',                                     #*!
    'stou': 'store a file with a unique name',                  #!
    'stru': 'specify data transfer structure',
    'syst': 'show operating system type of server system',      #*
    'type': 'specify data transfer type',                       #*
    'user': 'specify user name',                                #*
    'xcup': 'change to parent of current working directory (deprecated)',
    'xcwd': 'change working directory (deprecated)',
    'xmkd': 'make a directory (deprecated)',                    #!
    'xpwd': 'print the current working directory (deprecated)',
    'xrmd': 'remove a directory (deprecated)',                  #!
    }
# debugging aid (linux)
def get_vm_size ():
    return string.atoi (string.split (open ('/proc/self/stat').readline())[22])

def print_vm():
    print 'vm: %8dk' % (get_vm_size()/1024)
lib/python/ZServer/medusa/http_bobo.py
# -*- Mode: Python; tab-width: 4 -*-
import string
import regex

RCS_ID = '$Id: http_bobo.py,v 1.6 2003/03/18 21:15:17 fdrake Exp $'
VERSION_STRING = string.split(RCS_ID)[2]

class bobo_extension:
    hits = 0

    SERVER_IDENT = 'Bobo Extension (V%s)' % VERSION_STRING

    def __init__ (self, regexp):
        self.regexp = regex.compile (regexp)

    def __repr__ (self):
        return '<Bobo Extension <b>(%d hits)</b> at %x>' % (
            self.hits,
            id (self)
            )

    def match (self, path_part):
        if self.regexp.match (path_part) == len(path_part):
            return 1
        else:
            return 0

    def status (self):
        return mstatus.lines_producer ([
            '<h2>%s</h2>' % self.SERVER_IDENT,
            '<br><b>Total Hits:</b> %d' % self.hits,
            ])

    def handle_request (self, channel):
        self.hits = self.hits + 1

        [path, params, query, fragment] = channel.uri

        if query:
            # cgi_publisher_module doesn't want the leading '?'
            query = query[1:]

        env = {}
        env['REQUEST_METHOD'] = method
        env['SERVER_PORT'] = channel.server.port
        env['SERVER_NAME'] = channel.server.server_name
        env['SCRIPT_NAME'] = module_name
        env['QUERY_STRING'] = query
        env['PATH_INFO'] = string.join (path_parts[1:],'/')

        # this should really be done with with a real producer.  just
        # have to make sure it can handle all of the file object api.

        sin = StringIO.StringIO ('')
        sout = StringIO.StringIO ()
        serr = StringIO.StringIO ()

        cgi_module_publisher.publish_module (
            module_name,
            stdin=sin,
            stdout=sout,
            stderr=serr,
            environ=env,
            debug=1
            )

        channel.push (
            channel.response (200) + \
            channel.generated_content_header (path)
            )

        self.push (sout.getvalue())
        self.push (serr.getvalue())
        self.close_when_done()
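
# Note (not part of the original file): bobo_extension.match() relies on
# the long-deprecated 'regex' module, whose match() returns the matched
# length.  An equivalent full-match test with the modern 're' module
# would look like this sketch:
def _re_match_full (pattern, path_part):
    import re
    m = re.match (pattern, path_part)
    return m is not None and m.end() == len(path_part)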
lib/python/ZServer/medusa/http_date.py
# -*- Mode: Python; tab-width: 4 -*-
import re
import string
import time

def concat (*args):
    return ''.join (args)

def join (seq, field=' '):
    return field.join (seq)

def group (s):
    return '(' + s + ')'

short_days = ['sun','mon','tue','wed','thu','fri','sat']
long_days = ['sunday','monday','tuesday','wednesday','thursday','friday','saturday']

short_day_reg = group (join (short_days, '|'))
long_day_reg = group (join (long_days, '|'))

daymap = {}
for i in range(7):
    daymap[short_days[i]] = i
    daymap[long_days[i]] = i

hms_reg = join (3 * [group('[0-9][0-9]')], ':')

months = ['jan','feb','mar','apr','may','jun','jul','aug','sep','oct','nov','dec']

monmap = {}
for i in range(12):
    monmap[months[i]] = i+1

months_reg = group (join (months, '|'))

# From draft-ietf-http-v11-spec-07.txt/3.3.1
#       Sun, 06 Nov 1994 08:49:37 GMT  ; RFC 822, updated by RFC 1123
#       Sunday, 06-Nov-94 08:49:37 GMT ; RFC 850, obsoleted by RFC 1036
#       Sun Nov  6 08:49:37 1994       ; ANSI C's asctime() format

# rfc822 format
rfc822_date = join (
    [concat (short_day_reg, ','),   # day
     group ('[0-9][0-9]?'),         # date
     months_reg,                    # month
     group ('[0-9]+'),              # year
     hms_reg,                       # hour minute second
     'gmt'
     ],
    ' '
    )

rfc822_reg = re.compile (rfc822_date)

def unpack_rfc822 (m):
    g = m.group
    a = string.atoi
    return (
        a (g(4)),       # year
        monmap[g(3)],   # month
        a (g(2)),       # day
        a (g(5)),       # hour
        a (g(6)),       # minute
        a (g(7)),       # second
        0,
        0,
        0
        )

# rfc850 format
rfc850_date = join (
    [concat (long_day_reg, ','),
     join (
         [group ('[0-9][0-9]?'),
          months_reg,
          group ('[0-9]+')
          ],
         '-'
         ),
     hms_reg,
     'gmt'
     ],
    ' '
    )

rfc850_reg = re.compile (rfc850_date)
# they actually unpack the same way
def unpack_rfc850 (m):
    g = m.group
    a = string.atoi
    return (
        a (g(4)),       # year
        monmap[g(3)],   # month
        a (g(2)),       # day
        a (g(5)),       # hour
        a (g(6)),       # minute
        a (g(7)),       # second
        0,
        0,
        0
        )

# parsdate.parsedate   - ~700/sec.
# parse_http_date      - ~1333/sec.

weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
monthname = [None, 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
             'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']

def build_http_date (when):
    year, month, day, hh, mm, ss, wd, y, z = time.gmtime(when)
    return "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
        weekdayname[wd],
        day, monthname[month], year,
        hh, mm, ss
        )

def parse_http_date (d):
    d = string.lower (d)
    tz = time.timezone
    m = rfc850_reg.match (d)
    if m and m.end() == len(d):
        retval = int (time.mktime (unpack_rfc850(m)) - tz)
    else:
        m = rfc822_reg.match (d)
        if m and m.end() == len(d):
            retval = int (time.mktime (unpack_rfc822(m)) - tz)
        else:
            return 0
    # Thanks to Craig Silverstein <csilvers@google.com> for pointing
    # out the DST discrepancy
    if time.daylight and time.localtime(retval)[-1] == 1:
        # DST correction
        retval = retval + (tz - time.altzone)
    return retval
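
# A minimal sketch (not part of the original file) of the two helpers
# above; parse_http_date() works in local epoch terms, so the exact
# round-trip value depends on the local timezone/DST database.
def _demo_http_date():
    stamp = build_http_date (1000000000)
    print(stamp)                      # -> 'Sun, 09 Sep 2001 01:46:40 GMT'
    print(parse_http_date (stamp))    # ~1000000000, modulo tz/DST handling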
lib/python/ZServer/medusa/http_server.py
#! /usr/local/bin/python
# -*- Mode: Python; tab-width: 4 -*-
#
# Author: Sam Rushing <rushing@nightmare.com>
# Copyright 1996-2000 by Sam Rushing
# All Rights Reserved.
#
RCS_ID = '$Id: http_server.py,v 1.35 2003/03/18 21:15:17 fdrake Exp $'

# python modules
import os
import re
import socket
import stat
import string
import sys
import time
import base64

# async modules
import asyncore
import asynchat

# medusa modules
import http_date
import producers
import status_handler
import logger

if RCS_ID.startswith('$Id: '):
    VERSION_STRING = string.split(RCS_ID)[2]
else:
    VERSION_STRING = '0.0'

from counter import counter
from urllib import unquote
# ===========================================================================
# Request Object
# ===========================================================================
class http_request:

    # default reply code
    reply_code = 200

    request_counter = counter()

    # Whether to automatically use chunked encoding when
    #
    #   HTTP version is 1.1
    #   Content-Length is not set
    #   Chunked encoding is not already in effect
    #
    # If your clients are having trouble, you might want to disable this.
    use_chunked = 1

    # by default, this request object ignores user data.
    collector = None

    def __init__ (self, *args):
        # unpack information about the request
        (self.channel, self.request,
         self.command, self.uri, self.version,
         self.header) = args

        self.outgoing = fifo()
        self.reply_headers = {
            'Server'    : 'Medusa/%s' % VERSION_STRING,
            'Date'      : http_date.build_http_date (time.time())
            }
        self.request_number = http_request.request_counter.increment()
        self._split_uri = None
        self._header_cache = {}

    # --------------------------------------------------
    # reply header management
    # --------------------------------------------------
    def __setitem__ (self, key, value):
        self.reply_headers[key] = value

    def __getitem__ (self, key):
        return self.reply_headers[key]

    def has_key (self, key):
        return self.reply_headers.has_key (key)

    def build_reply_header (self):
        return string.join (
            [self.response(self.reply_code)] + map (
                lambda x: '%s: %s' % x,
                self.reply_headers.items()
                ),
            '\r\n'
            ) + '\r\n\r\n'

    # --------------------------------------------------
    # split a uri
    # --------------------------------------------------

    # <path>;<params>?<query>#<fragment>
    path_regex = re.compile (
        #        path       params      query    fragment
        r'([^;?#]*)(;[^?#]*)?(\?[^#]*)?(#.*)?'
        )

    def split_uri (self):
        if self._split_uri is None:
            m = self.path_regex.match (self.uri)
            if m.end() != len(self.uri):
                raise ValueError, "Broken URI"
            else:
                self._split_uri = m.groups()
        return self._split_uri

    def get_header_with_regex (self, head_reg, group):
        for line in self.header:
            m = head_reg.match (line)
            if m and m.end() == len(line):
                return m.group (group)
        return ''

    def get_header (self, header):
        header = string.lower (header)
        hc = self._header_cache
        if not hc.has_key (header):
            h = header + ': '
            hl = len(h)
            for line in self.header:
                if string.lower (line[:hl]) == h:
                    r = line[hl:]
                    hc[header] = r
                    return r
            hc[header] = None
            return None
        else:
            return hc[header]

    # --------------------------------------------------
    # user data
    # --------------------------------------------------

    def collect_incoming_data (self, data):
        if self.collector:
            self.collector.collect_incoming_data (data)
        else:
            self.log_info(
                'Dropping %d bytes of incoming request data' % len(data),
                'warning'
                )

    def found_terminator (self):
        if self.collector:
            self.collector.found_terminator()
        else:
            self.log_info (
                'Unexpected end-of-record for incoming request',
                'warning'
                )

    def push (self, thing):
        if type(thing) == type(''):
            self.outgoing.push (producers.simple_producer (thing))
        else:
            self.outgoing.push (thing)

    def response (self, code=200):
        message = self.responses[code]
        self.reply_code = code
        return 'HTTP/%s %d %s' % (self.version or '1.0', code, message)

    def error (self, code):
        self.reply_code = code
        message = self.responses[code]
        s = self.DEFAULT_ERROR_MESSAGE % {
            'code': code,
            'message': message,
            }
        self['Content-Length'] = len(s)
        self['Content-Type'] = 'text/html'
        # make an error reply
        self.push (s)
        self.done()

    # can also be used for empty replies
    reply_now = error

    def done (self):
        "finalize this transaction - send output to the http channel"

        # ----------------------------------------
        # persistent connection management
        # ----------------------------------------

        #  --- BUCKLE UP! ----

        connection = string.lower (get_header (CONNECTION, self.header))

        close_it = 0
        wrap_in_chunking = 0

        if self.version == '1.0':
            if connection == 'keep-alive':
                if not self.has_key ('Content-Length'):
                    close_it = 1
                else:
                    self['Connection'] = 'Keep-Alive'
            else:
                close_it = 1
        elif self.version == '1.1':
            if connection == 'close':
                close_it = 1
            elif not self.has_key ('Content-Length'):
                if self.has_key ('Transfer-Encoding'):
                    if not self['Transfer-Encoding'] == 'chunked':
                        close_it = 1
                elif self.use_chunked:
                    self['Transfer-Encoding'] = 'chunked'
                    wrap_in_chunking = 1
                else:
                    close_it = 1
        elif self.version is None:
            # Although we don't *really* support http/0.9 (because we'd have to
            # use \r\n as a terminator, and it would just yuck up a lot of stuff)
            # it's very common for developers to not want to type a version number
            # when using telnet to debug a server.
            close_it = 1

        outgoing_header = producers.simple_producer (self.build_reply_header())

        if close_it:
            self['Connection'] = 'close'

        if wrap_in_chunking:
            outgoing_producer = producers.chunked_producer (
                producers.composite_producer (self.outgoing)
                )
            # prepend the header
            outgoing_producer = producers.composite_producer (
                fifo([outgoing_header, outgoing_producer])
                )
        else:
            # prepend the header
            self.outgoing.push_front (outgoing_header)
            outgoing_producer = producers.composite_producer (self.outgoing)

        # apply a few final transformations to the output
        self.channel.push_with_producer (
            # globbing gives us large packets
            producers.globbing_producer (
                # hooking lets us log the number of bytes sent
                producers.hooked_producer (
                    outgoing_producer,
                    self.log
                    )
                )
            )

        self.channel.current_request = None

        if close_it:
            self.channel.close_when_done()

    def log_date_string (self, when):
        logtime = time.localtime (when)
        return time.strftime ('%d/', logtime) + \
               http_date.monthname[logtime[1]] + \
               time.strftime ('/%Y:%H:%M:%S ', logtime) + \
               tz_for_log

    def log (self, bytes):
        user_agent = self.get_header ('user-agent')
        if not user_agent:
            user_agent = ''
        referer = self.get_header ('referer')
        if not referer:
            referer = ''

        auth = self.get_header ('Authorization')
        name = 'Anonymous'
        if auth is not None:
            if string.lower (auth[:6]) == 'basic ':
                try:
                    decoded = base64.decodestring (auth[6:])
                except base64.binascii.Error:
                    decoded = ''
                t = string.split (decoded, ':', 1)
                if len(t) < 2:
                    name = 'Unknown (bad auth string)'
                else:
                    name = t[0]

        self.channel.server.logger.log (
            self.channel.addr[0],
            ' - %s [%s] "%s" %d %d "%s" "%s"\n' % (
                name,
                self.log_date_string (time.time()),
                self.request,
                self.reply_code,
                bytes,
                referer,
                user_agent
                )
            )

    responses = {
        100: "Continue",
        101: "Switching Protocols",
        200: "OK",
        201: "Created",
        202: "Accepted",
        203: "Non-Authoritative Information",
        204: "No Content",
        205: "Reset Content",
        206: "Partial Content",
        300: "Multiple Choices",
        301: "Moved Permanently",
        302: "Moved Temporarily",
        303: "See Other",
        304: "Not Modified",
        305: "Use Proxy",
        400: "Bad Request",
        401: "Unauthorized",
        402: "Payment Required",
        403: "Forbidden",
        404: "Not Found",
        405: "Method Not Allowed",
        406: "Not Acceptable",
        407: "Proxy Authentication Required",
        408: "Request Time-out",
        409: "Conflict",
        410: "Gone",
        411: "Length Required",
        412: "Precondition Failed",
        413: "Request Entity Too Large",
        414: "Request-URI Too Large",
        415: "Unsupported Media Type",
        500: "Internal Server Error",
        501: "Not Implemented",
        502: "Bad Gateway",
        503: "Service Unavailable",
        504: "Gateway Time-out",
        505: "HTTP Version not supported"
        }

    # Default error message
    DEFAULT_ERROR_MESSAGE = string.join (
        ['<head>',
         '<title>Error response</title>',
         '</head>',
         '<body>',
         '<h1>Error response</h1>',
         '<p>Error code %(code)d.',
         '<p>Message: %(message)s.',
         '</body>',
         ''
         ],
        '\r\n'
        )
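
# A minimal sketch (not part of the original file) of the wire framing
# that wrap_in_chunking in done() above delegates to
# producers.chunked_producer: each chunk is its size in hex, CRLF, the
# data, CRLF; a zero-sized chunk terminates the body.
def _demo_chunk (data):
    return '%x\r\n%s\r\n' % (len(data), data)

# e.g. _demo_chunk('hello') + '0\r\n\r\n'  ->  '5\r\nhello\r\n0\r\n\r\n'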
# ===========================================================================
# HTTP Channel Object
# ===========================================================================
class http_channel (asynchat.async_chat):

    # use a larger default output buffer
    ac_out_buffer_size = 1 << 16

    current_request = None
    channel_counter = counter()

    def __init__ (self, server, conn, addr):
        self.channel_number = http_channel.channel_counter.increment()
        self.request_counter = counter()
        asynchat.async_chat.__init__ (self, conn)
        self.server = server
        self.addr = addr
        self.set_terminator ('\r\n\r\n')
        self.in_buffer = ''
        self.creation_time = int (time.time())
        self.check_maintenance()

    def __repr__ (self):
        ar = asynchat.async_chat.__repr__(self)[1:-1]
        return '<%s channel#: %s requests:%s>' % (
            ar,
            self.channel_number,
            self.request_counter
            )

    # Channel Counter, Maintenance Interval...
    maintenance_interval = 500

    def check_maintenance (self):
        if not self.channel_number % self.maintenance_interval:
            self.maintenance()

    def maintenance (self):
        self.kill_zombies()

    # 30-minute zombie timeout.  status_handler also knows how to kill zombies.
    zombie_timeout = 30 * 60

    def kill_zombies (self):
        now = int (time.time())
        for channel in asyncore.socket_map.values():
            if channel.__class__ == self.__class__:
                if (now - channel.creation_time) > channel.zombie_timeout:
                    channel.close()

    # --------------------------------------------------
    # send/recv overrides, good place for instrumentation.
    # --------------------------------------------------

    # this information needs to get into the request object,
    # so that it may log correctly.
    def send (self, data):
        result = asynchat.async_chat.send (self, data)
        self.server.bytes_out.increment (len(data))
        return result

    def recv (self, buffer_size):
        try:
            result = asynchat.async_chat.recv (self, buffer_size)
            self.server.bytes_in.increment (len(result))
            return result
        except MemoryError:
            # --- Save a Trip to Your Service Provider ---
            # It's possible for a process to eat up all the memory of
            # the machine, and put it in an extremely wedged state,
            # where medusa keeps running and can't be shut down.  This
            # is where MemoryError tends to get thrown, though of
            # course it could get thrown elsewhere.
            sys.exit ("Out of Memory!")

    def handle_error (self):
        t, v = sys.exc_info()[:2]
        if t is SystemExit:
            raise t, v
        else:
            asynchat.async_chat.handle_error (self)

    def log (self, *args):
        pass

    # --------------------------------------------------
    # async_chat methods
    # --------------------------------------------------

    def collect_incoming_data (self, data):
        if self.current_request:
            # we are receiving data (probably POST data) for a request
            self.current_request.collect_incoming_data (data)
        else:
            # we are receiving header (request) data
            self.in_buffer = self.in_buffer + data

    def found_terminator (self):
        if self.current_request:
            self.current_request.found_terminator()
        else:
            header = self.in_buffer
            self.in_buffer = ''
            lines = string.split (header, '\r\n')

            # --------------------------------------------------
            # crack the request header
            # --------------------------------------------------

            while lines and not lines[0]:
                # as per the suggestion of http-1.1 section 4.1, (and
                # Eric Parker <eparker@zyvex.com>), ignore a leading
                # blank lines (buggy browsers tack it onto the end of
                # POST requests)
                lines = lines[1:]

            if not lines:
                self.close_when_done()
                return

            request = lines[0]

            command, uri, version = crack_request (request)

            # unquote path if necessary (thanks to Skip Montaro for pointing
            # out that we must unquote in piecemeal fashion).
            # ajung: we unquote() the request *after* calling crack_request because
            # this function breaks when it gets an unquoted request
            if '%' in request:
                request = unquote (request)

            header = join_headers (lines[1:])

            r = http_request (self, request, command, uri, version, header)
            self.request_counter.increment()
            self.server.total_requests.increment()

            if command is None:
                self.log_info ('Bad HTTP request: %s' % repr(request), 'error')
                r.error (400)
                return

            # --------------------------------------------------
            # handler selection and dispatch
            # --------------------------------------------------
            for h in self.server.handlers:
                if h.match (r):
                    try:
                        self.current_request = r
                        # This isn't used anywhere.
                        # r.handler = h # CYCLE
                        h.handle_request (r)
                    except:
                        self.server.exceptions.increment()
                        (file, fun, line), t, v, tbinfo = asyncore.compact_traceback()
                        self.log_info (
                            'Server Error: %s, %s: file: %s line: %s' % (t,v,file,line),
                            'error')
                        try:
                            r.error (500)
                        except:
                            pass
                    return

            # no handlers, so complain
            r.error (404)

    def writable (self):
        # this is just the normal async_chat 'writable', here for comparison
        return self.ac_out_buffer or len(self.producer_fifo)

    def writable_for_proxy (self):
        # this version of writable supports the idea of a 'stalled' producer
        # [i.e., it's not ready to produce any output yet] This is needed by
        # the proxy, which will be waiting for the magic combination of
        #   1) hostname resolved
        #   2) connection made
        #   3) data available.
        if self.ac_out_buffer:
            return 1
        elif len(self.producer_fifo):
            p = self.producer_fifo.first()
            if hasattr (p, 'stalled'):
                return not p.stalled()
            else:
                return 1
# ===========================================================================
# HTTP Server Object
# ===========================================================================
class
http_server
(
asyncore
.
dispatcher
):
SERVER_IDENT
=
'HTTP Server (V%s)'
%
VERSION_STRING
channel_class
=
http_channel
def
__init__
(
self
,
ip
,
port
,
resolver
=
None
,
logger_object
=
None
):
self
.
ip
=
ip
self
.
port
=
port
asyncore
.
dispatcher
.
__init__
(
self
)
self
.
create_socket
(
socket
.
AF_INET
,
socket
.
SOCK_STREAM
)
self
.
handlers
=
[]
if
not
logger_object
:
logger_object
=
logger
.
file_logger
(
sys
.
stdout
)
self
.
set_reuse_addr
()
self
.
bind
((
ip
,
port
))
# lower this to 5 if your OS complains
self
.
listen
(
1024
)
host
,
port
=
self
.
socket
.
getsockname
()
if
not
ip
:
self
.
log_info
(
'Computing default hostname'
,
'warning'
)
try
:
ip
=
socket
.
gethostbyname
(
socket
.
gethostname
())
except
socket
.
error
:
ip
=
socket
.
gethostbyname
(
'localhost'
)
try
:
self
.
server_name
=
socket
.
gethostbyaddr
(
ip
)[
0
]
except
socket
.
error
:
self
.
log_info
(
'Cannot do reverse lookup'
,
'warning'
)
self
.
server_name
=
ip
# use the IP address as the "hostname"
self
.
server_port
=
port
self
.
total_clients
=
counter
()
self
.
total_requests
=
counter
()
self
.
exceptions
=
counter
()
self
.
bytes_out
=
counter
()
self
.
bytes_in
=
counter
()
if
not
logger_object
:
logger_object
=
logger
.
file_logger
(
sys
.
stdout
)
if
resolver
:
self
.
logger
=
logger
.
resolving_logger
(
resolver
,
logger_object
)
else
:
self
.
logger
=
logger
.
unresolving_logger
(
logger_object
)
self
.
log_info
(
'Medusa (V%s) started at %s'
'
\
n
\
t
Hostname: %s'
'
\
n
\
t
Port:%d'
'
\
n
'
%
(
VERSION_STRING
,
time
.
ctime
(
time
.
time
()),
self
.
server_name
,
port
,
)
)
def
writable
(
self
):
return
0
def
handle_read
(
self
):
pass
def
readable
(
self
):
return
self
.
accepting
def
handle_connect
(
self
):
pass
def
handle_accept
(
self
):
self
.
total_clients
.
increment
()
try
:
tup
=
self
.
accept
()
except
socket
.
error
:
# linux: on rare occasions we get a bogus socket back from
# accept. socketmodule.c:makesockaddr complains that the
# address family is unknown. We don't want the whole server
# to shut down because of this.
self
.
log_info
(
'warning: server accept() threw an exception'
,
'warning'
)
self
.
total_clients
.
decrement
()
return
try
:
conn
,
addr
=
tup
except
TypeError
:
# unpack non-sequence. this can happen when a read event
# fires on a listening socket, but when we call accept()
# we get EWOULDBLOCK, so dispatcher.accept() returns None.
# Seen on FreeBSD3 and Linux.
#self.log_info ('warning: server accept() returned %s '
# '(EWOULDBLOCK?)' % tup, 'warning')
self
.
total_clients
.
decrement
()
return
self
.
channel_class
(
self
,
conn
,
addr
)
def
install_handler
(
self
,
handler
,
back
=
0
):
if
back
:
self
.
handlers
.
append
(
handler
)
else
:
self
.
handlers
.
insert
(
0
,
handler
)
def
remove_handler
(
self
,
handler
):
self
.
handlers
.
remove
(
handler
)
def
status
(
self
):
def
nice_bytes
(
n
):
return
string
.
join
(
status_handler
.
english_bytes
(
n
))
handler_stats
=
filter
(
None
,
map
(
maybe_status
,
self
.
handlers
))
if
self
.
total_clients
:
ratio
=
self
.
total_requests
.
as_long
()
/
float
(
self
.
total_clients
.
as_long
())
else
:
ratio
=
0.0
return
producers
.
composite_producer
(
fifo
([
producers
.
lines_producer
(
[
'<h2>%s</h2>'
%
self
.
SERVER_IDENT
,
'<br>Listening on: <b>Host:</b> %s'
%
self
.
server_name
,
'<b>Port:</b> %d'
%
self
.
port
,
'<p><ul>'
'<li>Total <b>Clients:</b> %s'
%
self
.
total_clients
,
'<b>Requests:</b> %s'
%
self
.
total_requests
,
'<b>Requests/Client:</b> %.1f'
%
(
ratio
),
'<li>Total <b>Bytes In:</b> %s'
%
(
nice_bytes
(
self
.
bytes_in
.
as_long
())),
'<b>Bytes Out:</b> %s'
%
(
nice_bytes
(
self
.
bytes_out
.
as_long
())),
'<li>Total <b>Exceptions:</b> %s'
%
self
.
exceptions
,
'</ul><p>'
'<b>Extension List</b><ul>'
,
])]
+
handler_stats
+
[
producers
.
simple_producer
(
'</ul>'
)]
)
)
def
maybe_status
(
thing
):
if
hasattr
(
thing
,
'status'
):
return
thing
.
status
()
else
:
return
None
CONNECTION
=
re
.
compile
(
'Connection: (.*)'
,
re
.
IGNORECASE
)
# merge multi-line headers
# [486dx2: ~500/sec]
def
join_headers
(
headers
):
r
=
[]
for
i
in
range
(
len
(
headers
)):
if
headers
[
i
][
0
]
in
'
\
t
'
:
r
[
-
1
]
=
r
[
-
1
]
+
headers
[
i
][
1
:]
else
:
r
.
append
(
headers
[
i
])
return
r
def
get_header
(
head_reg
,
lines
,
group
=
1
):
for
line
in
lines
:
m
=
head_reg
.
match
(
line
)
if
m
and
m
.
end
()
==
len
(
line
):
return
m
.
group
(
group
)
return
''
def
get_header_match
(
head_reg
,
lines
):
for
line
in
lines
:
m
=
head_reg
.
match
(
line
)
if
m
and
m
.
end
()
==
len
(
line
):
return
m
return
''
REQUEST
=
re
.
compile
(
'([^ ]+) (?:[^ :?#]+://[^ ?#/]*)?([^ ]+)(( HTTP/([0-9.]+))$|$)'
)
def
crack_request
(
r
):
m
=
REQUEST
.
match
(
r
)
if
m
is
not
None
and
m
.
end
()
==
len
(
r
):
if
m
.
group
(
3
):
version
=
m
.
group
(
5
)
else
:
version
=
None
return
string
.
lower
(
m
.
group
(
1
)),
m
.
group
(
2
),
version
else
:
return
None
,
None
,
None
class
fifo
:
def
__init__
(
self
,
list
=
None
):
if
not
list
:
self
.
list
=
[]
else
:
self
.
list
=
list
def
__len__
(
self
):
return
len
(
self
.
list
)
def
first
(
self
):
return
self
.
list
[
0
]
def
push_front
(
self
,
object
):
self
.
list
.
insert
(
0
,
object
)
def
push
(
self
,
data
):
self
.
list
.
append
(
data
)
def
pop
(
self
):
if
self
.
list
:
result
=
self
.
list
[
0
]
del
self
.
list
[
0
]
return
(
1
,
result
)
else
:
return
(
0
,
None
)
def
compute_timezone_for_log
():
if
time
.
daylight
:
tz
=
time
.
altzone
else
:
tz
=
time
.
timezone
if
tz
>
0
:
neg
=
1
else
:
neg
=
0
tz
=
-
tz
h
,
rem
=
divmod
(
tz
,
3600
)
m
,
rem
=
divmod
(
rem
,
60
)
if
neg
:
return
'-%02d%02d'
%
(
h
,
m
)
else
:
return
'+%02d%02d'
%
(
h
,
m
)
# if you run this program over a TZ change boundary, this will be invalid.
tz_for_log
=
compute_timezone_for_log
()
if
__name__
==
'__main__'
:
import
sys
if
len
(
sys
.
argv
)
<
2
:
print
'usage: %s <root> <port>'
%
(
sys
.
argv
[
0
])
else
:
import
monitor
import
filesys
import
default_handler
import
status_handler
import
ftp_server
import
chat_server
import
resolver
import
logger
rs
=
resolver
.
caching_resolver
(
'127.0.0.1'
)
lg
=
logger
.
file_logger
(
sys
.
stdout
)
ms
=
monitor
.
secure_monitor_server
(
'fnord'
,
'127.0.0.1'
,
9999
)
fs
=
filesys
.
os_filesystem
(
sys
.
argv
[
1
])
dh
=
default_handler
.
default_handler
(
fs
)
hs
=
http_server
(
''
,
string
.
atoi
(
sys
.
argv
[
2
]),
rs
,
lg
)
hs
.
install_handler
(
dh
)
ftp
=
ftp_server
.
ftp_server
(
ftp_server
.
dummy_authorizer
(
sys
.
argv
[
1
]),
port
=
8021
,
resolver
=
rs
,
logger_object
=
lg
)
cs
=
chat_server
.
chat_server
(
''
,
7777
)
sh
=
status_handler
.
status_extension
([
hs
,
ms
,
ftp
,
cs
,
rs
])
hs
.
install_handler
(
sh
)
if
(
'-p'
in
sys
.
argv
):
def
profile_loop
():
try
:
asyncore
.
loop
()
except
KeyboardInterrupt
:
pass
import
profile
profile
.
run
(
'profile_loop()'
,
'profile.out'
)
else
:
asyncore
.
loop
()
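The __main__ block above wires up a full demo (HTTP, FTP, chat, and monitor servers at once). As a minimal sketch of the same dispatch model, the following is illustrative rather than part of the commit; it assumes the medusa modules are importable, and the document root and port are made up:

import asyncore
import filesys
import default_handler
import http_server

fs = filesys.os_filesystem('/var/www')                  # document root (illustrative)
hs = http_server.http_server('', 8080)                  # all interfaces, port 8080 (illustrative)
hs.install_handler(default_handler.default_handler(fs)) # first matching handler gets each request
asyncore.loop()                                         # one select() loop services every connection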
lib/python/ZServer/medusa/logger.py
0 → 100644
# -*- Mode: Python; tab-width: 4 -*-

import asynchat
import socket
import string
import time

# these three are for the rotating logger
import os       # |
import stat     # v

#
# three types of log:
# 1) file
#    with optional flushing.  Also, one that rotates the log.
# 2) socket
#    dump output directly to a socket connection. [how do we
#    keep it open?]
# 3) syslog
#    log to syslog via tcp.  this is a per-line protocol.
#
#
# The 'standard' interface to a logging object is simply
# log_object.log (message)
#

# a file-like object that captures output, and
# makes sure to flush it always...  this could
# be connected to:
#  o    stdio file
#  o    low-level file
#  o    socket channel
#  o    syslog output...

class file_logger:

    # pass this either a path or a file object.
    def __init__(self, file, flush=1, mode='a'):
        self.filename = None
        if type(file) == type(''):
            if file == '-':
                import sys
                self.file = sys.stdout
            else:
                self.filename = file
                self.file = open(file, mode)
        else:
            self.file = file
        self.do_flush = flush

    def reopen(self):
        if self.filename:
            self.file.close()
            self.file = open(self.filename, 'a')

    def __repr__(self):
        return '<file logger: %s>' % self.file

    def write(self, data):
        self.file.write(data)
        self.maybe_flush()

    def writeline(self, line):
        self.file.writeline(line)
        self.maybe_flush()

    def writelines(self, lines):
        self.file.writelines(lines)
        self.maybe_flush()

    def maybe_flush(self):
        if self.do_flush:
            self.file.flush()

    def flush(self):
        self.file.flush()

    def softspace(self, *args):
        pass

    def log(self, message):
        if message[-1] not in ('\r', '\n'):
            self.write(message + '\n')
        else:
            self.write(message)

# like a file_logger, but it must be attached to a filename.
# When the log gets too full, or a certain time has passed,
# it backs up the log and starts a new one.  Note that backing
# up the log is done via "mv" because anything else (cp, gzip)
# would take time, during which medusa would do nothing else.

class rotating_file_logger(file_logger):

    # If freq is non-None we back up "daily", "weekly", or "monthly".
    # Else if maxsize is non-None we back up whenever the log gets
    # too big.  If both are None we never back up.
    def __init__(self, file, freq=None, maxsize=None, flush=1, mode='a'):
        self.filename = file
        self.mode = mode
        self.file = open(file, mode)
        self.freq = freq
        self.maxsize = maxsize
        self.rotate_when = self.next_backup(self.freq)
        self.do_flush = flush

    def __repr__(self):
        return '<rotating-file logger: %s>' % self.file

    # We back up at midnight every 1) day, 2) monday, or 3) 1st of month
    def next_backup(self, freq):
        (yr, mo, day, hr, min, sec, wd, jday, dst) = time.localtime(time.time())
        if freq == 'daily':
            return time.mktime((yr, mo, day + 1, 0, 0, 0, 0, 0, -1))
        elif freq == 'weekly':
            # wd(monday)==0
            return time.mktime((yr, mo, day - wd + 7, 0, 0, 0, 0, 0, -1))
        elif freq == 'monthly':
            return time.mktime((yr, mo + 1, 1, 0, 0, 0, 0, 0, -1))
        else:
            return None                 # not a date-based backup

    def maybe_flush(self):
        # rotate first if necessary
        self.maybe_rotate()
        if self.do_flush:               # from file_logger()
            self.file.flush()

    def maybe_rotate(self):
        if self.freq and time.time() > self.rotate_when:
            self.rotate()
            self.rotate_when = self.next_backup(self.freq)
        elif self.maxsize:
            # rotate when we get too big
            try:
                if os.stat(self.filename)[stat.ST_SIZE] > self.maxsize:
                    self.rotate()
            except os.error:
                # file not found, probably
                self.rotate()           # will create a new file

    def rotate(self):
        (yr, mo, day, hr, min, sec, wd, jday, dst) = time.localtime(time.time())
        try:
            self.file.close()
            newname = '%s.ends%04d%02d%02d' % (self.filename, yr, mo, day)
            try:
                open(newname, "r").close()      # check if file exists
                newname = newname + "-%02d%02d%02d" % (hr, min, sec)
            except:
                # YEARMODY is unique
                pass
            os.rename(self.filename, newname)
            self.file = open(self.filename, self.mode)
        except:
            pass

# syslog is a line-oriented log protocol - this class would be
# appropriate for FTP or HTTP logs, but not for dumping stderr to.

# TODO: a simple safety wrapper that will ensure that the line sent
# to syslog is reasonable.

# TODO: async version of syslog_client: now, log entries use blocking
# send()

import m_syslog
syslog_logger = m_syslog.syslog_client

class syslog_logger(m_syslog.syslog_client):

    svc_name = 'medusa'
    pid_str = str(os.getpid())

    def __init__(self, address, facility='user'):
        m_syslog.syslog_client.__init__(self, address)
        self.facility = m_syslog.facility_names[facility]
        self.address = address

    def __repr__(self):
        return '<syslog logger address=%s>' % (repr(self.address))

    def log(self, message):
        m_syslog.syslog_client.log(
            self,
            '%s[%s]: %s' % (self.svc_name, self.pid_str, message),
            facility=self.facility,
            priority=m_syslog.LOG_INFO,
        )

# log to a stream socket, asynchronously

class socket_logger(asynchat.async_chat):

    def __init__(self, address):
        if type(address) == type(''):
            self.create_socket(socket.AF_UNIX, socket.SOCK_STREAM)
        else:
            self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        self.connect(address)
        self.address = address

    def __repr__(self):
        return '<socket logger: address=%s>' % (self.address)

    def log(self, message):
        if message[-2:] != '\r\n':
            self.socket.push(message + '\r\n')
        else:
            self.socket.push(message)

# log to multiple places

class multi_logger:
    def __init__(self, loggers):
        self.loggers = loggers

    def __repr__(self):
        return '<multi logger: %s>' % (repr(self.loggers))

    def log(self, message):
        for logger in self.loggers:
            logger.log(message)

class resolving_logger:
    """Feed (ip, message) combinations into this logger to get a
    resolved hostname in front of the message.  The message will not
    be logged until the PTR request finishes (or fails)."""

    def __init__(self, resolver, logger):
        self.resolver = resolver
        self.logger = logger

    class logger_thunk:
        def __init__(self, message, logger):
            self.message = message
            self.logger = logger

        def __call__(self, host, ttl, answer):
            if not answer:
                answer = host
            self.logger.log('%s%s' % (answer, self.message))

    def log(self, ip, message):
        self.resolver.resolve_ptr(
            ip,
            self.logger_thunk(message, self.logger)
        )

class unresolving_logger:
    "Just in case you don't want to resolve"
    def __init__(self, logger):
        self.logger = logger

    def log(self, ip, message):
        self.logger.log('%s %s' % (ip, message))

def strip_eol(line):
    while line and line[-1] in '\r\n':
        line = line[:-1]
    return line

class tail_logger:
    "Keep track of the last <size> log messages"
    def __init__(self, logger, size=500):
        self.size = size
        self.logger = logger
        self.messages = []

    def log(self, message):
        self.messages.append(strip_eol(message))
        if len(self.messages) > self.size:
            del self.messages[0]
        self.logger.log(message)
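Since every logger above exposes the same log_object.log(message) interface, they stack freely. The following is an illustrative sketch only (the filenames and message are made up), not part of the module:

import logger

base = logger.rotating_file_logger('access.log', freq='daily')  # rotates at midnight
tail = logger.tail_logger(base, size=100)                       # remembers the last 100 messages
both = logger.multi_logger([tail, logger.file_logger('-')])     # also echoes to stdout
both.log('sample log line')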
lib/python/ZServer/medusa/m_syslog.py
0 → 100644
# -*- Mode: Python; tab-width: 4 -*-

# ======================================================================
# Copyright 1997 by Sam Rushing
#
#                          All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of Sam
# Rushing not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
# NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# ======================================================================

"""socket interface to unix syslog.
On Unix, there are usually two ways of getting to syslog: via a
local unix-domain socket, or via the TCP service.

Usually "/dev/log" is the unix domain socket.  This may be different
for other systems.

>>> my_client = syslog_client ('/dev/log')

Otherwise, just use the UDP version, port 514.

>>> my_client = syslog_client (('my_log_host', 514))

On win32, you will have to use the UDP version.  Note that
you can use this to log to other hosts (and indeed, multiple
hosts).

This module is not a drop-in replacement for the python
<syslog> extension module - the interface is different.

Usage:

>>> c = syslog_client()
>>> c = syslog_client ('/strange/non_standard_log_location')
>>> c = syslog_client (('other_host.com', 514))
>>> c.log ('testing', facility='local0', priority='debug')
"""

# TODO: support named-pipe syslog.
# [see ftp://sunsite.unc.edu/pub/Linux/system/Daemons/syslog-fifo.tar.z]

# from <linux/sys/syslog.h>:
# ===========================================================================
# priorities/facilities are encoded into a single 32-bit quantity, where the
# bottom 3 bits are the priority (0-7) and the top 28 bits are the facility
# (0-big number).  Both the priorities and the facilities map roughly
# one-to-one to strings in the syslogd(8) source code.  This mapping is
# included in this file.
#
# priorities (these are ordered)

LOG_EMERG    = 0        # system is unusable
LOG_ALERT    = 1        # action must be taken immediately
LOG_CRIT     = 2        # critical conditions
LOG_ERR      = 3        # error conditions
LOG_WARNING  = 4        # warning conditions
LOG_NOTICE   = 5        # normal but significant condition
LOG_INFO     = 6        # informational
LOG_DEBUG    = 7        # debug-level messages

# facility codes
LOG_KERN     = 0        # kernel messages
LOG_USER     = 1        # random user-level messages
LOG_MAIL     = 2        # mail system
LOG_DAEMON   = 3        # system daemons
LOG_AUTH     = 4        # security/authorization messages
LOG_SYSLOG   = 5        # messages generated internally by syslogd
LOG_LPR      = 6        # line printer subsystem
LOG_NEWS     = 7        # network news subsystem
LOG_UUCP     = 8        # UUCP subsystem
LOG_CRON     = 9        # clock daemon
LOG_AUTHPRIV = 10       # security/authorization messages (private)

# other codes through 15 reserved for system use
LOG_LOCAL0   = 16       # reserved for local use
LOG_LOCAL1   = 17       # reserved for local use
LOG_LOCAL2   = 18       # reserved for local use
LOG_LOCAL3   = 19       # reserved for local use
LOG_LOCAL4   = 20       # reserved for local use
LOG_LOCAL5   = 21       # reserved for local use
LOG_LOCAL6   = 22       # reserved for local use
LOG_LOCAL7   = 23       # reserved for local use

priority_names = {
    "alert":    LOG_ALERT,
    "crit":     LOG_CRIT,
    "debug":    LOG_DEBUG,
    "emerg":    LOG_EMERG,
    "err":      LOG_ERR,
    "error":    LOG_ERR,        # DEPRECATED
    "info":     LOG_INFO,
    "notice":   LOG_NOTICE,
    "panic":    LOG_EMERG,      # DEPRECATED
    "warn":     LOG_WARNING,    # DEPRECATED
    "warning":  LOG_WARNING,
}

facility_names = {
    "auth":     LOG_AUTH,
    "authpriv": LOG_AUTHPRIV,
    "cron":     LOG_CRON,
    "daemon":   LOG_DAEMON,
    "kern":     LOG_KERN,
    "lpr":      LOG_LPR,
    "mail":     LOG_MAIL,
    "news":     LOG_NEWS,
    "security": LOG_AUTH,       # DEPRECATED
    "syslog":   LOG_SYSLOG,
    "user":     LOG_USER,
    "uucp":     LOG_UUCP,
    "local0":   LOG_LOCAL0,
    "local1":   LOG_LOCAL1,
    "local2":   LOG_LOCAL2,
    "local3":   LOG_LOCAL3,
    "local4":   LOG_LOCAL4,
    "local5":   LOG_LOCAL5,
    "local6":   LOG_LOCAL6,
    "local7":   LOG_LOCAL7,
}

import socket

class syslog_client:

    def __init__(self, address='/dev/log'):
        self.address = address
        if type(address) == type(''):
            try:
                # APUE 13.4.2 specifies /dev/log as datagram socket
                self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
                self.socket.connect(address)
            except:
                # older linux may create as stream socket
                self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
                self.socket.connect(address)
            self.unix = 1
        else:
            self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            self.unix = 0

    log_format_string = '<%d>%s\000'

    def log(self, message, facility=LOG_USER, priority=LOG_INFO):
        message = self.log_format_string % (
            self.encode_priority(facility, priority),
            message)
        if self.unix:
            self.socket.send(message)
        else:
            self.socket.sendto(message, self.address)

    def encode_priority(self, facility, priority):
        if type(facility) == type(''):
            facility = facility_names[facility]
        if type(priority) == type(''):
            priority = priority_names[priority]
        return (facility << 3) | priority

    def close(self):
        if self.unix:
            self.socket.close()
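To illustrate the encoding that encode_priority() performs: the facility occupies the high bits and the 3-bit priority the low bits, so 'local0'/'debug' encodes to (16 << 3) | 7 == 135. A small sketch, not part of the module (the host and port are examples; sending UDP needs no listener):

import m_syslog

c = m_syslog.syslog_client(('localhost', 514))   # UDP flavor
print c.encode_priority('local0', 'debug')       # prints 135
c.log('hello from medusa', facility='local0', priority='debug')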
lib/python/ZServer/medusa/medusa.html
0 → 100644
<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">
<html>
<head>
<title>Medusa: A High-Performance Internet Server Architecture</title>
</head>
<body>
<h1><b>Medusa</b>: A High-Performance Internet Server Architecture</h1>

<h2>What is Medusa?</h2>

Medusa is an architecture for high-performance, robust, long-running
TCP/IP servers (like HTTP, FTP, and NNTP).  Medusa differs from most
other server architectures in that it runs as a single process,
multiplexing I/O with its various client and server connections within
a single process/thread.

<p>
Medusa is written in <a href="http://www.python.org/">Python</a>, a
high-level object-oriented language that is particularly well suited
to building powerful, extensible servers.  Medusa can be extended and
modified at run-time, even by the end-user.  User 'scripts' can be
used to completely change the behavior of the server, and even add in
completely new server types.

<h2>How Does it Work?</h2>

Most Internet servers are built on a 'forking' model.  ('Fork' is a
Unix term for starting a new process.)  Such servers actually invoke
an entire new process for every single client connection.  This
approach is simple to implement, but does not scale very well to
high-load situations.  Lots of clients mean a lot of processes, which
gobble up large quantities of virtual memory and other system
resources.  A high-load server thus needs to have a lot of memory.
Many popular Internet servers are running with hundreds of megabytes
of memory.

<p>
<h3>The I/O bottleneck.</h3>
<p>
The vast majority of Internet servers are I/O bound - for any one
process, the CPU is sitting idle 99.9% of the time, usually waiting
for input from an external device (in the case of an Internet server,
it is waiting for input from the network).  This problem is
exacerbated by the imbalance between server and client bandwidth: most
clients are connecting at relatively low bandwidths (28.8 kbits/sec or
less, with network delays and inefficiencies it can be far lower).  To
a typical server CPU, the time between bytes for such a client seems
like an eternity!  (Consider that a 200 MHz CPU can perform roughly
50,000 operations for each byte received from such a client).

<p>
A simple metaphor for a 'forking' server is that of a supermarket
cashier: for every 'customer' being processed [at a cash register],
another 'person' must be created to handle each client session.  But
what if your checkout clerks were so fast they could each individually
handle hundreds of customers per second?  Since these clerks are
almost always waiting for a customer to come through their line, you
have a very large staff, sitting around idle 99.9% of the time!  Why
not replace this staff with a single <i>super-clerk</i>, flitting
from aisle to aisle?

<p>
This is exactly how Medusa works!  It multiplexes all its I/O through
a single select() loop - this loop can handle hundreds, even thousands
of simultaneous connections - the actual number is limited only by your
operating system.  For a more technical overview, see
<a href="http://www.nightmare.com/medusa/async_sockets.html">Asynchronous Socket Programming</a>

<h2>Why is it Better?</h2>

<h3>Performance</h3>
<p>
The most obvious advantage to a single long-running server process is
a dramatic improvement in performance.  There are several types of
overhead involved in the forking model:

<ul>
<li><b>Process creation/destruction.</b>
<p>
Starting up a new process is an expensive operation on any operating
system.  Virtual memory must be allocated, libraries must be
initialized, and the operating system now has yet another task to
keep track of.  This start-up cost is so high that it is actually
<i>noticeable</i> to people!  For example, the first time you pull
up a web page with 15 inline images, while you are waiting for the
page to load you may have created and destroyed at least 16
processes on the web server.
<p>
<li><b>Virtual Memory</b>
<p>
Each process also requires a certain amount of virtual memory space
to be allocated on its behalf.  Even though most operating systems
implement a 'copy-on-write' strategy that makes this much less
costly than it could be, the end result is still very wasteful.  A
100-user FTP server can still easily require hundreds of megabytes
of real memory in order to avoid thrashing (excess paging activity
due to lack of real memory).
</ul>

<b>Medusa</b> eliminates both types of overhead.  Running as a
single process, there is no per-client creation/destruction
overhead.  This means each client request is answered very quickly.
And virtual memory requirements are lowered dramatically.  Memory
requirements can even be controlled with more precision in order to
gain the highest performance possible for a particular machine
configuration.

<h3>Persistence</h3>
<p>
Another major advantage to the single-process model is
<i>persistence</i>.  Often it is necessary to maintain some sort of
state information that is available to each and every client, i.e., a
database connection or file pointer.  Forking-model servers that need
such shared state must arrange some method of getting it - usually via
an IPC (inter-process communication) mechanism such as sockets or
named pipes.  IPC itself adds yet another significant and needless
overhead - single-process servers can simply share such information
within a single address space.

<p>
Implementing persistence in Medusa is easy - the address space of its
process (and thus its open database handles, variables, etc...) is
available to each and every client.

<h3>Not a Strawman</h3>

All right, at this point many of my readers will say I'm beating up on
a strawman.  In fact, they will say, such server architectures are
already available - like Microsoft's Internet Information Server.
IIS avoids the above-named problems by using <i>threads</i>.  Threads
are 'lightweight processes' - they represent multiple concurrent
execution paths within a single address space.  Threads solve many of
the problems mentioned above, but also create new ones:

<ul>
<li>'Threaded' programs are very difficult to write - especially
with servers that want to utilize the 'persistence' feature -
great care must be taken when accessing or modifying shared resources.
<li>There is still additional system overhead when using threads.
<li>Not all operating systems support threads, and even on those
that do, it is difficult to use them in a portable fashion.
</ul>

<p>
Threads are <i>required</i> in only a limited number of
situations.  In many cases where threads seem appropriate, an
asynchronous solution can actually be written with less work, and
will perform better.  Avoiding the use of threads also makes access
to shared resources (like database connections) easier to manage,
since multi-user locking is not necessary.

<p>
<b>Note:</b> In the rare case where threads are actually
necessary, Medusa can of course use them, if the host operating system
supports them.  For example, an image-conversion or fractal-generating
server might be CPU-intensive, rather than I/O-bound, and thus a good
candidate for running in a separate thread.

<p>
Another solution (used by many current HTTP servers on Unix) is to
'pre-spawn' a large number of processes - clients are attached to each
server in turn.  Although this alleviates the performance problem
<i>up to that number of users</i>, it still does not scale well.  To
reliably and efficiently handle <i>[n]</i> users, <i>[n]</i> processes
are still necessary.

<h3>Other Advantages</h3>

<ul>
<li><b>Extensibility</b>
<p>
Since Medusa is written in Python, it is easily extensible.  No
separate compilation is necessary.  New facilities can be loaded
and unloaded into the server without any recompilation or
linking, even while the server is running.  [For example, Medusa
can be configured to automatically upgrade itself to the latest
version every so often].
<p>
<li><b>Security</b>
<p>
Many of the most popular security holes (popular, at least,
among the mischievous) exploit the fact that servers are usually
written in a low-level language.  Unless such languages are used
with extreme care, weaknesses can be introduced that are very
difficult to predict and control.  One of the favorite
loop-holes is the 'memory buffer overflow', used by the Internet
Worm (and many others) to gain unwarranted access to Internet
servers.
</ul>

<p>
Such problems are virtually non-existent when working in a
high-level language like Python, where for example all access to
variables and their components are checked at run-time for valid
range operations.  Even unforeseen errors and operating system
bugs can be caught - Python includes a full exception-handling
system which promotes the construction of 'highly available'
servers.  Rather than crashing the entire server, Medusa will
usually inform the user, log the error, and keep right on running.

<h2>Current Features</h2>

<ul>
<li><p>The currently available version of Medusa includes
integrated World Wide Web (<b>HTTP</b>) and file transfer
(<b>FTP</b>) servers.  This combined server can solve a major
performance problem at any high-load site, by replacing two
forking servers with a single non-forking, non-threading server.
Multiple servers of each type can also be instantiated.
<p>
<li><p>Also included is a secure 'remote-control' capability,
called a <b>monitor</b> server.  With this server enabled,
authorized users can 'log in' to the running server, and control,
manipulate, and examine the server <i>while it is running</i>.
<p>
<li><p>A 'chat server' is included, as a sample server
implementation.  It's simple enough to serve as a good
introduction to extending Medusa.  It implements a simple IRC-like
chat service that could easily be integrated with the HTTP server
for an integrated web-oriented chat service.  [For example, a
small Java applet could be used on the client end to communicate
with the server].
<p>
<li><p>Several extensions are available for the HTTP server, and
more will become available over time.  Each of these extensions can
be loaded/unloaded into the server dynamically.
<p>
<dl>
<dt><b>Status Extension</b>
<dd>Provides status
information via the HTTP server.  Can report on any or all of
the installed servers, and on the extensions loaded into the
HTTP server.  [If this server is running Medusa, you should be
able to see it <a href="/status">here</a>]
<dt><b>Default Extension</b>
<dd>Provides the 'standard'
file-delivery http server behavior.  Uses the same abstract
filesystem object as the FTP server.  Supports the HTTP/1.1
persistent connection via the 'Connection: Keep-Alive' header.
<dt><b>HTTP Proxy Extension</b>
<dd>Act as a proxy server for HTTP
requests.  This lets Medusa be used as a 'Firewall' server.
Plans for this extension include cache support, filtering (to
ignore, say, all images from
'http://obnoxious.blinky.advertisements.com/'), logging,
etc...
<dt><b>Planned</b>
<dd>On the drawing board are pseudo-filesystem
extensions, access to databases like mSQL and Oracle, (and on Windows
via ODBC), authentication, server-side includes, and a full-blown
proxy/cache system for both HTTP and FTP.  Feedback from users will
help me decide which areas to concentrate on, so please email me any
suggestions.
</dl>
<p>
<li>An API is evolving for users to extend not just the HTTP
server but Medusa as a whole, mixing in other server types and new
capabilities into existing servers.  NNTP and POP3 servers have
already been written, and will probably be provided as an add-on.
I am actively encouraging other developers to produce (and if they
wish, to market) Medusa extensions.
</ul>

<h2>Where Can I Get It?</h2>
<p>
Medusa is available from
<a href="http://www.nightmare.com/medusa/">http://www.nightmare.com/medusa</a>
<p>
Feedback, both positive and negative, is much appreciated; please send
email to <a href="mailto:rushing@nightmare.com">rushing@nightmare.com</a>.

</body>
</html>
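The single-process model this page describes is easy to demonstrate with the asyncore machinery medusa builds on. The following toy echo server is a sketch separate from anything in this commit (the port is arbitrary): one select() loop multiplexes every connection, with no fork and no thread per client.

import asyncore
import socket

class echo_channel(asyncore.dispatcher_with_send):
    def handle_read(self):
        data = self.recv(512)
        if data:
            self.send(data)             # echo bytes straight back

class echo_server(asyncore.dispatcher):
    def __init__(self, port):
        asyncore.dispatcher.__init__(self)
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        self.set_reuse_addr()
        self.bind(('', port))
        self.listen(5)

    def handle_accept(self):
        conn, addr = self.accept()
        echo_channel(conn)              # no new process: just another channel

echo_server(9000)
asyncore.loop()                         # one loop services every client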
lib/python/ZServer/medusa/medusa_gif.py
0 → 100644
# -*- Mode: Python -*-

# the medusa icon as a python source file.

width = 97
height = 61

data = 'GIF89aa\000=\000\204\000\000...'    # octal-escaped GIF89a payload (abridged)
lib/python/ZServer/medusa/mime_type_table.py
0 → 100644
# -*- Python -*-
# Converted by ./convert_mime_type_table.py from:
# /usr/src2/apache_1.2b6/conf/mime.types
#
content_type_map = \
  {
    'ai':       'application/postscript',
    'aif':      'audio/x-aiff',
    'aifc':     'audio/x-aiff',
    'aiff':     'audio/x-aiff',
    'au':       'audio/basic',
    'avi':      'video/x-msvideo',
    'bcpio':    'application/x-bcpio',
    'bin':      'application/octet-stream',
    'cdf':      'application/x-netcdf',
    'class':    'application/octet-stream',
    'cpio':     'application/x-cpio',
    'cpt':      'application/mac-compactpro',
    'csh':      'application/x-csh',
    'dcr':      'application/x-director',
    'dir':      'application/x-director',
    'dms':      'application/octet-stream',
    'doc':      'application/msword',
    'dvi':      'application/x-dvi',
    'dxr':      'application/x-director',
    'eps':      'application/postscript',
    'etx':      'text/x-setext',
    'exe':      'application/octet-stream',
    'gif':      'image/gif',
    'gtar':     'application/x-gtar',
    'gz':       'application/x-gzip',
    'hdf':      'application/x-hdf',
    'hqx':      'application/mac-binhex40',
    'htm':      'text/html',
    'html':     'text/html',
    'ice':      'x-conference/x-cooltalk',
    'ief':      'image/ief',
    'jpe':      'image/jpeg',
    'jpeg':     'image/jpeg',
    'jpg':      'image/jpeg',
    'kar':      'audio/midi',
    'latex':    'application/x-latex',
    'lha':      'application/octet-stream',
    'lzh':      'application/octet-stream',
    'man':      'application/x-troff-man',
    'me':       'application/x-troff-me',
    'mid':      'audio/midi',
    'midi':     'audio/midi',
    'mif':      'application/x-mif',
    'mov':      'video/quicktime',
    'movie':    'video/x-sgi-movie',
    'mp2':      'audio/mpeg',
    'mpe':      'video/mpeg',
    'mpeg':     'video/mpeg',
    'mpg':      'video/mpeg',
    'mpga':     'audio/mpeg',
    'mp3':      'audio/mpeg',
    'ms':       'application/x-troff-ms',
    'nc':       'application/x-netcdf',
    'oda':      'application/oda',
    'pbm':      'image/x-portable-bitmap',
    'pdb':      'chemical/x-pdb',
    'pdf':      'application/pdf',
    'pgm':      'image/x-portable-graymap',
    'png':      'image/png',
    'pnm':      'image/x-portable-anymap',
    'ppm':      'image/x-portable-pixmap',
    'ppt':      'application/powerpoint',
    'ps':       'application/postscript',
    'qt':       'video/quicktime',
    'ra':       'audio/x-realaudio',
    'ram':      'audio/x-pn-realaudio',
    'ras':      'image/x-cmu-raster',
    'rgb':      'image/x-rgb',
    'roff':     'application/x-troff',
    'rpm':      'audio/x-pn-realaudio-plugin',
    'rtf':      'application/rtf',
    'rtx':      'text/richtext',
    'sgm':      'text/x-sgml',
    'sgml':     'text/x-sgml',
    'sh':       'application/x-sh',
    'shar':     'application/x-shar',
    'sit':      'application/x-stuffit',
    'skd':      'application/x-koan',
    'skm':      'application/x-koan',
    'skp':      'application/x-koan',
    'skt':      'application/x-koan',
    'snd':      'audio/basic',
    'src':      'application/x-wais-source',
    'sv4cpio':  'application/x-sv4cpio',
    'sv4crc':   'application/x-sv4crc',
    't':        'application/x-troff',
    'tar':      'application/x-tar',
    'tcl':      'application/x-tcl',
    'tex':      'application/x-tex',
    'texi':     'application/x-texinfo',
    'texinfo':  'application/x-texinfo',
    'tif':      'image/tiff',
    'tiff':     'image/tiff',
    'tr':       'application/x-troff',
    'tsv':      'text/tab-separated-values',
    'txt':      'text/plain',
    'ustar':    'application/x-ustar',
    'vcd':      'application/x-cdlink',
    'vrml':     'x-world/x-vrml',
    'wav':      'audio/x-wav',
    'wrl':      'x-world/x-vrml',
    'xbm':      'image/x-xbitmap',
    'xpm':      'image/x-xpixmap',
    'xwd':      'image/x-xwindowdump',
    'xyz':      'chemical/x-pdb',
    'zip':      'application/zip',
  }
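A typical lookup against this table, sketched for illustration (the guess_type helper below is not part of the module):

import string
from mime_type_table import content_type_map

def guess_type(path):
    # case-insensitive extension lookup with a conservative default
    ext = string.lower(string.split(path, '.')[-1])
    return content_type_map.get(ext, 'application/octet-stream')

print guess_type('logo.GIF')    # image/gif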
lib/python/ZServer/medusa/monitor.py
0 → 100644
# -*- Mode: Python; tab-width: 4 -*-
# Author: Sam Rushing <rushing@nightmare.com>
#
# python REPL channel.
#

RCS_ID = '$Id: monitor.py,v 1.15 2003/03/18 21:15:17 fdrake Exp $'

import md5
import socket
import string
import sys
import time

if RCS_ID.startswith('$Id: '):
    VERSION = string.split(RCS_ID)[2]
else:
    VERSION = '0.0'

import asyncore
import asynchat

from counter import counter
import producers

class monitor_channel(asynchat.async_chat):

    try_linemode = 1

    def __init__(self, server, sock, addr):
        asynchat.async_chat.__init__(self, sock)
        self.server = server
        self.addr = addr
        self.set_terminator('\r\n')
        self.data = ''
        # local bindings specific to this channel
        self.local_env = sys.modules['__main__'].__dict__.copy()
        self.push('Python ' + sys.version + '\r\n')
        self.push(sys.copyright + '\r\n')
        self.push('Welcome to %s\r\n' % self)
        self.push("[Hint: try 'from __main__ import *']\r\n")
        self.prompt()
        self.number = server.total_sessions.as_long()
        self.line_counter = counter()
        self.multi_line = []

    def handle_connect(self):
        # send IAC DO LINEMODE
        self.push('\377\375\"')

    def close(self):
        self.server.closed_sessions.increment()
        asynchat.async_chat.close(self)

    def prompt(self):
        self.push('>>> ')

    def collect_incoming_data(self, data):
        self.data = self.data + data
        if len(self.data) > 1024:
            # denial of service.
            self.push('BCNU\r\n')
            self.close_when_done()

    def found_terminator(self):
        line = self.clean_line(self.data)
        self.data = ''
        self.line_counter.increment()
        # check for special case inputs...
        if not line and not self.multi_line:
            self.prompt()
            return
        if line in ['\004', 'exit']:
            self.push('BCNU\r\n')
            self.close_when_done()
            return
        oldout = sys.stdout
        olderr = sys.stderr
        try:
            p = output_producer(self, olderr)
            sys.stdout = p
            sys.stderr = p
            try:
                # this is, of course, a blocking operation.
                # if you wanted to thread this, you would have
                # to synchronize, etc... and treat the output
                # like a pipe.  Not Fun.
                #
                # try eval first.  If that fails, try exec.  If that fails,
                # hurl.
                try:
                    if self.multi_line:
                        # oh, this is horrible...
                        raise SyntaxError
                    co = compile(line, repr(self), 'eval')
                    result = eval(co, self.local_env)
                    method = 'eval'
                    if result is not None:
                        print repr(result)
                    self.local_env['_'] = result
                except SyntaxError:
                    try:
                        if self.multi_line:
                            if line and line[0] in [' ', '\t']:
                                self.multi_line.append(line)
                                self.push('... ')
                                return
                            else:
                                self.multi_line.append(line)
                                line = string.join(self.multi_line, '\n')
                                co = compile(line, repr(self), 'exec')
                                self.multi_line = []
                        else:
                            co = compile(line, repr(self), 'exec')
                    except SyntaxError, why:
                        if why[0] == 'unexpected EOF while parsing':
                            self.push('... ')
                            self.multi_line.append(line)
                            return
                        else:
                            t, v, tb = sys.exc_info()
                            del tb
                            raise t, v
                    exec co in self.local_env
                    method = 'exec'
            except:
                method = 'exception'
                self.multi_line = []
                (file, fun, line), t, v, tbinfo = asyncore.compact_traceback()
                self.log_info('%s %s %s' % (t, v, tbinfo), 'warning')
        finally:
            sys.stdout = oldout
            sys.stderr = olderr
        self.log_info('%s:%s (%s)> %s' % (
            self.number,
            self.line_counter,
            method,
            repr(line))
        )
        self.push_with_producer(p)
        self.prompt()

    # for now, we ignore any telnet option stuff sent to
    # us, and we process the backspace key ourselves.
    # gee, it would be fun to write a full-blown line-editing
    # environment, etc...
    def clean_line(self, line):
        chars = []
        for ch in line:
            oc = ord(ch)
            if oc < 127:
                if oc in [8, 177]:
                    # backspace
                    chars = chars[:-1]
                else:
                    chars.append(ch)
        return string.join(chars, '')

class monitor_server(asyncore.dispatcher):

    SERVER_IDENT = 'Monitor Server (V%s)' % VERSION

    channel_class = monitor_channel

    def __init__(self, hostname='127.0.0.1', port=8023):
        self.hostname = hostname
        self.port = port
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        self.set_reuse_addr()
        self.bind((hostname, port))
        self.log_info('%s started on port %d' % (self.SERVER_IDENT, port))
        self.listen(5)
        self.closed = 0
        self.failed_auths = 0
        self.total_sessions = counter()
        self.closed_sessions = counter()

    def writable(self):
        return 0

    def handle_accept(self):
        conn, addr = self.accept()
        self.log_info('Incoming monitor connection from %s:%d' % addr)
        self.channel_class(self, conn, addr)
        self.total_sessions.increment()

    def status(self):
        return producers.simple_producer(
            '<h2>%s</h2>' % self.SERVER_IDENT
            + '<br><b>Total Sessions:</b> %s' % self.total_sessions
            + '<br><b>Current Sessions:</b> %d' % (
                self.total_sessions.as_long() - self.closed_sessions.as_long()
            )
        )

def hex_digest(s):
    m = md5.md5()
    m.update(s)
    return string.joinfields(
        map(lambda x: hex(ord(x))[2:], map(None, m.digest())),
        '',
    )

class secure_monitor_channel(monitor_channel):

    authorized = 0

    def __init__(self, server, sock, addr):
        asynchat.async_chat.__init__(self, sock)
        self.server = server
        self.addr = addr
        self.set_terminator('\r\n')
        self.data = ''
        # local bindings specific to this channel
        self.local_env = {}
        # send timestamp string
        self.timestamp = str(time.time())
        self.count = 0
        self.line_counter = counter()
        self.number = int(server.total_sessions.as_long())
        self.multi_line = []
        self.push(self.timestamp + '\r\n')

    def found_terminator(self):
        if not self.authorized:
            if hex_digest('%s%s' % (self.timestamp, self.server.password)) != self.data:
                self.log_info('%s: failed authorization' % self, 'warning')
                self.server.failed_auths = self.server.failed_auths + 1
                self.close()
            else:
                self.authorized = 1
                self.push('Python ' + sys.version + '\r\n')
                self.push(sys.copyright + '\r\n')
                self.push('Welcome to %s\r\n' % self)
                self.prompt()
                self.data = ''
        else:
            monitor_channel.found_terminator(self)

class secure_encrypted_monitor_channel(secure_monitor_channel):
    "Wrap send() and recv() with a stream cipher"

    def __init__(self, server, conn, addr):
        key = server.password
        self.outgoing = server.cipher.new(key)
        self.incoming = server.cipher.new(key)
        secure_monitor_channel.__init__(self, server, conn, addr)

    def send(self, data):
        # send the encrypted data instead
        ed = self.outgoing.encrypt(data)
        return secure_monitor_channel.send(self, ed)

    def recv(self, block_size):
        data = secure_monitor_channel.recv(self, block_size)
        if data:
            dd = self.incoming.decrypt(data)
            return dd
        else:
            return data

class secure_monitor_server(monitor_server):

    channel_class = secure_monitor_channel

    def __init__(self, password, hostname='', port=8023):
        monitor_server.__init__(self, hostname, port)
        self.password = password

    def status(self):
        p = monitor_server.status(self)
        # kludge
        p.data = p.data + ('<br><b>Failed Authorizations:</b> %d' % self.failed_auths)
        return p

# don't try to print from within any of the methods
# of this object. 8^)

class output_producer:
    def __init__(self, channel, real_stderr):
        self.channel = channel
        self.data = ''
        # use _this_ for debug output
        self.stderr = real_stderr

    def check_data(self):
        if len(self.data) > 1 << 16:
            # runaway output, close it.
            self.channel.close()

    def write(self, data):
        lines = string.splitfields(data, '\n')
        data = string.join(lines, '\r\n')
        self.data = self.data + data
        self.check_data()

    def writeline(self, line):
        self.data = self.data + line + '\r\n'
        self.check_data()

    def writelines(self, lines):
        self.data = self.data + string.joinfields(lines, '\r\n') + '\r\n'
        self.check_data()

    def ready(self):
        return (len(self.data) > 0)

    def flush(self):
        pass

    def softspace(self, *args):
        pass

    def more(self):
        if self.data:
            result = self.data[:512]
            self.data = self.data[512:]
            return result
        else:
            return ''

if __name__ == '__main__':
    import string
    import sys
    if '-s' in sys.argv:
        sys.argv.remove('-s')
        print 'Enter password: ',
        password = raw_input()
    else:
        password = None

    if '-e' in sys.argv:
        sys.argv.remove('-e')
        encrypt = 1
    else:
        encrypt = 0

    if len(sys.argv) > 1:
        port = string.atoi(sys.argv[1])
    else:
        port = 8023

    if password is not None:
        s = secure_monitor_server(password, '', port)
        if encrypt:
            s.channel_class = secure_encrypted_monitor_channel
            import sapphire
            s.cipher = sapphire
    else:
        s = monitor_server('', port)

    asyncore.loop(use_poll=1)
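The secure channel above authenticates with a simple challenge/response: the server pushes a fresh timestamp, and the client must answer with the MD5 hex digest of timestamp + password before anything else is accepted. Sketched with made-up values (this snippet is not part of the module):

from monitor import hex_digest

timestamp = '1048022117.0'      # challenge pushed by the server (example value)
password = 'fnord'              # shared secret (example)
response = hex_digest(timestamp + password)

# server-side check, as in secure_monitor_channel.found_terminator():
assert response == hex_digest('%s%s' % (timestamp, password))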
lib/python/ZServer/medusa/monitor_client.py
0 → 100644
# -*- Mode: Python; tab-width: 4 -*-

# monitor client, unix version.

import asyncore
import asynchat
import regsub
import socket
import string
import sys
import os

import md5
import time

class stdin_channel(asyncore.file_dispatcher):
    def handle_read(self):
        data = self.recv(512)
        if not data:
            print '\nclosed.'
            self.sock_channel.close()
            try:
                self.close()
            except:
                pass

        data = regsub.gsub('\n', '\r\n', data)
        self.sock_channel.push(data)

    def writable(self):
        return 0

    def log(self, *ignore):
        pass

class monitor_client(asynchat.async_chat):
    def __init__(self, password, addr=('', 8023), socket_type=socket.AF_INET):
        asynchat.async_chat.__init__(self)
        self.create_socket(socket_type, socket.SOCK_STREAM)
        self.terminator = '\r\n'
        self.connect(addr)
        self.sent_auth = 0
        self.timestamp = ''
        self.password = password

    def collect_incoming_data(self, data):
        if not self.sent_auth:
            self.timestamp = self.timestamp + data
        else:
            sys.stdout.write(data)
            sys.stdout.flush()

    def found_terminator(self):
        if not self.sent_auth:
            self.push(hex_digest(self.timestamp + self.password) + '\r\n')
            self.sent_auth = 1
        else:
            print

    def handle_close(self):
        # close all the channels, which will make the standard main
        # loop exit.
        map(lambda x: x.close(), asyncore.socket_map.values())

    def log(self, *ignore):
        pass

class encrypted_monitor_client(monitor_client):
    "Wrap push() and recv() with a stream cipher"

    def init_cipher(self, cipher, key):
        self.outgoing = cipher.new(key)
        self.incoming = cipher.new(key)

    def push(self, data):
        # push the encrypted data instead
        return monitor_client.push(self, self.outgoing.encrypt(data))

    def recv(self, block_size):
        data = monitor_client.recv(self, block_size)
        if data:
            return self.incoming.decrypt(data)
        else:
            return data

def hex_digest(s):
    m = md5.md5()
    m.update(s)
    return string.join(
        map(lambda x: hex(ord(x))[2:], map(None, m.digest())),
        '',
    )

if __name__ == '__main__':
    if len(sys.argv) == 1:
        print 'Usage: %s host port' % sys.argv[0]
        sys.exit(0)

    if '-e' in sys.argv:
        encrypt = 1
        sys.argv.remove('-e')
    else:
        encrypt = 0

    sys.stderr.write('Enter Password: ')
    sys.stderr.flush()
    import os
    try:
        os.system('stty -echo')
        p = raw_input()
        print
    finally:
        os.system('stty echo')
    stdin = stdin_channel(0)
    if len(sys.argv) > 1:
        if encrypt:
            client = encrypted_monitor_client(p, (sys.argv[1], string.atoi(sys.argv[2])))
            import sapphire
            client.init_cipher(sapphire, p)
        else:
            client = monitor_client(p, (sys.argv[1], string.atoi(sys.argv[2])))
    else:
        # default to local host, 'standard' port
        client = monitor_client(p)
    stdin.sock_channel = client
    asyncore.loop()
lib/python/ZServer/medusa/monitor_client_win32.py
0 → 100644
# -*- Mode: Python; tab-width: 4 -*-

# monitor client, win32 version

# since we can't do select() on stdin/stdout, we simply
# use threads and blocking sockets. <sigh>

import regsub
import socket
import string
import sys
import thread
import md5

def hex_digest(s):
    m = md5.md5()
    m.update(s)
    return string.join(
        map(lambda x: hex(ord(x))[2:], map(None, m.digest())),
        '',
    )

def reader(lock, sock, password):
    # first grab the timestamp
    ts = sock.recv(1024)[:-2]
    sock.send(hex_digest(ts + password) + '\r\n')
    while 1:
        d = sock.recv(1024)
        if not d:
            lock.release()
            print 'Connection closed.  Hit <return> to exit'
            thread.exit()
        sys.stdout.write(d)
        sys.stdout.flush()

def writer(lock, sock, barrel="just kidding"):
    while lock.locked():
        sock.send(sys.stdin.readline()[:-1] + '\r\n')

if __name__ == '__main__':
    if len(sys.argv) == 1:
        print 'Usage: %s host port' % sys.argv[0]
        sys.exit(0)
    print 'Enter Password: ',
    p = raw_input()
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect((sys.argv[1], string.atoi(sys.argv[2])))
    l = thread.allocate_lock()
    l.acquire()
    thread.start_new_thread(reader, (l, s, p))
    writer(l, s)
lib/python/ZServer/medusa/producers.py
0 → 100644
# -*- Mode: Python; tab-width: 4 -*-
RCS_ID
=
'$Id: producers.py,v 1.12 2003/03/18 21:15:17 fdrake Exp $'
import
string
"""
A collection of producers.
Each producer implements a particular feature: They can be combined
in various ways to get interesting and useful behaviors.
For example, you can feed dynamically-produced output into the compressing
producer, then wrap this with the 'chunked' transfer-encoding producer.
"""
class
simple_producer
:
"producer for a string"
def
__init__
(
self
,
data
,
buffer_size
=
1024
):
self
.
data
=
data
self
.
buffer_size
=
buffer_size
def
more
(
self
):
if
len
(
self
.
data
)
>
self
.
buffer_size
:
result
=
self
.
data
[:
self
.
buffer_size
]
self
.
data
=
self
.
data
[
self
.
buffer_size
:]
return
result
else
:
result
=
self
.
data
self
.
data
=
''
return
result
class
scanning_producer
:
"like simple_producer, but more efficient for large strings"
def
__init__
(
self
,
data
,
buffer_size
=
1024
):
self
.
data
=
data
self
.
buffer_size
=
buffer_size
self
.
pos
=
0
def
more
(
self
):
if
self
.
pos
<
len
(
self
.
data
):
lp
=
self
.
pos
rp
=
min
(
len
(
self
.
data
),
self
.
pos
+
self
.
buffer_size
)
result
=
self
.
data
[
lp
:
rp
]
self
.
pos
=
self
.
pos
+
len
(
result
)
return
result
else
:
return
''
class
lines_producer
:
"producer for a list of lines"
def
__init__
(
self
,
lines
):
self
.
lines
=
lines
def
ready
(
self
):
return
len
(
self
.
lines
)
def
more
(
self
):
if
self
.
lines
:
chunk
=
self
.
lines
[:
50
]
self
.
lines
=
self
.
lines
[
50
:]
return
string
.
join
(
chunk
,
'
\
r
\
n
'
)
+
'
\
r
\
n
'
else
:
return
''
class
buffer_list_producer
:
"producer for a list of buffers"
# i.e., data == string.join (buffers, '')
def
__init__
(
self
,
buffers
):
self
.
index
=
0
self
.
buffers
=
buffers
def
more
(
self
):
if
self
.
index
>=
len
(
self
.
buffers
):
return
''
else
:
data
=
self
.
buffers
[
self
.
index
]
self
.
index
=
self
.
index
+
1
return
data
class
file_producer
:
"producer wrapper for file[-like] objects"
# match http_channel's outgoing buffer size
out_buffer_size
=
1
<<
16
def
__init__
(
self
,
file
):
self
.
done
=
0
self
.
file
=
file
def
more
(
self
):
if
self
.
done
:
return
''
else
:
data
=
self
.
file
.
read
(
self
.
out_buffer_size
)
if
not
data
:
self
.
file
.
close
()
del
self
.
file
self
.
done
=
1
return
''
else
:
return
data
# A simple output producer.  This one does not [yet] have
# the safety feature builtin to the monitor channel:  runaway
# output will not be caught.

# don't try to print from within any of the methods
# of this object.

class output_producer:
    "Acts like an output file; suitable for capturing sys.stdout"

    def __init__ (self):
        self.data = ''

    def write (self, data):
        lines = string.splitfields (data, '\n')
        data = string.join (lines, '\r\n')
        self.data = self.data + data

    def writeline (self, line):
        self.data = self.data + line + '\r\n'

    def writelines (self, lines):
        self.data = self.data + string.joinfields (lines, '\r\n') + '\r\n'

    def ready (self):
        return (len (self.data) > 0)

    def flush (self):
        pass

    def softspace (self, *args):
        pass

    def more (self):
        if self.data:
            result = self.data[:512]
            self.data = self.data[512:]
            return result
        else:
            return ''

class composite_producer:
    "combine a fifo of producers into one"

    def __init__ (self, producers):
        self.producers = producers

    def more (self):
        while len(self.producers):
            p = self.producers.first()
            d = p.more()
            if d:
                return d
            else:
                self.producers.pop()
        else:
            return ''

class globbing_producer:
    """
    'glob' the output from a producer into a particular buffer size.
    helps reduce the number of calls to send().  [this appears to
    gain about 30% performance on requests to a single channel]
    """

    def __init__ (self, producer, buffer_size=1<<16):
        self.producer = producer
        self.buffer = ''
        self.buffer_size = buffer_size

    def more (self):
        while len(self.buffer) < self.buffer_size:
            data = self.producer.more()
            if data:
                self.buffer = self.buffer + data
            else:
                break
        r = self.buffer
        self.buffer = ''
        return r
class hooked_producer:
    """
    A producer that will call <function> when it empties,
    with an argument of the number of bytes produced.  Useful
    for logging/instrumentation purposes.
    """

    def __init__ (self, producer, function):
        self.producer = producer
        self.function = function
        self.bytes = 0

    def more (self):
        if self.producer:
            result = self.producer.more()
            if not result:
                self.producer = None
                self.function (self.bytes)
            else:
                self.bytes = self.bytes + len(result)
            return result
        else:
            return ''
# HTTP 1.1 emphasizes that an advertised Content-Length header MUST be
# correct.  In the face of Strange Files, it is conceivable that
# reading a 'file' may produce an amount of data not matching that
# reported by os.stat() [text/binary mode issues, perhaps the file is
# being appended to, etc..]  This makes the chunked encoding a True
# Blessing, and it really ought to be used even with normal files.
# How beautifully it blends with the concept of the producer.

class chunked_producer:
    """A producer that implements the 'chunked' transfer coding for HTTP/1.1.
    Here is a sample usage:
            request['Transfer-Encoding'] = 'chunked'
            request.push (
                    producers.chunked_producer (your_producer)
                    )
            request.done()
    """

    def __init__ (self, producer, footers=None):
        self.producer = producer
        self.footers = footers

    def more (self):
        if self.producer:
            data = self.producer.more()
            if data:
                return '%x\r\n%s\r\n' % (len(data), data)
            else:
                self.producer = None
                if self.footers:
                    return string.join (
                            ['0'] + self.footers,
                            '\r\n'
                            ) + '\r\n\r\n'
                else:
                    return '0\r\n\r\n'
        else:
            return ''
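# A concrete illustration (not in the original source) of what
# chunked_producer emits: each chunk is prefixed with its length in hex
# and followed by CRLF, and a zero-length chunk terminates the body
# (rfc2616 section 3.6.1).  For a producer yielding 'hello' then 'world!',
# successive calls to more() return:
#
#     '5\r\nhello\r\n'
#     '6\r\nworld!\r\n'
#     '0\r\n\r\n'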
# Unfortunately this isn't very useful right now (Aug 97), because
# apparently the browsers don't do on-the-fly decompression.  Which
# is sad, because this could _really_ speed things up, especially for
# low-bandwidth clients (i.e., most everyone).

try:
    import zlib
except ImportError:
    zlib = None

class compressed_producer:
    """
    Compress another producer on-the-fly, using ZLIB
    [Unfortunately, none of the current browsers seem to support this]
    """

    # Note: It's not very efficient to have the server repeatedly
    # compressing your outgoing files: compress them ahead of time, or
    # use a compress-once-and-store scheme.  However, if you have low
    # bandwidth and low traffic, this may make more sense than
    # maintaining your source files compressed.
    #
    # Can also be used for compressing dynamically-produced output.

    def __init__ (self, producer, level=5):
        self.producer = producer
        self.compressor = zlib.compressobj (level)

    def more (self):
        if self.producer:
            cdata = ''
            # feed until we get some output
            while not cdata:
                data = self.producer.more()
                if not data:
                    self.producer = None
                    return self.compressor.flush()
                else:
                    cdata = self.compressor.compress (data)
            return cdata
        else:
            return ''
class escaping_producer:

    "A producer that escapes a sequence of characters"
    " Common usage: escaping the CRLF.CRLF sequence in SMTP, NNTP, etc..."

    def __init__ (self, producer, esc_from='\r\n.', esc_to='\r\n..'):
        self.producer = producer
        self.esc_from = esc_from
        self.esc_to = esc_to
        self.buffer = ''
        from asynchat import find_prefix_at_end
        self.find_prefix_at_end = find_prefix_at_end

    def more (self):
        esc_from = self.esc_from
        esc_to = self.esc_to

        buffer = self.buffer + self.producer.more()

        if buffer:
            buffer = string.replace (buffer, esc_from, esc_to)
            i = self.find_prefix_at_end (buffer, esc_from)
            if i:
                # we found a prefix
                self.buffer = buffer[-i:]
                return buffer[:-i]
            else:
                # no prefix, return it all
                self.buffer = ''
                return buffer
        else:
            return buffer
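# A worked example (not part of the original module) of escaping_producer
# with its default arguments, which perform SMTP-style dot-stuffing: any
# '\r\n.' in the stream becomes '\r\n..', so a line consisting of a single
# '.' can still terminate the message unambiguously.
#
#     p = escaping_producer (simple_producer ('line one\r\n.\r\nline two'))
#     # accumulating p.more() until it returns '' yields:
#     # 'line one\r\n..\r\nline two'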
lib/python/ZServer/medusa/put_handler.py
0 → 100644
# -*- Mode: Python; tab-width: 4 -*-
#
#       Author: Sam Rushing <rushing@nightmare.com>
#       Copyright 1996-2000 by Sam Rushing
#                                                All Rights Reserved.
#

RCS_ID = '$Id: put_handler.py,v 1.5 2003/03/18 21:15:17 fdrake Exp $'

import re
import string

import default_handler
unquote = default_handler.unquote
get_header = default_handler.get_header

last_request = None

class put_handler:
    def __init__ (self, filesystem, uri_regex):
        self.filesystem = filesystem
        if type (uri_regex) == type (''):
            self.uri_regex = re.compile (uri_regex)
        else:
            self.uri_regex = uri_regex

    def match (self, request):
        uri = request.uri
        if request.command == 'put':
            m = self.uri_regex.match (uri)
            if m and m.end() == len(uri):
                return 1
        return 0

    def handle_request (self, request):
        path, params, query, fragment = request.split_uri()

        # strip off leading slashes
        while path and path[0] == '/':
            path = path[1:]

        if '%' in path:
            path = unquote (path)

        # make sure there's a content-length header
        cl = get_header (CONTENT_LENGTH, request.header)
        if not cl:
            request.error (411)
            return
        else:
            cl = string.atoi (cl)

        # don't let them try to overwrite a directory
        if self.filesystem.isdir (path):
            request.error (405)
            return

        is_update = self.filesystem.isfile (path)

        try:
            output_file = self.filesystem.open (path, 'wb')
        except:
            request.error (405)
            return

        request.collector = put_collector (output_file, cl, request, is_update)

        # no terminator while receiving PUT data
        request.channel.set_terminator (None)

        # don't respond yet, wait until we've received the data...

class put_collector:
    def __init__ (self, file, length, request, is_update):
        self.file = file
        self.length = length
        self.request = request
        self.is_update = is_update
        self.bytes_in = 0

    def collect_incoming_data (self, data):
        ld = len(data)
        bi = self.bytes_in
        if (bi + ld) >= self.length:
            # last bit of data
            chunk = self.length - bi
            self.file.write (data[:chunk])
            self.file.close()

            if chunk != ld:
                print 'orphaned %d bytes: <%s>' % (ld - chunk, repr(data[chunk:]))

            # do some housekeeping
            r = self.request
            ch = r.channel
            ch.current_request = None
            # set the terminator back to the default
            ch.set_terminator ('\r\n\r\n')
            if self.is_update:
                r.reply_code = 204 # No content
                r.done()
            else:
                r.reply_now (201) # Created
            # avoid circular reference
            del self.request
        else:
            self.file.write (data)
            self.bytes_in = self.bytes_in + ld

    def found_terminator (self):
        # shouldn't be called
        pass

CONTENT_LENGTH = re.compile ('Content-Length: ([0-9]+)', re.IGNORECASE)
lib/python/ZServer/medusa/redirecting_handler.py
0 → 100644
# -*- Mode: Python; tab-width: 4 -*-
#
#       Author: Sam Rushing <rushing@nightmare.com>
#       Copyright 1996-2000 by Sam Rushing
#                                                All Rights Reserved.
#

RCS_ID = '$Id: redirecting_handler.py,v 1.5 2003/03/18 21:15:17 fdrake Exp $'

import re
import counter

class redirecting_handler:

    def __init__ (self, pattern, redirect, regex_flag=re.IGNORECASE):
        self.pattern = pattern
        self.redirect = redirect
        self.patreg = re.compile (pattern, regex_flag)
        self.hits = counter.counter()

    def match (self, request):
        m = self.patreg.match (request.uri)
        return (m and (m.end() == len(request.uri)))

    def handle_request (self, request):
        self.hits.increment()
        m = self.patreg.match (request.uri)
        part = m.group(1)
        request['Location'] = self.redirect % part
        request.error (302) # moved temporarily

    def __repr__ (self):
        return '<Redirecting Handler at %08x [%s => %s]>' % (
                id(self),
                repr(self.pattern),
                repr(self.redirect)
                )

    def status (self):
        import producers
        return producers.simple_producer (
                '<li> Redirecting Handler %s => %s <b>Hits</b>: %s' % (
                        self.pattern, self.redirect, self.hits
                        )
                )
lib/python/ZServer/medusa/resolver.py
0 → 100644
# -*- Mode: Python; tab-width: 4 -*-
#
#       Author: Sam Rushing <rushing@nightmare.com>
#

RCS_ID = '$Id: resolver.py,v 1.12 2003/03/18 21:15:17 fdrake Exp $'

# Fast, low-overhead asynchronous name resolver.  uses 'pre-cooked'
# DNS requests, unpacks only as much as it needs of the reply.

# see rfc1035 for details

import string
import asyncore
import socket
import sys
import time
from counter import counter

if RCS_ID.startswith('$Id: '):
    VERSION = string.split(RCS_ID)[2]
else:
    VERSION = '0.0'

# header
#                                     1  1  1  1  1  1
#       0  1  2  3  4  5  6  7  8  9  0  1  2  3  4  5
#     +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
#     |                      ID                       |
#     +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
#     |QR|   Opcode  |AA|TC|RD|RA|    Z   |   RCODE   |
#     +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
#     |                    QDCOUNT                    |
#     +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
#     |                    ANCOUNT                    |
#     +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
#     |                    NSCOUNT                    |
#     +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
#     |                    ARCOUNT                    |
#     +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+

# question
#                                     1  1  1  1  1  1
#       0  1  2  3  4  5  6  7  8  9  0  1  2  3  4  5
#     +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
#     |                                               |
#     /                     QNAME                     /
#     /                                               /
#     +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
#     |                     QTYPE                     |
#     +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
#     |                     QCLASS                    |
#     +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# build a DNS address request, _quickly_
def fast_address_request (host, id=0):
    return (
            '%c%c' % (chr((id>>8)&0xff), chr(id&0xff))
            + '\001\000\000\001\000\000\000\000\000\000%s\000\000\001\000\001' % (
                    string.join (
                            map (
                                    lambda part: '%c%s' % (chr(len(part)), part),
                                    string.split (host, '.')
                                    ),
                            ''
                            )
                    )
            )

def fast_ptr_request (host, id=0):
    return (
            '%c%c' % (chr((id>>8)&0xff), chr(id&0xff))
            + '\001\000\000\001\000\000\000\000\000\000%s\000\000\014\000\001' % (
                    string.join (
                            map (
                                    lambda part: '%c%s' % (chr(len(part)), part),
                                    string.split (host, '.')
                                    ),
                            ''
                            )
                    )
            )
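# How the '%s' above gets filled in (an illustration, not in the original
# source): rfc1035 encodes a name as length-prefixed labels, terminated by
# the zero byte that appears right after the '%s' in the literal.
#
#     >>> host = 'www.example.com'
#     >>> string.join (
#     ...         map (lambda part: '%c%s' % (chr(len(part)), part),
#     ...              string.split (host, '.')),
#     ...         '')
#     '\x03www\x07example\x03com'
#
# so the question section carries '\x03www\x07example\x03com\x00' followed
# by the two-byte QTYPE and QCLASS.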
def unpack_name (r, pos):
    n = []
    while 1:
        ll = ord(r[pos])
        if (ll & 0xc0):
            # compression: the low 6 bits of this byte plus the next byte
            # form an offset into the message.  [parenthesized so the mask
            # applies before the shift]
            pos = ((ll & 0x3f) << 8) + ord(r[pos+1])
        elif ll == 0:
            break
        else:
            pos = pos + 1
            n.append (r[pos:pos+ll])
            pos = pos + ll
    return string.join (n, '.')

def skip_name (r, pos):
    s = pos
    while 1:
        ll = ord(r[pos])
        if (ll & 0xc0):
            # compression
            return pos + 2
        elif ll == 0:
            pos = pos + 1
            break
        else:
            pos = pos + ll + 1
    return pos

def unpack_ttl (r, pos):
    return reduce (
            lambda x, y: (x<<8) | y,
            map (ord, r[pos:pos+4])
            )
# resource record
#                                     1  1  1  1  1  1
#       0  1  2  3  4  5  6  7  8  9  0  1  2  3  4  5
#     +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
#     |                                               |
#     /                                               /
#     /                      NAME                     /
#     |                                               |
#     +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
#     |                      TYPE                     |
#     +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
#     |                     CLASS                     |
#     +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
#     |                      TTL                      |
#     |                                               |
#     +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
#     |                   RDLENGTH                    |
#     +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--|
#     /                     RDATA                     /
#     /                                               /
#     +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+

def unpack_address_reply (r):
    ancount = (ord(r[6])<<8) + (ord(r[7]))
    # skip question, first name starts at 12,
    # this is followed by QTYPE and QCLASS
    pos = skip_name (r, 12) + 4
    if ancount:
        # we are looking very specifically for
        # an answer with TYPE=A, CLASS=IN (\000\001\000\001)
        for an in range(ancount):
            pos = skip_name (r, pos)
            if r[pos:pos+4] == '\000\001\000\001':
                return (
                        unpack_ttl (r, pos+4),
                        '%d.%d.%d.%d' % tuple(map(ord, r[pos+10:pos+14]))
                        )
            # skip over TYPE, CLASS, TTL, RDLENGTH, RDATA
            pos = pos + 8
            rdlength = (ord(r[pos])<<8) + (ord(r[pos+1]))
            pos = pos + 2 + rdlength
        return 0, None
    else:
        return 0, None

def unpack_ptr_reply (r):
    ancount = (ord(r[6])<<8) + (ord(r[7]))
    # skip question, first name starts at 12,
    # this is followed by QTYPE and QCLASS
    pos = skip_name (r, 12) + 4
    if ancount:
        # we are looking very specifically for
        # an answer with TYPE=PTR, CLASS=IN (\000\014\000\001)
        for an in range(ancount):
            pos = skip_name (r, pos)
            if r[pos:pos+4] == '\000\014\000\001':
                return (
                        unpack_ttl (r, pos+4),
                        unpack_name (r, pos+10)
                        )
            # skip over TYPE, CLASS, TTL, RDLENGTH, RDATA
            pos = pos + 8
            rdlength = (ord(r[pos])<<8) + (ord(r[pos+1]))
            pos = pos + 2 + rdlength
        return 0, None
    else:
        return 0, None
# This is a UDP (datagram) resolver.

#
# It may be useful to implement a TCP resolver.  This would presumably
# give us more reliable behavior when things get too busy.  A TCP
# client would have to manage the connection carefully, since the
# server is allowed to close it at will (the RFC recommends closing
# after 2 minutes of idle time).
#
# Note also that the TCP client will have to prepend each request
# with a 2-byte length indicator (see rfc1035).
#
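# For reference (a sketch, not in the original source), the 2-byte length
# prefix mentioned above is big-endian, so a TCP client would send:
#
#     import struct
#     req = fast_address_request ('www.example.com', id=1)
#     tcp_req = struct.pack ('>H', len(req)) + req
#
# and would likewise have to read a 2-byte length before each reply.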
class resolver (asyncore.dispatcher):

    id = counter()

    def __init__ (self, server='127.0.0.1'):
        asyncore.dispatcher.__init__ (self)
        self.create_socket (socket.AF_INET, socket.SOCK_DGRAM)
        self.server = server
        self.request_map = {}
        self.last_reap_time = int(time.time())      # reap every few minutes

    def writable (self):
        return 0

    def log (self, *args):
        pass

    def handle_close (self):
        self.log_info('closing!')
        self.close()

    def handle_error (self):      # don't close the connection on error
        (file, fun, line), t, v, tbinfo = asyncore.compact_traceback()
        self.log_info(
                'Problem with DNS lookup (%s:%s %s)' % (t, v, tbinfo),
                'error')

    def get_id (self):
        return (self.id.as_long() % (1<<16))

    def reap (self):
        # find DNS requests that have timed out
        now = int(time.time())
        if now - self.last_reap_time > 180:         # reap every 3 minutes
            self.last_reap_time = now               # update before we forget
            for k, (host, unpack, callback, when) in self.request_map.items():
                if now - when > 180:                # over 3 minutes old
                    del self.request_map[k]
                    try:
                        # same code as in handle_read
                        callback (host, 0, None)    # timeout val is (0,None)
                    except:
                        (file, fun, line), t, v, tbinfo = asyncore.compact_traceback()
                        self.log_info('%s %s %s' % (t, v, tbinfo), 'error')

    def resolve (self, host, callback):
        self.reap()                                 # first, get rid of old guys
        self.socket.sendto (
                fast_address_request (host, self.get_id()),
                (self.server, 53)
                )
        self.request_map [self.get_id()] = (
                host, unpack_address_reply, callback, int(time.time()))
        self.id.increment()

    def resolve_ptr (self, host, callback):
        self.reap()                                 # first, get rid of old guys
        ip = string.split (host, '.')
        ip.reverse()
        ip = string.join (ip, '.') + '.in-addr.arpa'
        self.socket.sendto (
                fast_ptr_request (ip, self.get_id()),
                (self.server, 53)
                )
        self.request_map [self.get_id()] = (
                host, unpack_ptr_reply, callback, int(time.time()))
        self.id.increment()

    def handle_read (self):
        reply, whence = self.socket.recvfrom (512)
        # for security reasons we may want to double-check
        # that <whence> is the server we sent the request to.
        id = (ord(reply[0])<<8) + ord(reply[1])
        if self.request_map.has_key (id):
            host, unpack, callback, when = self.request_map[id]
            del self.request_map[id]
            ttl, answer = unpack (reply)
            try:
                callback (host, ttl, answer)
            except:
                (file, fun, line), t, v, tbinfo = asyncore.compact_traceback()
                self.log_info('%s %s %s' % (t, v, tbinfo), 'error')

class rbl (resolver):

    def resolve_maps (self, host, callback):
        ip = string.split (host, '.')
        ip.reverse()
        ip = string.join (ip, '.') + '.rbl.maps.vix.com'
        self.socket.sendto (
                fast_ptr_request (ip, self.get_id()),
                (self.server, 53)
                )
        self.request_map [self.get_id()] = host, self.check_reply, callback
        self.id.increment()

    def check_reply (self, r):
        # we only need to check RCODE.
        rcode = (ord(r[3])&0xf)
        self.log_info('MAPS RBL; RCODE =%02x\n%s' % (rcode, repr(r)))
        return 0, rcode # (ttl, answer)

class hooked_callback:
    def __init__ (self, hook, callback):
        self.hook, self.callback = hook, callback

    def __call__ (self, *args):
        apply (self.hook, args)
        apply (self.callback, args)

class caching_resolver (resolver):
    "Cache DNS queries.  Will need to honor the TTL value in the replies"

    def __init__ (*args):
        apply (resolver.__init__, args)
        self = args[0]
        self.cache = {}
        self.forward_requests = counter()
        self.reverse_requests = counter()
        self.cache_hits = counter()

    def resolve (self, host, callback):
        self.forward_requests.increment()
        if self.cache.has_key (host):
            when, ttl, answer = self.cache[host]
            # ignore TTL for now
            callback (host, ttl, answer)
            self.cache_hits.increment()
        else:
            resolver.resolve (
                    self,
                    host,
                    hooked_callback (
                            self.callback_hook,
                            callback
                            )
                    )

    def resolve_ptr (self, host, callback):
        self.reverse_requests.increment()
        if self.cache.has_key (host):
            when, ttl, answer = self.cache[host]
            # ignore TTL for now
            callback (host, ttl, answer)
            self.cache_hits.increment()
        else:
            resolver.resolve_ptr (
                    self,
                    host,
                    hooked_callback (
                            self.callback_hook,
                            callback
                            )
                    )

    def callback_hook (self, host, ttl, answer):
        self.cache[host] = time.time(), ttl, answer

    SERVER_IDENT = 'Caching DNS Resolver (V%s)' % VERSION

    def status (self):
        import status_handler
        import producers
        return producers.simple_producer (
                '<h2>%s</h2>'                       % self.SERVER_IDENT
                + '<br>Server: %s'                  % self.server
                + '<br>Cache Entries: %d'           % len(self.cache)
                + '<br>Outstanding Requests: %d'    % len(self.request_map)
                + '<br>Forward Requests: %s'        % self.forward_requests
                + '<br>Reverse Requests: %s'        % self.reverse_requests
                + '<br>Cache Hits: %s'              % self.cache_hits
                )
#test_reply = """\000\000\205\200\000\001\000\001\000\002\000\002\006squirl\011nightmare\003com\000\000\001\000\001\300\014\000\001\000\001\000\001Q\200\000\004\315\240\260\005\011nightmare\003com\000\000\002\000\001\000\001Q\200\000\002\300\014\3006\000\002\000\001\000\001Q\200\000\015\003ns1\003iag\003net\000\300\014\000\001\000\001\000\001Q\200\000\004\315\240\260\005\300]\000\001\000\001\000\000\350\227\000\004\314\033\322\005"""
# def test_unpacker ():
# print unpack_address_reply (test_reply)
#
# import time
# class timer:
# def __init__ (self):
# self.start = time.time()
# def end (self):
# return time.time() - self.start
#
# # I get ~290 unpacks per second for the typical case, compared to ~48
# # using dnslib directly. also, that latter number does not include
# # picking the actual data out.
#
# def benchmark_unpacker():
#
# r = range(1000)
# t = timer()
# for i in r:
# unpack_address_reply (test_reply)
# print '%.2f unpacks per second' % (1000.0 / t.end())
if __name__ == '__main__':
    import sys
    if len(sys.argv) == 1:
        print 'usage: %s [-r] [-s <server_IP>] host [host ...]' % sys.argv[0]
        sys.exit(0)
    elif ('-s' in sys.argv):
        i = sys.argv.index ('-s')
        server = sys.argv[i+1]
        del sys.argv[i:i+2]
    else:
        server = '127.0.0.1'

    if ('-r' in sys.argv):
        reverse = 1
        i = sys.argv.index ('-r')
        del sys.argv[i]
    else:
        reverse = 0

    if ('-m' in sys.argv):
        maps = 1
        sys.argv.remove ('-m')
    else:
        maps = 0

    if maps:
        r = rbl (server)
    else:
        r = caching_resolver (server)

    count = len(sys.argv) - 1

    def print_it (host, ttl, answer):
        global count
        print '%s: %s' % (host, answer)
        count = count - 1
        if not count:
            r.close()

    for host in sys.argv[1:]:
        if reverse:
            r.resolve_ptr (host, print_it)
        elif maps:
            r.resolve_maps (host, print_it)
        else:
            r.resolve (host, print_it)

    # hooked asyncore.loop()
    while asyncore.socket_map:
        asyncore.poll (30.0)
        print 'requests outstanding: %d' % len(r.request_map)
lib/python/ZServer/medusa/status_handler.py
0 → 100644
# -*- Mode: Python; tab-width: 4 -*-

VERSION_STRING = "$Id: status_handler.py,v 1.9 2003/03/18 21:15:17 fdrake Exp $"

#
# medusa status extension
#

import string
import time
import re

import asyncore
import http_server
import medusa_gif
import producers
from counter import counter

START_TIME = long(time.time())

class status_extension:
    hit_counter = counter()

    def __init__ (self, objects, statusdir='/status', allow_emergency_debug=0):
        self.objects = objects
        self.statusdir = statusdir
        self.allow_emergency_debug = allow_emergency_debug
        # We use /status instead of statusdir here because it's too
        # hard to pass statusdir to the logger, who makes the HREF
        # to the object dir.  We don't need the security-through-
        # obscurity here in any case, because the id is obscurity enough
        self.hyper_regex = re.compile ('/status/object/([0-9]+)/.*')
        self.hyper_objects = []
        for object in objects:
            self.register_hyper_object (object)

    def __repr__ (self):
        return '<Status Extension (%s hits) at %x>' % (
                self.hit_counter,
                id(self)
                )

    def match (self, request):
        path, params, query, fragment = request.split_uri()
        # For reasons explained above, we don't use statusdir for /object
        return (path[:len(self.statusdir)] == self.statusdir or
                path[:len("/status/object/")] == '/status/object/')

    # Possible Targets:
    # /status
    # /status/channel_list
    # /status/medusa.gif

    # can we have 'clickable' objects?
    # [yes, we can use id(x) and do a linear search]

    # Dynamic producers:
    # HTTP/1.0: we must close the channel, because it's dynamic output
    # HTTP/1.1: we can use the chunked transfer-encoding, and leave
    #   it open.

    def handle_request (self, request):
        [path, params, query, fragment] = split_path (request.uri)
        self.hit_counter.increment()
        if path == self.statusdir:          # and not a subdirectory
            up_time = string.join (english_time (long(time.time()) - START_TIME))
            request['Content-Type'] = 'text/html'
            request.push (
                    '<html>'
                    '<title>Medusa Status Reports</title>'
                    '<body bgcolor="#ffffff">'
                    '<h1>Medusa Status Reports</h1>'
                    '<b>Up:</b> %s' % up_time
                    )
            for i in range(len(self.objects)):
                request.push (self.objects[i].status())
                request.push ('<hr>\r\n')
            request.push (
                    '<p><a href="%s/channel_list">Channel List</a>'
                    '<hr>'
                    '<img src="%s/medusa.gif" align=right width=%d height=%d>'
                    '</body></html>' % (
                            self.statusdir,
                            self.statusdir,
                            medusa_gif.width,
                            medusa_gif.height
                            )
                    )
            request.done()
        elif path == self.statusdir + '/channel_list':
            request['Content-Type'] = 'text/html'
            request.push ('<html><body>')
            request.push (channel_list_producer (self.statusdir))
            request.push (
                    '<hr>'
                    '<img src="%s/medusa.gif" align=right width=%d height=%d>' % (
                            self.statusdir,
                            medusa_gif.width,
                            medusa_gif.height
                            ) +
                    '</body></html>'
                    )
            request.done()

        elif path == self.statusdir + '/medusa.gif':
            request['Content-Type'] = 'image/gif'
            request['Content-Length'] = len(medusa_gif.data)
            request.push (medusa_gif.data)
            request.done()

        elif path == self.statusdir + '/close_zombies':
            message = (
                    '<h2>Closing all zombie http client connections...</h2>'
                    '<p><a href="%s">Back to the status page</a>' % self.statusdir
                    )
            request['Content-Type'] = 'text/html'
            request['Content-Length'] = len (message)
            request.push (message)
            now = int (time.time())
            for channel in asyncore.socket_map.keys():
                if channel.__class__ == http_server.http_channel:
                    if channel != request.channel:
                        if (now - channel.creation_time) > channel.zombie_timeout:
                            channel.close()
            request.done()

        # Emergency Debug Mode
        # If a server is running away from you, don't KILL it!
        # Move all the AF_INET server ports and perform an autopsy...
        # [disabled by default to protect the innocent]
        elif self.allow_emergency_debug and path == self.statusdir + '/emergency_debug':
            request.push ('<html>Moving All Servers...</html>')
            request.done()
            for channel in asyncore.socket_map.keys():
                if channel.accepting:
                    if type(channel.addr) is type(()):
                        ip, port = channel.addr
                        channel.socket.close()
                        channel.del_channel()
                        channel.addr = (ip, port+10000)
                        fam, typ = channel.family_and_type
                        channel.create_socket (fam, typ)
                        channel.set_reuse_addr()
                        channel.bind (channel.addr)
                        channel.listen (5)

        else:
            m = self.hyper_regex.match (path)
            if m:
                oid = string.atoi (m.group (1))
                for object in self.hyper_objects:
                    if id (object) == oid:
                        if hasattr (object, 'hyper_respond'):
                            object.hyper_respond (self, path, request)
            else:
                request.error (404)
                return

    def status (self):
        return producers.simple_producer (
                '<li>Status Extension <b>Hits</b> : %s' % self.hit_counter
                )

    def register_hyper_object (self, object):
        if not object in self.hyper_objects:
            self.hyper_objects.append (object)

import logger

class logger_for_status (logger.tail_logger):

    def status (self):
        return 'Last %d log entries for: %s' % (
                len (self.messages),
                html_repr (self)
                )

    def hyper_respond (self, sh, path, request):
        request['Content-Type'] = 'text/plain'
        messages = self.messages[:]
        messages.reverse()
        request.push (lines_producer (messages))
        request.done()

class lines_producer:
    def __init__ (self, lines):
        self.lines = lines

    def ready (self):
        return len(self.lines)

    def more (self):
        if self.lines:
            chunk = self.lines[:50]
            self.lines = self.lines[50:]
            return string.join (chunk, '\r\n') + '\r\n'
        else:
            return ''
class channel_list_producer (lines_producer):
    def __init__ (self, statusdir):
        channel_reprs = map (
                lambda x: '&lt;' + repr(x)[1:-1] + '&gt;',
                asyncore.socket_map.values()
                )
        channel_reprs.sort()
        lines_producer.__init__ (
                self,
                ['<h1>Active Channel List</h1>',
                 '<pre>'
                 ] + channel_reprs + [
                        '</pre>',
                        '<p><a href="%s">Status Report</a>' % statusdir
                        ]
                )

# this really needs a full-blown quoter...
def sanitize (s):
    if '<' in s:
        s = string.join (string.split (s, '<'), '&lt;')
    if '>' in s:
        s = string.join (string.split (s, '>'), '&gt;')
    return s
def html_repr (object):
    so = sanitize (repr (object))
    if hasattr (object, 'hyper_respond'):
        return '<a href="/status/object/%d/">%s</a>' % (id (object), so)
    else:
        return so

def html_reprs (list, front='', back=''):
    reprs = map (
            lambda x, f=front, b=back: '%s%s%s' % (f, x, b),
            map (lambda x: sanitize (html_repr (x)), list)
            )
    reprs.sort()
    return reprs
# for example, tera, giga, mega, kilo
# p_d (n, (1024, 1024, 1024, 1024))
# smallest divider goes first - for example
# minutes, hours, days
# p_d (n, (60, 60, 24))

def progressive_divide (n, parts):
    result = []
    for part in parts:
        n, rem = divmod (n, part)
        result.append (rem)
    result.append (n)
    return result
# b,k,m,g,t
def split_by_units (n, units, dividers, format_string):
    divs = progressive_divide (n, dividers)
    result = []
    for i in range(len(units)):
        if divs[i]:
            result.append (format_string % (divs[i], units[i]))
    result.reverse()
    if not result:
        return [format_string % (0, units[0])]
    else:
        return result

def english_bytes (n):
    return split_by_units (
            n, ('', 'K', 'M', 'G', 'T'),
            (1024, 1024, 1024, 1024, 1024),
            '%d %sB'
            )

def english_time (n):
    return split_by_units (
            n, ('secs', 'mins', 'hours', 'days', 'weeks', 'years'),
            (60, 60, 24, 7, 52),
            '%d %s'
            )
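# A worked example (not in the original source): 90061 seconds is
# 1 day, 1 hour, 1 minute, 1 second, so
#
#     >>> progressive_divide (90061, (60, 60, 24, 7, 52))
#     [1, 1, 1, 1, 0, 0]
#     >>> english_time (90061)
#     ['1 days', '1 hours', '1 mins', '1 secs']
#
# (the last element of progressive_divide's result is whatever remains
# after the largest divider; split_by_units drops the zero entries).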
lib/python/ZServer/medusa/test/__init__.py
0 → 100644
# make test appear as a package
lib/python/ZServer/medusa/test/asyn_http_bench.py
0 → 100755
#! /usr/local/bin/python1.4
# -*- Mode: Python; tab-width: 4 -*-

import asyncore
import socket
import string
import sys

def blurt (thing):
    sys.stdout.write (thing)
    sys.stdout.flush ()

total_sessions = 0

class http_client (asyncore.dispatcher_with_send):
    def __init__ (self, host='127.0.0.1', port=80, uri='/', num=10):
        asyncore.dispatcher_with_send.__init__ (self)
        self.create_socket (socket.AF_INET, socket.SOCK_STREAM)
        self.host = host
        self.port = port
        self.uri = uri
        self.num = num
        self.bytes = 0
        self.connect ((host, port))

    def log (self, *info):
        pass

    def handle_connect (self):
        self.connected = 1
        # blurt ('o')
        self.send ('GET %s HTTP/1.0\r\n\r\n' % self.uri)

    def handle_read (self):
        # blurt ('.')
        d = self.recv (8192)
        self.bytes = self.bytes + len(d)

    def handle_close (self):
        global total_sessions
        # blurt ('(%d)' % (self.bytes))
        self.close()
        total_sessions = total_sessions + 1
        if self.num:
            http_client (self.host, self.port, self.uri, self.num - 1)

import time
class timer:
    def __init__ (self):
        self.start = time.time()
    def end (self):
        return time.time() - self.start

from asyncore import socket_map, poll

MAX = 0

def loop (timeout=30.0):
    global MAX
    while socket_map:
        if len(socket_map) > MAX:
            MAX = len(socket_map)
        poll (timeout)

if __name__ == '__main__':
    if len(sys.argv) < 6:
        print 'usage: %s <host> <port> <uri> <hits> <num_clients>' % sys.argv[0]
    else:
        [host, port, uri, hits, num] = sys.argv[1:]
        hits = string.atoi (hits)
        num = string.atoi (num)
        port = string.atoi (port)
        t = timer()
        clients = map (lambda x: http_client (host, port, uri, hits-1), range(num))
        #import profile
        #profile.run ('loop')
        loop()
        total_time = t.end()
        print (
                '\n%d clients\n%d hits/client\n'
                'total_hits:%d\n%.3f seconds\ntotal hits/sec:%.3f' % (
                        num,
                        hits,
                        total_sessions,
                        total_time,
                        total_sessions / total_time
                        )
                )
        print 'Max. number of concurrent sessions: %d' % (MAX)

# linux 2.x, talking to medusa
# 50 clients
# 1000 hits/client
# total_hits:50000
# 2255.858 seconds
# total hits/sec:22.165
# Max. number of concurrent sessions: 50
lib/python/ZServer/medusa/test/max_sockets.py
0 → 100644
import socket
import select

# several factors here we might want to test:
# 1) max we can create
# 2) max we can bind
# 3) max we can listen on
# 4) max we can connect

def max_server_sockets():
    sl = []
    while 1:
        try:
            s = socket.socket (socket.AF_INET, socket.SOCK_STREAM)
            s.bind (('', 0))
            s.listen (5)
            sl.append (s)
        except:
            break
    num = len(sl)
    for s in sl:
        s.close()
    del sl
    return num

def max_client_sockets():
    # make a server socket
    server = socket.socket (socket.AF_INET, socket.SOCK_STREAM)
    server.bind (('', 9999))
    server.listen (5)
    sl = []
    while 1:
        try:
            s = socket.socket (socket.AF_INET, socket.SOCK_STREAM)
            s.connect (('', 9999))
            conn, addr = server.accept()
            sl.append ((s, conn))
        except:
            break
    num = len(sl)
    for s, c in sl:
        s.close()
        c.close()
    del sl
    return num

def max_select_sockets():
    sl = []
    while 1:
        try:
            num = len(sl)
            # Increase exponentially.  [int() added: range() needs an
            # integer argument]
            for i in range (int (1 + len(sl) * 0.05)):
                s = socket.socket (socket.AF_INET, socket.SOCK_STREAM)
                s.bind (('', 0))
                s.listen (5)
                sl.append (s)
            select.select (sl, [], [], 0)
        except:
            break
    for s in sl:
        s.close()
    del sl
    return num
lib/python/ZServer/medusa/test/test_11.py
0 → 100644
# -*- Mode: Python; tab-width: 4 -*-

import asyncore
import asynchat
import socket
import string

# get some performance figures for an HTTP/1.1 server.
# use pipelining.

class test_client (asynchat.async_chat):

    ac_in_buffer_size = 16384
    ac_out_buffer_size = 16384

    total_in = 0

    concurrent = 0
    max_concurrent = 0

    def __init__ (self, addr, chain):
        asynchat.async_chat.__init__ (self)
        self.create_socket (socket.AF_INET, socket.SOCK_STREAM)
        self.set_terminator ('\r\n\r\n')
        self.connect (addr)
        self.push (chain)

    def handle_connect (self):
        test_client.concurrent = test_client.concurrent + 1
        if (test_client.concurrent > test_client.max_concurrent):
            test_client.max_concurrent = test_client.concurrent

    def handle_expt (self):
        print 'unexpected FD_EXPT thrown.  closing()'
        self.close()

    def close (self):
        test_client.concurrent = test_client.concurrent - 1
        asynchat.async_chat.close (self)

    def collect_incoming_data (self, data):
        test_client.total_in = test_client.total_in + len(data)

    def found_terminator (self):
        pass

    def log (self, *args):
        pass

import time
class timer:
    def __init__ (self):
        self.start = time.time()
    def end (self):
        return time.time() - self.start

def build_request_chain (num, host, request_size):
    s = 'GET /test%d.html HTTP/1.1\r\nHost: %s\r\n\r\n' % (request_size, host)
    sl = [s] * (num - 1)
    sl.append (
            'GET /test%d.html HTTP/1.1\r\nHost: %s\r\nConnection: close\r\n\r\n' % (
                    request_size, host
                    )
            )
    return string.join (sl, '')
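# For illustration (not in the original source),
# build_request_chain (2, 'localhost', 1024) produces two pipelined
# requests for one connection, the last one asking the server to close:
#
#     'GET /test1024.html HTTP/1.1\r\nHost: localhost\r\n\r\n'
#     'GET /test1024.html HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n'
#
# joined into a single string that is pushed down the socket in one go.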
if __name__ == '__main__':
    import string
    import sys
    if len(sys.argv) != 6:
        print 'usage: %s <host> <port> <request-size> <num-requests> <num-connections>\n' % sys.argv[0]
    else:
        host = sys.argv[1]
        ip = socket.gethostbyname (host)
        [port, request_size, num_requests, num_conns] = map (
                string.atoi, sys.argv[2:]
                )
        chain = build_request_chain (num_requests, host, request_size)
        t = timer()
        for i in range (num_conns):
            test_client ((host, port), chain)
        asyncore.loop()
        total_time = t.end()

        # ok, now do some numbers
        total_bytes = test_client.total_in
        num_trans = num_requests * num_conns
        throughput = float (total_bytes) / total_time
        trans_per_sec = num_trans / total_time

        sys.stderr.write ('total time: %.2f\n' % total_time)
        sys.stderr.write ('number of transactions: %d\n' % num_trans)
        sys.stderr.write ('total bytes sent: %d\n' % total_bytes)
        sys.stderr.write ('total throughput (bytes/sec): %.2f\n' % throughput)
        sys.stderr.write ('transactions/second: %.2f\n' % trans_per_sec)
        sys.stderr.write ('max concurrent connections: %d\n' % test_client.max_concurrent)

        sys.stdout.write (
                string.join (
                        map (str, (num_conns, num_requests, request_size, throughput, trans_per_sec)),
                        ','
                        ) + '\n'
                )
lib/python/ZServer/medusa/test/test_lb.py
0 → 100644
# -*- Mode: Python; tab-width: 4 -*-

# Get a lower bound for Medusa performance with a simple async
# client/server benchmark built on the async lib.  The idea is to test
# all the underlying machinery [select, asyncore, asynchat, etc...] in
# a context where there is virtually no processing of the data.

import socket
import select
import sys

# ==================================================
# server
# ==================================================

import asyncore
import asynchat

class test_channel (asynchat.async_chat):

    ac_in_buffer_size = 16384
    ac_out_buffer_size = 16384

    total_in = 0

    def __init__ (self, conn, addr):
        asynchat.async_chat.__init__ (self, conn)
        self.set_terminator ('\r\n\r\n')
        self.buffer = ''

    def collect_incoming_data (self, data):
        self.buffer = self.buffer + data
        test_channel.total_in = test_channel.total_in + len(data)

    def found_terminator (self):
        # we've gotten the data, now send it back
        data = self.buffer
        self.buffer = ''
        self.push (data + '\r\n\r\n')

    def handle_close (self):
        sys.stdout.write ('.'); sys.stdout.flush()
        self.close()

    def log (self, *args):
        pass

class test_server (asyncore.dispatcher):
    def __init__ (self, addr):
        if type(addr) == type(''):
            f = socket.AF_UNIX
        else:
            f = socket.AF_INET
        self.create_socket (f, socket.SOCK_STREAM)
        self.bind (addr)
        self.listen (5)
        print 'server started on', addr

    def handle_accept (self):
        conn, addr = self.accept()
        test_channel (conn, addr)

# ==================================================
# client
# ==================================================

# pretty much the same behavior, except that we kick
# off the exchange and decide when to quit

class test_client (test_channel):
    def __init__ (self, addr, packet, number):
        if type(addr) == type(''):
            f = socket.AF_UNIX
        else:
            f = socket.AF_INET
        asynchat.async_chat.__init__ (self)
        self.create_socket (f, socket.SOCK_STREAM)
        self.set_terminator ('\r\n\r\n')
        self.buffer = ''
        self.connect (addr)
        self.push (packet + '\r\n\r\n')
        self.number = number
        self.count = 0

    def handle_connect (self):
        pass

    def found_terminator (self):
        self.count = self.count + 1
        if self.count == self.number:
            sys.stdout.write ('.'); sys.stdout.flush()
            self.close()
        else:
            test_channel.found_terminator (self)

import time
class timer:
    def __init__ (self):
        self.start = time.time()
    def end (self):
        return time.time() - self.start

if __name__ == '__main__':
    import string

    if '--poll' in sys.argv:
        sys.argv.remove ('--poll')
        use_poll = 1
    else:
        use_poll = 0

    if len(sys.argv) == 1:
        print 'usage: %s\n' \
                '  (as a server) [--poll] -s <ip> <port>\n' \
                '  (as a client) [--poll] -c <ip> <port> <packet-size> <num-packets> <num-connections>\n' % sys.argv[0]
        sys.exit (0)

    if sys.argv[1] == '-s':
        s = test_server ((sys.argv[2], string.atoi (sys.argv[3])))
        asyncore.loop (use_poll=use_poll)
    elif sys.argv[1] == '-c':
        # create the packet
        packet = string.atoi (sys.argv[4]) * 'B'
        host = sys.argv[2]
        port = string.atoi (sys.argv[3])
        num_packets = string.atoi (sys.argv[5])
        num_conns = string.atoi (sys.argv[6])

        t = timer()
        for i in range (num_conns):
            test_client ((host, port), packet, num_packets)
        asyncore.loop (use_poll=use_poll)
        total_time = t.end()

        # ok, now do some numbers
        bytes = test_client.total_in
        num_trans = num_packets * num_conns
        total_bytes = num_trans * len(packet)
        throughput = float (total_bytes) / total_time
        trans_per_sec = num_trans / total_time

        sys.stderr.write ('total time: %.2f\n' % total_time)
        sys.stderr.write ('number of transactions: %d\n' % num_trans)
        sys.stderr.write ('total bytes sent: %d\n' % total_bytes)
        sys.stderr.write ('total throughput (bytes/sec): %.2f\n' % throughput)
        sys.stderr.write (' [note, throughput is this amount in each direction]\n')
        sys.stderr.write ('transactions/second: %.2f\n' % trans_per_sec)

        sys.stdout.write (
                string.join (
                        map (str, (num_conns, num_packets, len(packet), throughput, trans_per_sec)),
                        ','
                        ) + '\n'
                )
lib/python/ZServer/medusa/test/test_medusa.py
0 → 100644
# -*- Mode: Python; tab-width: 4 -*-

import socket
import string
import time
import http_date

now = http_date.build_http_date (time.time())

cache_request = string.joinfields (
        ['GET / HTTP/1.0',
         'If-Modified-Since: %s' % now,
         ],
        '\r\n'
        ) + '\r\n\r\n'

nocache_request = 'GET / HTTP/1.0\r\n\r\n'

def get (request, host='', port=80):
    s = socket.socket (socket.AF_INET, socket.SOCK_STREAM)
    s.connect ((host, port))
    s.send (request)
    while 1:
        d = s.recv (8192)
        if not d:
            break
    s.close()

class timer:
    def __init__ (self):
        self.start = time.time()
    def end (self):
        return time.time() - self.start

def test_cache (n=1000):
    t = timer()
    for i in xrange (n):
        get (cache_request)
    end = t.end()
    print 'cache: %d requests, %.2f seconds, %.2f hits/sec' % (n, end, n/end)

def test_nocache (n=1000):
    t = timer()
    for i in xrange (n):
        get (nocache_request)
    end = t.end()
    print 'nocache: %d requests, %.2f seconds, %.2f hits/sec' % (n, end, n/end)

if __name__ == '__main__':
    test_cache()
    test_nocache()
lib/python/ZServer/medusa/test/test_single_11.py
0 → 100644
# -*- Mode: Python; tab-width: 4 -*-

# no-holds barred, test a single channel's pipelining speed

import string
import socket

def build_request_chain (num, host, request_size):
    s = 'GET /test%d.html HTTP/1.1\r\nHost: %s\r\n\r\n' % (request_size, host)
    sl = [s] * (num - 1)
    sl.append (
            'GET /test%d.html HTTP/1.1\r\nHost: %s\r\nConnection: close\r\n\r\n' % (
                    request_size, host
                    )
            )
    return string.join (sl, '')

import time
class timer:
    def __init__ (self):
        self.start = time.time()
    def end (self):
        return time.time() - self.start

if __name__ == '__main__':
    import sys
    if len(sys.argv) != 5:
        print 'usage: %s <host> <port> <request-size> <num-requests>' % (sys.argv[0])
    else:
        host = sys.argv[1]
        [port, request_size, num_requests] = map (
                string.atoi, sys.argv[2:]
                )
        chain = build_request_chain (num_requests, host, request_size)
        import socket
        s = socket.socket (socket.AF_INET, socket.SOCK_STREAM)
        s.connect ((host, port))
        t = timer()
        s.send (chain)
        num_bytes = 0
        while 1:
            data = s.recv (16384)
            if not data:
                break
            else:
                num_bytes = num_bytes + len(data)
        total_time = t.end()
        print 'total bytes received: %d' % num_bytes
        print 'total time: %.2f sec' % (total_time)
        print 'transactions/sec: %.2f' % (num_requests / total_time)
lib/python/ZServer/medusa/test/tests.txt
0 → 100644
# server: linux, 486dx2/66
# client: win95, cyrix 6x86 p166+
# over ethernet.
#
# number of connections
# | number of requests per connection
# | | packet size
# | | | throughput (bytes/sec)
# | | | | transactions/sec
# | | | | |
1 50 64 3440.86 53.76
1 100 64 3422.45 53.47
1 1 256 5120.00 20.00
1 50 256 13763.44 53.76
1 100 256 13333.33 52.08
1 1 1024 6400.00 6.25
1 50 1024 6909.58 6.74
1 100 1024 6732.41 6.57
1 1 4096 14628.56 3.57
1 50 4096 17181.20 4.19
1 100 4096 16835.18 4.11
5 1 64 1882.35 29.41
5 50 64 3990.02 62.34
5 100 64 3907.20 61.05
5 1 256 5818.18 22.72
5 50 256 15533.98 60.67
5 100 256 15744.15 61.50
5 1 1024 15515.14 15.15
5 50 1024 23188.40 22.64
5 100 1024 23659.88 23.10
5 1 4096 28444.44 6.94
5 50 4096 34913.05 8.52
5 100 4096 35955.05 8.77
10 1 64 191.04 2.98
10 50 64 4045.51 63.21
10 100 64 4045.51 63.21
10 1 256 764.17 2.98
10 50 256 15552.85 60.75
10 100 256 15581.25 60.86
10 1 1024 2959.53 2.89
10 50 1024 25061.18 24.47
10 100 1024 25498.00 24.90
10 1 4096 11314.91 2.76
10 50 4096 39002.09 9.52
10 100 4096 38780.53 9.46
15 1 64 277.45 4.33
15 50 64 4067.79 63.55
15 100 64 4083.36 63.80
15 1 256 386.31 1.50
15 50 256 15262.32 59.61
15 100 256 15822.00 61.80
15 1 1024 1528.35 1.49
15 50 1024 27263.04 26.62
15 100 1024 27800.90 27.14
15 1 4096 6047.24 1.47
15 50 4096 39695.05 9.69
15 100 4096 37112.65 9.06
20 1 64 977.09 15.26
20 50 64 2538.67 39.66
20 100 64 3377.30 52.77
20 1 256 221.93 0.86
20 50 256 10815.37 42.24
20 100 256 15880.89 62.03
20 1 1024 883.52 0.86
20 50 1024 29315.77 28.62
20 100 1024 29569.73 28.87
20 1 4096 7892.10 1.92
20 50 4096 40223.90 9.82
20 100 4096 41325.73 10.08
#
# There's a big gap in trans/sec between 256 and 1024 bytes, we should
# probably stick a 512 in there.
#
lib/python/ZServer/medusa/thread/__init__.py
0 → 100644
# make thread appear as a package
lib/python/ZServer/medusa/thread/pi_module.py
0 → 100644
# -*- Mode: Python; tab-width: 4 -*-

# [reworking of the version in Python-1.5.1/Demo/scripts/pi.py]

# Print digits of pi forever.
#
# The algorithm, using Python's 'long' integers ("bignums"), works
# with continued fractions, and was conceived by Lambert Meertens.
#
# See also the ABC Programmer's Handbook, by Geurts, Meertens & Pemberton,
# published by Prentice-Hall (UK) Ltd., 1990.

import string

StopException = "Stop!"

def go (file):
    try:
        k, a, b, a1, b1 = 2L, 4L, 1L, 12L, 4L
        while 1:
            # Next approximation
            p, q, k = k*k, 2L*k+1L, k+1L
            a, b, a1, b1 = a1, b1, p*a+q*a1, p*b+q*b1
            # Print common digits
            d, d1 = a/b, a1/b1
            while d == d1:
                if file.write (str(int(d))):
                    raise StopException
                a, a1 = 10L*(a%b), 10L*(a1%b1)
                d, d1 = a/b, a1/b1
    except StopException:
        return

class line_writer:

    "partition the endless line into 80-character ones"

    def __init__ (self, file, digit_limit=10000):
        self.file = file
        self.buffer = ''
        self.count = 0
        self.digit_limit = digit_limit

    def write (self, data):
        self.buffer = self.buffer + data
        if len(self.buffer) > 80:
            line, self.buffer = self.buffer[:80], self.buffer[80:]
            self.file.write (line + '\r\n')
            self.count = self.count + 80
            if self.count > self.digit_limit:
                return 1
            else:
                return 0

def main (env, stdin, stdout):
    parts = string.split (env['REQUEST_URI'], '/')
    if len(parts) >= 3:
        ndigits = string.atoi (parts[2])
    else:
        ndigits = 5000
    stdout.write ('Content-Type: text/plain\r\n\r\n')
    go (line_writer (stdout, ndigits))
lib/python/ZServer/medusa/thread/select_trigger.py
0 → 100644
# -*- Mode: Python; tab-width: 4 -*-

VERSION_STRING = "$Id: select_trigger.py,v 1.6 2003/03/18 21:17:03 fdrake Exp $"

import asyncore
import asynchat

import os
import socket
import string
import thread

if os.name == 'posix':

    class trigger (asyncore.file_dispatcher):

        "Wake up a call to select() running in the main thread"

        # This is useful in a context where you are using Medusa's I/O
        # subsystem to deliver data, but the data is generated by another
        # thread.  Normally, if Medusa is in the middle of a call to
        # select(), new output data generated by another thread will have
        # to sit until the call to select() either times out or returns.
        # If the trigger is 'pulled' by another thread, it should immediately
        # generate a READ event on the trigger object, which will force the
        # select() invocation to return.

        # A common use for this facility: letting Medusa manage I/O for a
        # large number of connections; but routing each request through a
        # thread chosen from a fixed-size thread pool.  When a thread is
        # acquired, a transaction is performed, but output data is
        # accumulated into buffers that will be emptied more efficiently
        # by Medusa. [picture a server that can process database queries
        # rapidly, but doesn't want to tie up threads waiting to send data
        # to low-bandwidth connections]

        # The other major feature provided by this class is the ability to
        # move work back into the main thread: if you call pull_trigger()
        # with a thunk argument, when select() wakes up and receives the
        # event it will call your thunk from within that thread.  The main
        # purpose of this is to remove the need to wrap thread locks around
        # Medusa's data structures, which normally do not need them.  [To see
        # why this is true, imagine this scenario: A thread tries to push some
        # new data onto a channel's outgoing data queue at the same time that
        # the main thread is trying to remove some]

        def __init__ (self):
            r, w = os.pipe()
            self.trigger = w
            asyncore.file_dispatcher.__init__ (self, r)
            self.lock = thread.allocate_lock()
            self.thunks = []

        def __repr__ (self):
            return '<select-trigger (pipe) at %x>' % id(self)

        def readable (self):
            return 1

        def writable (self):
            return 0

        def handle_connect (self):
            pass

        def pull_trigger (self, thunk=None):
            # print 'PULL_TRIGGER: ', len(self.thunks)
            if thunk:
                try:
                    self.lock.acquire()
                    self.thunks.append (thunk)
                finally:
                    self.lock.release()
            os.write (self.trigger, 'x')

        def handle_read (self):
            self.recv (8192)
            try:
                self.lock.acquire()
                for thunk in self.thunks:
                    try:
                        thunk()
                    except:
                        (file, fun, line), t, v, tbinfo = asyncore.compact_traceback()
                        print 'exception in trigger thunk: (%s:%s %s)' % (t, v, tbinfo)
                self.thunks = []
            finally:
                self.lock.release()

else:

    # win32-safe version

    class trigger (asyncore.dispatcher):

        address = ('127.9.9.9', 19999)

        def __init__ (self):
            a = socket.socket (socket.AF_INET, socket.SOCK_STREAM)
            w = socket.socket (socket.AF_INET, socket.SOCK_STREAM)

            # set TCP_NODELAY to true to avoid buffering
            w.setsockopt (socket.IPPROTO_TCP, 1, 1)

            # tricky: get a pair of connected sockets
            host = '127.0.0.1'
            port = 19999
            while 1:
                try:
                    self.address = (host, port)
                    a.bind (self.address)
                    break
                except:
                    if port <= 19950:
                        raise 'Bind Error', 'Cannot bind trigger!'
                    port = port - 1

            a.listen (1)
            w.setblocking (0)
            try:
                w.connect (self.address)
            except:
                pass
            r, addr = a.accept()
            a.close()
            w.setblocking (1)
            self.trigger = w

            asyncore.dispatcher.__init__ (self, r)
            self.lock = thread.allocate_lock()
            self.thunks = []
            self._trigger_connected = 0

        def __repr__ (self):
            return '<select-trigger (loopback) at %x>' % id(self)

        def readable (self):
            return 1

        def writable (self):
            return 0

        def handle_connect (self):
            pass

        def pull_trigger (self, thunk=None):
            if thunk:
                try:
                    self.lock.acquire()
                    self.thunks.append (thunk)
                finally:
                    self.lock.release()
            self.trigger.send ('x')

        def handle_read (self):
            self.recv (8192)
            try:
                self.lock.acquire()
                for thunk in self.thunks:
                    try:
                        thunk()
                    except:
                        (file, fun, line), t, v, tbinfo = asyncore.compact_traceback()
                        print 'exception in trigger thunk: (%s:%s %s)' % (t, v, tbinfo)
                self.thunks = []
            finally:
                self.lock.release()
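# A minimal usage sketch (not part of the original source; <channel> and
# <data> stand for an asynchat channel owned by the main thread and a
# string produced by a worker thread):
#
#     t = trigger()
#     def deliver (data=data, channel=channel):
#         channel.push (data)        # runs later, in the main select() thread
#     t.pull_trigger (deliver)       # safe to call from the worker thread
#
# trigger_file below packages exactly this pattern behind a file-like
# write()/flush()/close() interface.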
the_trigger = None

class trigger_file:

    "A 'triggered' file object"

    buffer_size = 4096

    def __init__ (self, parent):
        global the_trigger
        if the_trigger is None:
            the_trigger = trigger()
        self.parent = parent
        self.buffer = ''

    def write (self, data):
        self.buffer = self.buffer + data
        if len(self.buffer) > self.buffer_size:
            d, self.buffer = self.buffer, ''
            the_trigger.pull_trigger (
                    lambda d=d, p=self.parent: p.push (d)
                    )

    def writeline (self, line):
        self.write (line + '\r\n')

    def writelines (self, lines):
        self.write (
                string.joinfields (
                        lines,
                        '\r\n'
                        ) + '\r\n'
                )

    def flush (self):
        if self.buffer:
            d, self.buffer = self.buffer, ''
            the_trigger.pull_trigger (
                    lambda p=self.parent, d=d: p.push (d)
                    )

    def softspace (self, *args):
        pass

    def close (self):
        # in a derived class, you may want to call trigger_close() instead.
        self.flush()
        self.parent = None

    def trigger_close (self):
        d, self.buffer = self.buffer, ''
        p, self.parent = self.parent, None
        the_trigger.pull_trigger (
                lambda p=p, d=d: (p.push (d), p.close_when_done())
                )
if __name__ == '__main__':

    import time

    def thread_function (output_file, i, n):
        print 'entering thread_function'
        while n:
            time.sleep (5)
            output_file.write ('%2d.%2d %s\r\n' % (i, n, output_file))
            output_file.flush()
            n = n - 1
        output_file.close()
        print 'exiting thread_function'

    class thread_parent (asynchat.async_chat):

        def __init__ (self, conn, addr):
            self.addr = addr
            asynchat.async_chat.__init__ (self, conn)
            self.set_terminator ('\r\n')
            self.buffer = ''
            self.count = 0

        def collect_incoming_data (self, data):
            self.buffer = self.buffer + data

        def found_terminator (self):
            data, self.buffer = self.buffer, ''
            if not data:
                asyncore.close_all()
                print "done"
                return
            n = string.atoi (string.split (data)[0])
            tf = trigger_file (self)
            self.count = self.count + 1
            thread.start_new_thread (thread_function, (tf, self.count, n))

    class thread_server (asyncore.dispatcher):

        def __init__ (self, family=socket.AF_INET, address=('', 9003)):
            asyncore.dispatcher.__init__ (self)
            self.create_socket (family, socket.SOCK_STREAM)
            self.set_reuse_addr()
            self.bind (address)
            self.listen (5)

        def handle_accept (self):
            conn, addr = self.accept()
            tp = thread_parent (conn, addr)

    thread_server()
    #asyncore.loop(1.0, use_poll=1)
    try:
        asyncore.loop()
    except:
        asyncore.close_all()
lib/python/ZServer/medusa/thread/test_module.py
0 → 100644
# -*- Mode: Python; tab-width: 4 -*-

import pprint

def main (env, stdin, stdout):
    stdout.write (
            '<html><body><h1>Test CGI Module</h1>\r\n'
            '<br>The Environment:<pre>\r\n'
            )
    pprint.pprint (env, stdout)
    stdout.write ('</pre></body></html>\r\n')
lib/python/ZServer/medusa/thread/thread_channel.py
0 → 100644
# -*- Mode: Python; tab-width: 4 -*-

VERSION_STRING = "$Id: thread_channel.py,v 1.6 2003/03/18 21:17:03 fdrake Exp $"

# This will probably only work on Unix.

# The disadvantage to this technique is that it wastes file
# descriptors (especially when compared to select_trigger.py)

# May be possible to do it on Win32, using TCP localhost sockets.
# [does winsock support 'socketpair'?]

import asyncore
import asynchat

import fcntl
import os
import socket
import string
import thread

try:
    from fcntl import F_GETFL, F_SETFL, O_NDELAY
except ImportError:
    from FCNTL import F_GETFL, F_SETFL, O_NDELAY

# this channel slaves off of another one.  it starts a thread which
# pumps its output through the 'write' side of the pipe.  The 'read'
# side of the pipe will then notify us when data is ready.  We push
# this data on the owning data channel's output queue.

class thread_channel (asyncore.file_dispatcher):

    buffer_size = 8192

    def __init__ (self, channel, function, *args):
        self.parent = channel
        self.function = function
        self.args = args
        self.pipe = rfd, wfd = os.pipe()
        asyncore.file_dispatcher.__init__ (self, rfd)

    def start (self):
        rfd, wfd = self.pipe

        # The read side of the pipe is set to non-blocking I/O; it is
        # 'owned' by medusa.

        flags = fcntl.fcntl (rfd, F_GETFL, 0)
        fcntl.fcntl (rfd, F_SETFL, flags | O_NDELAY)

        # The write side of the pipe is left in blocking mode; it is
        # 'owned' by the thread.  However, we wrap it up as a file object.
        # [who wants to 'write()' to a number?]

        of = os.fdopen (wfd, 'w')

        thread.start_new_thread (
                self.function,
                # put the output file in front of the other arguments
                (of,) + self.args
                )

    def writable (self):
        return 0

    def readable (self):
        return 1

    def handle_read (self):
        data = self.recv (self.buffer_size)
        self.parent.push (data)

    def handle_close (self):
        # Depending on your intentions, you may want to close
        # the parent channel here.
        self.close()

# Yeah, it's bad when the test code is bigger than the library code.

if __name__ == '__main__':

    import time

    def thread_function (output_file, i, n):
        print 'entering thread_function'
        while n:
            time.sleep (5)
            output_file.write ('%2d.%2d %s\r\n' % (i, n, output_file))
            output_file.flush()
            n = n - 1
        output_file.close()
        print 'exiting thread_function'

    class thread_parent (asynchat.async_chat):

        def __init__ (self, conn, addr):
            self.addr = addr
            asynchat.async_chat.__init__ (self, conn)
            self.set_terminator ('\r\n')
            self.buffer = ''
            self.count = 0

        def collect_incoming_data (self, data):
            self.buffer = self.buffer + data

        def found_terminator (self):
            data, self.buffer = self.buffer, ''
            n = string.atoi (string.split (data)[0])
            tc = thread_channel (self, thread_function, self.count, n)
            self.count = self.count + 1
            tc.start()

    class thread_server (asyncore.dispatcher):

        def __init__ (self, family=socket.AF_INET, address=('127.0.0.1', 9003)):
            asyncore.dispatcher.__init__ (self)
            self.create_socket (family, socket.SOCK_STREAM)
            self.set_reuse_addr()
            self.bind (address)
            self.listen (5)

        def handle_accept (self):
            conn, addr = self.accept()
            tp = thread_parent (conn, addr)

    thread_server()
    #asyncore.loop(1.0, use_poll=1)
    asyncore.loop()
lib/python/ZServer/medusa/thread/thread_handler.py
0 → 100644
# -*- Mode: Python; tab-width: 4 -*-

import re
import string
import StringIO
import sys

import os
import sys
import time

import counter
import select_trigger
import producers

from default_handler import split_path, unquote, get_header

import fifo

import threading

class request_queue:

    def __init__ (self):
        self.mon = threading.RLock()
        self.cv = threading.Condition (self.mon)
        self.queue = fifo.fifo()

    def put (self, item):
        self.cv.acquire()
        self.queue.push (item)
        self.cv.notify()
        self.cv.release()

    def get (self):
        self.cv.acquire()
        while not self.queue:
            self.cv.wait()
        result = self.queue.pop()
        self.cv.release()
        return result
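# A sketch of the consumer side (not in the original source): worker
# threads block in get() and run the queued jobs; continue_request()
# below is the producer that calls put() with (main, (env, stdin, stdout))
# tuples.
#
#     import thread
#
#     def worker (queue):
#         while 1:
#             function, (env, stdin, stdout) = queue.get()
#             function (env, stdin, stdout)
#
#     q = request_queue()
#     for i in range (4):
#         thread.start_new_thread (worker, (q,))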
header2env = {
    'Content-Length'    : 'CONTENT_LENGTH',
    'Content-Type'      : 'CONTENT_TYPE',
    'Referer'           : 'HTTP_REFERER',
    'User-Agent'        : 'HTTP_USER_AGENT',
    'Accept'            : 'HTTP_ACCEPT',
    'Accept-Charset'    : 'HTTP_ACCEPT_CHARSET',
    'Accept-Language'   : 'HTTP_ACCEPT_LANGUAGE',
    'Host'              : 'HTTP_HOST',
    'Connection'        : 'CONNECTION_TYPE',
    'Authorization'     : 'HTTP_AUTHORIZATION',
    'Cookie'            : 'HTTP_COOKIE',
    }

# convert keys to lower case for case-insensitive matching
for (key, value) in header2env.items():
    del header2env[key]
    key = string.lower (key)
    header2env[key] = value
class thread_output_file (select_trigger.trigger_file):

    def close (self):
        self.trigger_close()
class script_handler:

    def __init__ (self, queue, document_root=""):
        self.modules = {}
        self.document_root = document_root
        self.queue = queue

    def add_module (self, module, *names):
        if not names:
            names = ["/%s" % module.__name__]
        for name in names:
            self.modules['/' + name] = module

    def match (self, request):
        uri = request.uri
        i = string.find (uri, "/", 1)
        if i != -1:
            uri = uri[:i]
        i = string.find (uri, "?", 1)
        if i != -1:
            uri = uri[:i]
        if self.modules.has_key (uri):
            request.module = self.modules[uri]
            return 1
        else:
            return 0
    def handle_request (self, request):
        [path, params, query, fragment] = split_path (request.uri)

        while path and path[0] == '/':
            path = path[1:]

        if '%' in path:
            path = unquote (path)

        env = {}
        env['REQUEST_URI'] = "/" + path
        env['REQUEST_METHOD'] = string.upper (request.command)
        env['SERVER_PORT'] = str (request.channel.server.port)
        env['SERVER_NAME'] = request.channel.server.server_name
        env['SERVER_SOFTWARE'] = request['Server']
        env['DOCUMENT_ROOT'] = self.document_root

        parts = string.split (path, "/")

        # are script_name and path_info ok?
        env['SCRIPT_NAME'] = "/" + parts[0]

        if query and query[0] == "?":
            query = query[1:]

        env['QUERY_STRING'] = query

        try:
            path_info = "/" + string.join (parts[1:], "/")
        except:
            path_info = ''

        env['PATH_INFO'] = path_info
        env['GATEWAY_INTERFACE'] = 'CGI/1.1'    # what should this really be?
        env['REMOTE_ADDR'] = request.channel.addr[0]
        env['REMOTE_HOST'] = request.channel.addr[0]    # TODO: connect to resolver

        for header in request.header:
            [key, value] = string.split (header, ": ", 1)
            key = string.lower (key)

            if header2env.has_key (key):
                if header2env[key]:
                    env[header2env[key]] = value
            else:
                key = 'HTTP_' + string.upper (
                    string.join (string.split (key, "-"), "_")
                    )
                env[key] = value

        ## remove empty environment variables
        for key in env.keys():
            if env[key] == "" or env[key] == None:
                del env[key]

        try:
            httphost = env['HTTP_HOST']
            parts = string.split (httphost, ":")
            env['HTTP_HOST'] = parts[0]
        except KeyError:
            pass

        if request.command in ('put', 'post'):
            # PUT data requires a correct Content-Length: header
            # (though I bet with http/1.1 we can expect chunked encoding)
            request.collector = collector (self, request, env)
            request.channel.set_terminator (None)
        else:
            sin = StringIO.StringIO ('')
            self.continue_request (sin, request, env)
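The loop over request.header above folds any header not listed in header2env into a CGI-style HTTP_* variable. A quick illustration, using a hypothetical 'X-Forwarded-For' header and the same string-module calls:

# Illustration of the HTTP_* fallback above (hypothetical header).
import string

header = 'X-Forwarded-For: 10.0.0.1'
key, value = string.split (header, ": ", 1)
key = string.lower (key)
key = 'HTTP_' + string.upper (string.join (string.split (key, "-"), "_"))
print key, '=', value   # HTTP_X_FORWARDED_FOR = 10.0.0.1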
    def continue_request (self, stdin, request, env):
        stdout = header_scanning_file (
            request,
            thread_output_file (request.channel)
            )
        self.queue.put (
            (request.module.main, (env, stdin, stdout))
            )
HEADER_LINE = re.compile ('([A-Za-z0-9-]+): ([^\r\n]+)')
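HEADER_LINE accepts RFC-822-style 'Name: value' lines; anything else marks the end of the header block. For example:

# What HEADER_LINE does and does not match (illustration):
print HEADER_LINE.match ('Status: 302 Moved') is not None   # matches: a header line
print HEADER_LINE.match ('<html><body>') is not None        # no match: body text
print HEADER_LINE.match ('') is not None                    # no match: blank separator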
# A file wrapper that handles the CGI 'Status:' header hack
# by scanning the output.

class header_scanning_file:

    def __init__ (self, request, file):
        self.buffer = ''
        self.request = request
        self.file = file
        self.got_header = 0
        self.bytes_out = counter.counter()

    def write (self, data):
        if self.got_header:
            self._write (data)
        else:
            # CGI scripts may optionally provide extra headers.
            #
            # If they do not, then the output is assumed to be
            # text/html, with an HTTP reply code of '200 OK'.
            #
            # If they do, we need to scan those headers for one in
            # particular: the 'Status:' header, which will tell us
            # to use a different HTTP reply code [like '302 Moved']
            #
            self.buffer = self.buffer + data
            lines = string.split (self.buffer, '\n')
            # ignore the last piece, it is either empty, or a partial line
            lines = lines[:-1]
            # look for something un-header-like
            for i in range (len (lines)):
                li = lines[i]
                if (not li) or (HEADER_LINE.match (li) is None):
                    # this is either the header separator, or it
                    # is not a header line.
                    self.got_header = 1
                    h = self.build_header (lines[:i])
                    self._write (h)
                    # rejoin the rest of the data
                    d = string.join (lines[i:], '\n')
                    self._write (d)
                    self.buffer = ''
                    break
    def build_header (self, lines):
        status = '200 OK'
        saw_content_type = 0
        hl = HEADER_LINE
        for line in lines:
            mo = hl.match (line)
            if mo is not None:
                h = string.lower (mo.group(1))
                if h == 'status':
                    status = mo.group(2)
                elif h == 'content-type':
                    saw_content_type = 1
        lines.insert (0, 'HTTP/1.0 %s' % status)
        lines.append ('Server: ' + self.request['Server'])
        lines.append ('Date: ' + self.request['Date'])
        if not saw_content_type:
            lines.append ('Content-Type: text/html')
        lines.append ('Connection: close')
        return string.join (lines, '\r\n') + '\r\n\r\n'
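As a rough standalone sketch of the 'Status:' rewrite (simplified; the real method above also stamps Server and Date headers taken from the request, and demo_build_header is illustrative only):

# Simplified, standalone sketch of build_header's Status rewrite.
import re
import string

_HL = re.compile ('([A-Za-z0-9-]+): ([^\r\n]+)')

def demo_build_header (lines):
    status = '200 OK'
    for line in lines:
        mo = _HL.match (line)
        if mo is not None and string.lower (mo.group(1)) == 'status':
            status = mo.group(2)
    return ['HTTP/1.0 %s' % status] + lines

print demo_build_header (['Status: 302 Moved',
                          'Location: http://www.example.com/'])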
    def _write (self, data):
        self.bytes_out.increment (len (data))
        self.file.write (data)

    def writelines (self, list):
        self.write (string.join (list, ''))

    def flush (self):
        pass

    def close (self):
        if not self.got_header:
            # managed to slip through our header detectors
            self._write (
                self.build_header (['Status: 502', 'Content-Type: text/html'])
                )
            self._write (
                '<html><h1>Server Error</h1>\r\n'
                '<b>Bad Gateway:</b> No Header from CGI Script\r\n'
                '<pre>Data: %s</pre>'
                '</html>\r\n' % (repr (self.buffer))
                )
        self.request.log (int (self.bytes_out.as_long()))
        self.file.close()
        self.request.channel.current_request = None
class collector:

    "gathers input for PUT requests"

    def __init__ (self, handler, request, env):
        self.handler = handler
        self.env = env
        self.request = request
        self.data = StringIO.StringIO()

        # make sure there's a content-length header
        self.cl = request.get_header ('content-length')

        if not self.cl:
            request.error (411)
            return
        else:
            self.cl = string.atoi (self.cl)

    def collect_incoming_data (self, data):
        self.data.write (data)
        if self.data.tell() >= self.cl:
            self.data.seek (0)

            h = self.handler
            r = self.request

            # set the terminator back to the default
            self.request.channel.set_terminator ('\r\n\r\n')
            del self.handler
            del self.request

            h.continue_request (self.data, r, self.env)
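The collector's rule is simply to buffer until Content-Length bytes have arrived, then rewind and hand the body off. Schematically (standalone sketch):

# Standalone sketch of the collector's buffering rule.
import StringIO

cl = 11                           # taken from the Content-Length header
data = StringIO.StringIO()
for chunk in ('hello ', 'world'):
    data.write (chunk)
    if data.tell() >= cl:
        data.seek (0)
        print 'body complete:', repr (data.read())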
class request_loop_thread (threading.Thread):

    def __init__ (self, queue):
        threading.Thread.__init__ (self)
        self.setDaemon (1)
        self.queue = queue

    def run (self):
        while 1:
            function, (env, stdin, stdout) = self.queue.get()
            function (env, stdin, stdout)
            stdout.close()
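Each worker thread just pulls (function, (env, stdin, stdout)) tuples off the queue and runs them, so the classes above can be exercised with any callable of that shape. A minimal sketch, where 'app' is a hypothetical stand-in for a CGI module's main():

# Minimal sketch wiring request_queue to one worker thread.
import StringIO
import time

def app (env, stdin, stdout):
    stdout.write ('Content-Type: text/plain\r\n\r\nhello\r\n')

q = request_queue()
request_loop_thread (q).start()
q.put ((app, ({}, StringIO.StringIO (''), StringIO.StringIO())))
time.sleep (1)   # give the daemon worker a moment to run 'app'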
# ===========================================================================
#                                 Testing
# ===========================================================================

if __name__ == '__main__':

    import sys

    if len (sys.argv) < 2:
        print 'Usage: %s <worker_threads>' % sys.argv[0]
    else:
        nthreads = string.atoi (sys.argv[1])

        import asyncore
        import http_server
        # create a generic web server
        hs = http_server.http_server ('', 7080)

        # create a request queue
        q = request_queue()

        # create a script handler
        sh = script_handler (q)

        # install the script handler on the web server
        hs.install_handler (sh)

        # get a couple of CGI modules
        import test_module
        import pi_module

        # install the modules on the script handler
        sh.add_module (test_module, 'test')
        sh.add_module (pi_module, 'pi')

        # fire up the worker threads
        for i in range (nthreads):
            rt = request_loop_thread (q)
            rt.start()

        # start the main event loop
        asyncore.loop()
lib/python/ZServer/tests/__init__.py
0 → 100644
##############################################################################
#
# Copyright (c) 2003 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
# This file is needed to make this directory a package.
lib/python/ZServer/tests/test_config.py
0 → 100644
##############################################################################
#
# Copyright (c) 2003 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Test the ZServer configuration machinery."""
import cStringIO as StringIO
import os
import tempfile
import unittest

import ZConfig
import ZServer.datatypes

TEMPFILENAME = tempfile.mktemp()
class ZServerConfigurationTestCase (unittest.TestCase):

    schema = None

    def get_schema (self):
        if self.schema is None:
            sio = StringIO.StringIO ("""
                <schema>
                  <import package='ZServer'/>
                  <multisection name='*' type='server' attribute='servers'/>
                </schema>
                """)
            schema = ZConfig.loadSchemaFile (sio)
            ZServerConfigurationTestCase.schema = schema
        return self.schema

    def load_factory (self, text):
        conf, xxx = ZConfig.loadConfigFile (self.get_schema(),
                                            StringIO.StringIO (text))
        self.assertEqual (len (conf.servers), 1)
        return conf.servers[0]
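The multisection declaration in the schema means a config file may contain any number of server sections; load_factory just insists on exactly one. A sketch of loading two at once (assuming ZConfig and the ZServer component schema are importable; the addresses are arbitrary):

# Sketch: the same schema accepts several server sections at once.
import cStringIO as StringIO
import ZConfig

schema = ZConfig.loadSchemaFile (StringIO.StringIO ("""
    <schema>
      <import package='ZServer'/>
      <multisection name='*' type='server' attribute='servers'/>
    </schema>
    """))
conf, handlers = ZConfig.loadConfigFile (schema, StringIO.StringIO ("""\
    <http-server>
      address 8080
    </http-server>
    <ftp-server>
      address 8021
    </ftp-server>
    """))
print len (conf.servers)   # -> 2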
    def load_unix_domain_factory (self, text):
        fn = TEMPFILENAME
        f = open (fn, 'w')
        f.close()
        try:
            factory = self.load_factory (text % fn)
        finally:
            os.unlink (fn)
        self.assert_ (factory.host is None)
        self.assert_ (factory.port is None)
        self.assertEqual (factory.path, fn)
        return factory
    def test_http_factory (self):
        factory = self.load_factory ("""\
            <http-server>
              address 81
              force-connection-close true
              webdav-source-clients cadaever
            </http-server>
            """)
        self.assert_ (isinstance (factory,
                                  ZServer.datatypes.HTTPServerFactory))
        self.assert_ (factory.force_connection_close)
        self.assertEqual (factory.host, '')
        self.assertEqual (factory.port, 81)
        self.assertEqual (factory.webdav_source_clients, "cadaever")
        self.check_prepare (factory)
        server = factory.create()
        self.assertEqual (server.ip, '127.0.0.1')
        self.assertEqual (server.port, 9381)
        server.close()
    def test_webdav_source_factory (self):
        factory = self.load_factory ("""\
            <webdav-source-server>
              address 82
              force-connection-close true
            </webdav-source-server>
            """)
        self.assert_ (isinstance (factory,
                                  ZServer.datatypes.WebDAVSourceServerFactory))
        self.assert_ (factory.force_connection_close)
        self.assertEqual (factory.host, '')
        self.assertEqual (factory.port, 82)
        self.check_prepare (factory)
        server = factory.create()
        self.assertEqual (server.ip, '127.0.0.1')
        self.assertEqual (server.port, 9382)
        server.close()
    def test_pcgi_factory (self):
        factory = self.load_unix_domain_factory ("""\
            <persistent-cgi>
              path %s
            </persistent-cgi>
            """)
        self.assert_ (isinstance (factory,
                                  ZServer.datatypes.PCGIServerFactory))
    def test_fcgi_factory (self):
        factory = self.load_factory ("""\
            <fast-cgi>
              address 83
            </fast-cgi>
            """)
        self.assert_ (isinstance (factory,
                                  ZServer.datatypes.FCGIServerFactory))
        self.assertEqual (factory.host, '')
        self.assertEqual (factory.port, 83)
        self.assertEqual (factory.path, None)
        self.check_prepare (factory)
        factory.create().close()

        factory = self.load_unix_domain_factory ("""\
            <fast-cgi>
              address %s
            </fast-cgi>
            """)
        self.assert_ (isinstance (factory,
                                  ZServer.datatypes.FCGIServerFactory))
        self.check_prepare (factory)
    def test_ftp_factory (self):
        factory = self.load_factory ("""\
            <ftp-server>
              address 84
            </ftp-server>
            """)
        self.assert_ (isinstance (factory,
                                  ZServer.datatypes.FTPServerFactory))
        self.assertEqual (factory.host, '')
        self.assertEqual (factory.port, 84)
        self.check_prepare (factory)
        factory.create().close()
    def test_monitor_factory (self):
        factory = self.load_factory ("""\
            <monitor-server>
              address 85
            </monitor-server>
            """)
        self.assert_ (isinstance (factory,
                                  ZServer.datatypes.MonitorServerFactory))
        self.assertEqual (factory.host, '')
        self.assertEqual (factory.port, 85)
        self.check_prepare (factory)
        factory.create().close()
    def test_icp_factory (self):
        factory = self.load_factory ("""\
            <icp-server>
              address 86
            </icp-server>
            """)
        self.assert_ (isinstance (factory,
                                  ZServer.datatypes.ICPServerFactory))
        self.assertEqual (factory.host, '')
        self.assertEqual (factory.port, 86)
        self.check_prepare (factory)
        factory.create().close()
    def check_prepare (self, factory):
        port = factory.port
        o = object()
        factory.prepare ("127.0.0.1", o, "module",
                         {"key": "value"}, portbase=9300)
        self.assert_ (factory.dnsresolver is o)
        self.assertEqual (factory.module, "module")
        self.assertEqual (factory.cgienv.items(), [("key", "value")])
        if port is None:
            self.assert_ (factory.host is None)
            self.assert_ (factory.port is None)
        else:
            self.assertEqual (factory.host, "127.0.0.1")
            self.assertEqual (factory.port, 9300 + port)
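check_prepare passes portbase=9300, and the factory is expected to offset its configured port by it; that is why test_http_factory, configured with address 81, asserts server.port == 9381:

# The portbase arithmetic the tests rely on (illustration):
portbase = 9300
configured_port = 81            # from 'address 81' in test_http_factory
assert portbase + configured_port == 9381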
def test_suite():
    return unittest.makeSuite (ZServerConfigurationTestCase)

if __name__ == "__main__":
    unittest.main (defaultTest="test_suite")