Commit ac181a2d authored by Jim Fulton

Checked in Scott Robertson's thread-safety fixes.

parent cbe5af3e
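
The old regex module keeps its match state (the group() registers) on the shared, module-level compiled pattern, so two threads using the same pattern can clobber each other's results between match() and group(). The ts_regex calls introduced throughout this change return the match position and the requested groups from a single call instead. A minimal modern sketch of that idea, using re plus a lock; the names here are illustrative, not the actual ts_regex implementation:

    import re
    import threading

    class SafePattern:
        # Illustrative thread-safe wrapper: one call returns the match length and
        # the requested groups together, so no per-pattern match state is shared.
        def __init__(self, pattern):
            self._pat = re.compile(pattern)
            self._lock = threading.Lock()   # re is already safe; the lock mirrors the old design

        def match_group(self, text, groups):
            # None on failure, otherwise (length, groups) -- the shape the new
            # code unpacks as "start, grps = ts_results".
            with self._lock:
                m = self._pat.match(text)
            if m is None:
                return None
            got = tuple(m.group(g) for g in groups)
            return m.end(), (got if len(got) > 1 else got[0])

    parmre = SafePattern(r'(\s*([^\s="]+)=([^\s="]+))')
    print(parmre.match_group(' a=b rest', (1, 2, 3)))   # (4, (' a=b', 'a', 'b'))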
@@ -84,7 +84,7 @@
 ##############################################################################
 """Access control package"""
-__version__='$Revision: 1.84 $'[11:-2]
+__version__='$Revision: 1.85 $'[11:-2]
 import Globals, App.Undo, socket, regex
 from Globals import HTMLFile, MessageDialog, Persistent, PersistentMapping
@@ -715,8 +715,8 @@ def rolejoin(roles, other):
     roles.sort()
     return roles
-addr_match=regex.compile('[0-9\.\*]*').match
-host_match=regex.compile('[A-Za-z0-9\.\*]*').match
+addr_match=regex.compile('[0-9\.\*]*').match #TS
+host_match=regex.compile('[A-Za-z0-9\.\*]*').match #TS
 def domainSpecMatch(spec, request):
...
@@ -155,7 +155,7 @@ class Product(Folder):
     import_error_=None
     def new_version(self,
-                    _intending=regex.compile("[.]?[0-9]+$").search,
+                    _intending=regex.compile("[.]?[0-9]+$").search, #TS
                     ):
         # Return a new version number based on the existing version.
         v=str(self.version)
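
The _intending default above matches an optional dot followed by trailing digits, i.e. the numeric tail of a version string such as "1.2". A hedged illustration of bumping such a tail with the modern re module (this is not the committed new_version body, which lies outside this hunk):

    import re

    def bump_trailing_number(version):
        # Find trailing digits, optionally preceded by a dot: "1.2" -> ".2", "Zope-2" -> "2".
        m = re.search(r'[.]?([0-9]+)$', version)
        if not m:
            return version + '.1'          # assumption: start a numeric suffix
        return version[:m.start(1)] + str(int(m.group(1)) + 1)

    print(bump_trailing_number('1.2'))     # 1.3
    print(bump_trailing_number('Zope-2'))  # Zope-3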
...
@@ -84,9 +84,9 @@
 ##############################################################################
 __doc__="""Object Manager
-$Id: ObjectManager.py,v 1.76 1999/07/08 11:28:50 jim Exp $"""
-__version__='$Revision: 1.76 $'[11:-2]
+$Id: ObjectManager.py,v 1.77 1999/07/15 16:49:02 jim Exp $"""
+__version__='$Revision: 1.77 $'[11:-2]
 import App.Management, Acquisition, App.Undo, Globals, CopySupport
 import os, App.FactoryDispatcher, ts_regex, Products
@@ -98,7 +98,7 @@ from urllib import quote
 from cStringIO import StringIO
 import marshal
-bad_id=ts_regex.compile('[^a-zA-Z0-9-_~\,\. ]').match
+bad_id=ts_regex.compile('[^a-zA-Z0-9-_~\,\. ]').match #TS
 _marker=[]
 class ObjectManager(
...
@@ -84,12 +84,12 @@
 ##############################################################################
 __doc__='''Shared classes and functions
-$Id: Aqueduct.py,v 1.32 1999/03/22 23:20:16 jim Exp $'''
-__version__='$Revision: 1.32 $'[11:-2]
+$Id: Aqueduct.py,v 1.33 1999/07/15 16:38:22 jim Exp $'''
+__version__='$Revision: 1.33 $'[11:-2]
 import Globals, os
 from Globals import HTMLFile, Persistent
-import DocumentTemplate, DateTime, regex, regsub, string
+import DocumentTemplate, DateTime, ts_regex, regex, string
 import binascii, Acquisition
 DateTime.now=DateTime.DateTime
 from cStringIO import StringIO
@@ -97,7 +97,7 @@ from OFS import SimpleItem
 from AccessControl.Role import RoleManager
 from DocumentTemplate import HTML
-from string import strip
+from string import strip, replace
 dtml_dir=Globals.package_home(globals())
@@ -337,11 +337,11 @@ class Args:
 def parse(text,
           result=None,
           keys=None,
-          unparmre=regex.compile(
+          unparmre=ts_regex.compile(
               '\([\0- ]*\([^\0- =\"]+\)\)'),
-          parmre=regex.compile(
+          parmre=ts_regex.compile(
               '\([\0- ]*\([^\0- =\"]+\)=\([^\0- =\"]+\)\)'),
-          qparmre=regex.compile(
+          qparmre=ts_regex.compile(
               '\([\0- ]*\([^\0- =\"]+\)="\([^"]*\)\"\)'),
           ):
@@ -351,21 +351,30 @@ def parse(text,
     __traceback_info__=text

-    if parmre.match(text) >= 0:
-        name=parmre.group(2)
-        value={'default':parmre.group(3)}
-        l=len(parmre.group(1))
-    elif qparmre.match(text) >= 0:
-        name=qparmre.group(2)
-        value={'default':qparmre.group(3)}
-        l=len(qparmre.group(1))
-    elif unparmre.match(text) >= 0:
-        name=unparmre.group(2)
-        l=len(unparmre.group(1))
-        value={}
-    else:
-        if not text or not strip(text): return Args(result,keys)
-        raise InvalidParameter, text
+    ts_results = parmre.match_group(text, (1,2,3))
+    if ts_results:
+        start, grps = ts_results
+        name=grps[1]
+        value={'default':grps[2]}
+        l=len(grps[0])
+    else:
+        ts_results = qparmre.match_group(text, (1,2,3))
+        if ts_results:
+            start, grps = ts_results
+            name=grps[1]
+            value={'default':grps[2]}
+            l=len(grps[0])
+        else:
+            ts_results = unparmre.match_group(text, (1,2))
+            if ts_results:
+                start, grps = ts_results
+                name=grps[1]
+                l=len(grps[0])
+                value={}
+            else:
+                if not text or not strip(text): return Args(result,keys)
+                raise InvalidParameter, text

     lt=string.find(name,':')
     if lt > 0:
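
With match_group, each call hands back everything the caller needs, so parse() no longer reads group state off a shared pattern after the fact. The same three-way token scan, sketched with the modern re module (the pattern translations and names are assumptions, not the Zope API):

    import re

    # Rough re translations of parmre, qparmre and unparmre (the originals use
    # the old Emacs-style regex syntax).
    _parm   = re.compile(r'(\s*([^\s="]+)=([^\s="]+))')
    _qparm  = re.compile(r'(\s*([^\s="]+)="([^"]*)")')
    _unparm = re.compile(r'(\s*([^\s="]+))')

    def parse_one(text):
        """Return (name, value, consumed_length) for the first token (illustrative)."""
        for pat, has_default in ((_parm, True), (_qparm, True), (_unparm, False)):
            m = pat.match(text)
            if m:
                value = {'default': m.group(3)} if has_default else {}
                return m.group(2), value, len(m.group(1))
        raise ValueError('invalid parameter: %r' % text)

    print(parse_one(' size=5 rest'))        # ('size', {'default': '5'}, 7)
    print(parse_one(' title="a b" rest'))   # ('title', {'default': 'a b'}, 12)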
@@ -379,33 +388,38 @@ def parse(text,
 def quotedHTML(text,
                character_entities=(
-                   (regex.compile('&'), '&amp;'),
-                   (regex.compile("<"), '&lt;' ),
-                   (regex.compile(">"), '&gt;' ),
-                   (regex.compile('"'), '&quot;'))): #"
-    import regsub
+                   ('&', '&amp;'),
+                   ("<", '&lt;' ),
+                   (">", '&gt;' ),
+                   ('"', '&quot;'))): #"
     for re,name in character_entities:
-        text=regsub.gsub(re,name,text)
+        text=replace(text,re,name)
     return text

-def nicify(name, under=regex.compile('_')):
-    name=regsub.gsub(under,' ',string.strip(name))
+def nicify(name):
+    name=replace(string.strip(name), '_',' ')
     return string.upper(name[:1])+name[1:]
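
quotedHTML and nicify only ever substituted fixed strings, so the patterns (and their shared match state) can be dropped in favour of plain string replacement. A modern equivalent of the two helpers; note that '&' has to be replaced first so the other entities are not double-escaped:

    def quoted_html(text):
        # Same entity table as quotedHTML above.
        for raw, entity in (('&', '&amp;'), ('<', '&lt;'), ('>', '&gt;'), ('"', '&quot;')):
            text = text.replace(raw, entity)
        return text

    def nicify(name):
        name = name.strip().replace('_', ' ')
        return name[:1].upper() + name[1:]

    print(quoted_html('a < b & "c"'))    # a &lt; b &amp; &quot;c&quot;
    print(nicify('  monthly_report '))   # Monthly report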
 def decapitate(html, RESPONSE=None,
-               header_re=regex.compile(
+               header_re=ts_regex.compile(
                    '\(\('
                    '[^\0- <>:]+:[^\n]*\n'
                    '\|'
                    '[ \t]+[^\0- ][^\n]*\n'
                    '\)+\)[ \t]*\n\([\0-\377]+\)'
                    ),
-               space_re=regex.compile('\([ \t]+\)'),
-               name_re=regex.compile('\([^\0- <>:]+\):\([^\n]*\)'),
+               space_re=ts_regex.compile('\([ \t]+\)'),
+               name_re=ts_regex.compile('\([^\0- <>:]+\):\([^\n]*\)'),
                ):

-    if header_re.match(html) < 0: return html
-    headers, html = header_re.group(1,3)
+    ts_results = header_re.match_group(html, (1,3))
+    if not ts_results: return html
+    headers, html = ts_results[1]

     headers=string.split(headers,'\n')
@@ -413,16 +427,19 @@ def decapitate(html, RESPONSE=None,
     while i < len(headers):
         if not headers[i]:
             del headers[i]
-        elif space_re.match(headers[i]) >= 0:
-            headers[i-1]="%s %s" % (headers[i-1],
-                                    headers[i][len(space_re.group(1)):])
-            del headers[i]
-        else:
-            i=i+1
+        else:
+            ts_results = space_re.match_group(headers[i], (1,))
+            if ts_results:
+                headers[i-1]="%s %s" % (headers[i-1],
+                                        headers[i][len(ts_results[1]):])
+                del headers[i]
+            else:
+                i=i+1

     for i in range(len(headers)):
-        if name_re.match(headers[i]) >= 0:
-            k, v = name_re.group(1,2)
+        ts_results = name_re.match_group(headers[i], (1,2))
+        if ts_results:
+            k, v = ts_results[1]
             v=string.strip(v)
         else:
             raise ValueError, 'Invalid Header (%d): %s ' % (i,headers[i])
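
The loop above folds continuation lines (header lines that start with whitespace) into the preceding header and then splits each header on its first colon. A compact modern sketch of the same behaviour, not the committed code:

    import re

    def split_headers(raw):
        lines, headers = raw.split('\n'), []
        for line in lines:
            if not line:
                continue                                    # drop empty lines
            if line[:1] in ' \t' and headers:               # continuation line
                headers[-1] = '%s %s' % (headers[-1], line.lstrip())
            else:
                headers.append(line)
        result = []
        for h in headers:
            m = re.match(r'([^\s:]+):(.*)', h)
            if not m:
                raise ValueError('Invalid Header: %s' % h)
            result.append((m.group(1), m.group(2).strip()))
        return result

    print(split_headers('Subject: hello\n  world\nX-Id: 7'))
    # [('Subject', 'hello world'), ('X-Id', '7')]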
...
@@ -170,7 +170,7 @@ Special symbology is used to indicate special constructs:
 Together with the previous rule this allows easy coding of references or
 end notes.

-$Id: StructuredText.py,v 1.18 1999/03/24 00:03:18 klm Exp $'''
+$Id: StructuredText.py,v 1.19 1999/07/15 16:43:15 jim Exp $'''
 # Copyright
 #
 # Copyright 1996 Digital Creations, L.C., 910 Princess Anne
@@ -222,6 +222,9 @@ $Id: StructuredText.py,v 1.18 1999/03/24 00:03:18 klm Exp $'''
 # (540) 371-6909
 #
 # $Log: StructuredText.py,v $
+# Revision 1.19  1999/07/15 16:43:15  jim
+# Checked in Scott Robertson's thread-safety fixes.
+#
 # Revision 1.18  1999/03/24 00:03:18  klm
 # Provide for relative links, eg <a href="file_in_same_dir">whatever</a>,
 # as:
@@ -327,14 +330,13 @@ $Id: StructuredText.py,v 1.18 1999/03/24 00:03:18 klm Exp $'''
 #
 #
 #
-import regex, regsub
-from regsub import gsub
+import ts_regex
+from ts_regex import gsub
 from string import split, join, strip, find

-indent_tab =regex.compile('\(\n\|^\)\( *\)\t')
-indent_space=regex.compile('\n\( *\)')
-paragraph_divider=regex.compile('\(\n *\)+\n')
+indent_tab =ts_regex.compile('\(\n\|^\)\( *\)\t')
+indent_space=ts_regex.compile('\n\( *\)')
+paragraph_divider=ts_regex.compile('\(\n *\)+\n')

 def untabify(aString):
     '''\
@@ -343,10 +345,11 @@ def untabify(aString):
     result=''
     rest=aString
     while 1:
-        start=indent_tab.search(rest)
-        if start >= 0:
-            lnl=len(indent_tab.group(1))
-            indent=len(indent_tab.group(2))
+        ts_results = indent_tab.search_group(rest, (1,2))
+        if ts_results:
+            start, grps = ts_results
+            lnl=len(grps[0])
+            indent=len(grps[1])
             result=result+rest[:start]
             rest="\n%s%s" % (' ' * ((indent/8+1)*8),
                              rest[start+indent+1+lnl:])
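
untabify only rewrites a tab that sits in a line's leading indentation: indent spaces plus a tab become (indent/8+1)*8 spaces, i.e. enough spaces to reach the next 8-column tab stop. A small modern check of that arithmetic (str.expandtabs gives the same answer for this simple case):

    def untabify_line(line, tabstop=8):
        # Only handles a single tab in the leading indentation, like the regex above.
        if '\t' not in line:
            return line
        indent = len(line) - len(line.lstrip(' '))
        if line[indent] != '\t':
            return line
        return ' ' * ((indent // tabstop + 1) * tabstop) + line[indent + 1:]

    print(repr(untabify_line('   \tfoo')))   # '        foo'  (8 spaces)
    print(repr('   \tfoo'.expandtabs()))     # same result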
@@ -390,9 +393,11 @@ def indent_level(aString):
     text='\n'+aString
     indent=l=len(text)
     while 1:
-        start=indent_space.search(text,start)
-        if start >= 0:
-            i=len(indent_space.group(1))
+
+        ts_results = indent_space.search_group(text, (1,2), start)
+        if ts_results:
+            start, grps = ts_results
+            i=len(grps[0])
             start=start+i+1
             if start < l and text[start] != '\n': # Skip blank lines
                 if not i: return (0,aString)
@@ -419,12 +424,12 @@ def structure(list):
         i=i+sublen
     return r

-bullet=regex.compile('[ \t\n]*[o*-][ \t\n]+\([^\0]*\)')
-example=regex.compile('[\0- ]examples?:[\0- ]*$')
-dl=regex.compile('\([^\n]+\)[ \t]+--[ \t\n]+\([^\0]*\)')
-nl=regex.compile('\n')
-ol=regex.compile('[ \t]*\(\([0-9]+\|[a-zA-Z]+\)[.)]\)+[ \t\n]+\([^\0]*\|$\)')
-olp=regex.compile('[ \t]*([0-9]+)[ \t\n]+\([^\0]*\|$\)')
+bullet=ts_regex.compile('[ \t\n]*[o*-][ \t\n]+\([^\0]*\)')
+example=ts_regex.compile('[\0- ]examples?:[\0- ]*$').search
+dl=ts_regex.compile('\([^\n]+\)[ \t]+--[ \t\n]+\([^\0]*\)')
+nl=ts_regex.compile('\n').search
+ol=ts_regex.compile('[ \t]*\(\([0-9]+\|[a-zA-Z]+\)[.)]\)+[ \t\n]+\([^\0]*\|$\)')
+olp=ts_regex.compile('[ \t]*([0-9]+)[ \t\n]+\([^\0]*\|$\)')

 optional_trailing_punctuation = '\(,\|\([.:?;]\)\)?'
@@ -474,7 +479,7 @@ class StructuredText:
                              aStructuredString)
         self.level=level
-        paragraphs=regsub.split(untabify(aStructuredString),paragraph_divider)
+        paragraphs=ts_regex.split(untabify(aStructuredString),paragraph_divider)
         paragraphs=map(indent_level,paragraphs)
         self.structure=structure(paragraphs)
@@ -488,10 +493,10 @@ ctag_prefix="\([\0- (]\|^\)"
 ctag_suffix="\([\0- ,.:;!?)]\|$\)"
 ctag_middle="[%s]\([^\0- %s][^%s]*[^\0- %s]\|[^%s]\)[%s]"
 ctag_middl2="[%s][%s]\([^\0- %s][^%s]*[^\0- %s]\|[^%s]\)[%s][%s]"

-em =regex.compile(ctag_prefix+(ctag_middle % (("*",)*6) )+ctag_suffix)
-strong=regex.compile(ctag_prefix+(ctag_middl2 % (("*",)*8))+ctag_suffix)
-under =regex.compile(ctag_prefix+(ctag_middle % (("_",)*6) )+ctag_suffix)
-code =regex.compile(ctag_prefix+(ctag_middle % (("\'",)*6))+ctag_suffix)
+em =ts_regex.compile(ctag_prefix+(ctag_middle % (("*",)*6) )+ctag_suffix)
+strong=ts_regex.compile(ctag_prefix+(ctag_middl2 % (("*",)*8))+ctag_suffix)
+under =ts_regex.compile(ctag_prefix+(ctag_middle % (("_",)*6) )+ctag_suffix)
+code =ts_regex.compile(ctag_prefix+(ctag_middle % (("\'",)*6))+ctag_suffix)

 def ctag(s):
     if s is None: s=''
@@ -508,9 +513,9 @@ class HTML(StructuredText):
     '''\

     def __str__(self,
-                extra_dl=regex.compile("</dl>\n<dl>"),
-                extra_ul=regex.compile("</ul>\n<ul>"),
-                extra_ol=regex.compile("</ol>\n<ol>"),
+                extra_dl=ts_regex.compile("</dl>\n<dl>"),
+                extra_ul=ts_regex.compile("</ul>\n<ul>"),
+                extra_ol=ts_regex.compile("</ol>\n<ol>"),
                 ):
         '''\
         Return an HTML string representation of the structured text data.
@@ -563,40 +568,52 @@ class HTML(StructuredText):
         r=''
         for s in structure:
             # print s[0],'\n', len(s[1]), '\n\n'
-            if bullet.match(s[0]) >= 0:
-                p=bullet.group(1)
-                r=self.ul(r,p,self._str(s[1],level))
-            elif ol.match(s[0]) >= 0:
-                p=ol.group(3)
-                r=self.ol(r,p,self._str(s[1],level))
-            elif olp.match(s[0]) >= 0:
-                p=olp.group(1)
-                r=self.ol(r,p,self._str(s[1],level))
-            elif dl.match(s[0]) >= 0:
-                t,d=dl.group(1,2)
-                r=self.dl(r,t,d,self._str(s[1],level))
-            elif example.search(s[0]) >= 0 and s[1]:
-                # Introduce an example, using pre tags:
-                r=self.normal(r,s[0],self.pre(s[1]))
-            elif s[0][-2:]=='::' and s[1]:
-                # Introduce an example, using pre tags:
-                r=self.normal(r,s[0][:-1],self.pre(s[1]))
-            elif nl.search(s[0]) < 0 and s[1] and s[0][-1:] != ':':
-                # Treat as a heading
-                t=s[0]
-                r=self.head(r,t,level,
-                            self._str(s[1],level and level+1))
-            else:
-                r=self.normal(r,s[0],self._str(s[1],level))
+
+            ts_results = bullet.match_group(s[0], (1,))
+            if ts_results:
+                p = ts_results[1]
+                r=self.ul(r,p,self._str(s[1],level))
+            else:
+                ts_results = ol.match_group(s[0], (3,))
+                if ts_results:
+                    p = ts_results[1]
+                    r=self.ol(r,p,self._str(s[1],level))
+                else:
+                    ts_results = olp.match_group(s[0], (1,))
+                    if ts_results:
+                        p = ts_results[1]
+                        r=self.ol(r,p,self._str(s[1],level))
+                    else:
+                        ts_results = dl.match_group(s[0], (1,2))
+                        if ts_results:
+                            t,d = ts_results[1]
+                            r=self.dl(r,t,d,self._str(s[1],level))
+                        else:
+                            if example(s[0]) >= 0 and s[1]:
+                                # Introduce an example, using pre tags:
+                                r=self.normal(r,s[0],self.pre(s[1]))
+                            else:
+                                if s[0][-2:]=='::' and s[1]:
+                                    # Introduce an example, using pre tags:
+                                    r=self.normal(r,s[0][:-1],self.pre(s[1]))
+                                else:
+                                    if nl(s[0]) < 0 and s[1] and s[0][-1:] != ':':
+                                        # Treat as a heading
+                                        t=s[0]
+                                        r=self.head(r,t,level,
+                                                    self._str(s[1],level and level+1))
+                                    else:
+                                        r=self.normal(r,s[0],self._str(s[1],level))
         return r

 def html_quote(v,
                character_entities=(
-                   (regex.compile('&'), '&amp;'),
-                   (regex.compile("<"), '&lt;' ),
-                   (regex.compile(">"), '&gt;' ),
-                   (regex.compile('"'), '&quot;'))): #"
+                   (ts_regex.compile('&'), '&amp;'),
+                   (ts_regex.compile("<"), '&lt;' ),
+                   (ts_regex.compile(">"), '&gt;' ),
+                   (ts_regex.compile('"'), '&quot;'))): #"
     text=str(v)
     for re,name in character_entities:
         text=gsub(re,name,text)
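
Two shape changes are easy to miss here: example and nl are now pre-bound .search functions, so they are called as example(s[0]) and nl(s[0]), and the former elif chain becomes nested if/else blocks because each branch must call match_group and test its result first. With the modern re module the match object carries its own groups, so a flat chain works again; an illustrative sketch of the first two branches only (patterns translated by hand, group numbers differ from the originals):

    import re

    bullet = re.compile(r'[ \t\n]*[o*-][ \t\n]+(.*)', re.S)
    ol     = re.compile(r'[ \t]*((?:[0-9]+|[a-zA-Z]+)[.)])+[ \t\n]+(.*)', re.S)

    def classify(paragraph):
        # The match object is local to the caller, so there is no shared state to race on.
        if m := bullet.match(paragraph):
            return 'ul', m.group(1)
        if m := ol.match(paragraph):
            return 'ol', m.group(2)
        return 'normal', paragraph

    print(classify('o a bullet item'))   # ('ul', 'a bullet item')
    print(classify('1. first point'))    # ('ol', 'first point')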
@@ -633,17 +650,18 @@ def main():
     s=sys.stdin.read()

     if opts:
-        import regex, regsub
         if filter(lambda o: o[0]=='-w', opts):
             print 'Content-Type: text/html\n'

         if s[:2]=='#!':
-            s=regsub.sub('^#![^\n]+','',s)
+            s=ts_regex.sub('^#![^\n]+','',s)

-        r=regex.compile('\([\0-\n]*\n\)')
-        if r.match(s) >= 0:
-            s=s[len(r.group(1)):]
+        r=ts_regex.compile('\([\0-\n]*\n\)')
+        ts_results = r.match_group(s, (1,))
+        if ts_results:
+            s=s[len(ts_results[1]):]
         s=str(html_with_references(s))
         if s[:4]=='<h1>':
             t=s[4:find(s,'</h1>')]
...
@@ -103,18 +103,17 @@ that allows one to simply make a single web request.
 The module also provides a command-line interface for calling objects.
 """

-__version__='$Revision: 1.30 $'[11:-2]
+__version__='$Revision: 1.31 $'[11:-2]

 import sys, regex, socket, mimetools
 from httplib import HTTP
 from os import getpid
 from time import time
 from random import random
-from regsub import gsub
 from base64 import encodestring
 from urllib import urlopen, quote
 from types import FileType, ListType, DictType, TupleType
-from string import strip, split, atoi, join, rfind, translate, maketrans
+from string import strip, split, atoi, join, rfind, translate, maketrans, replace
 from urlparse import urlparse

 class Function:
@@ -205,9 +204,9 @@ class Function:
                 not headers.has_key('Authorization')):
                 headers['Authorization']=(
                     "Basic %s" %
-                    gsub('\012','',encodestring('%s:%s' % (
-                        self.username,self.password))))
+                    replace(encodestring('%s:%s' % (self.username,self.password)),
+                            '\012',''))
             try:
                 h=HTTP()
                 h.connect(self.host, self.port)
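
encodestring appends a newline (and splits long input across lines), which must not leak into an HTTP header value; that is what stripping '\012' is for. A modern equivalent of building the Basic credentials:

    import base64

    def basic_auth_header(username, password):
        # base64.encodebytes, like the old encodestring, inserts newlines that
        # have to be removed before the value goes into a header.
        raw = ('%s:%s' % (username, password)).encode('utf-8')
        token = base64.encodebytes(raw).decode('ascii').replace('\n', '')
        return 'Basic %s' % token

    print(basic_auth_header('admin', 'secret'))   # Basic YWRtaW46c2VjcmV0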
@@ -268,8 +267,7 @@ class Function:
         for n,v in self.headers.items():
             rq.append('%s: %s' % (n,v))
         if self.username and self.password:
-            c=gsub('\012','',encodestring('%s:%s' % (
-                self.username,self.password)))
+            c=replace(encodestring('%s:%s' % (self.username,self.password)),'\012','')
             rq.append('Authorization: Basic %s' % c)
         rq.append(MultiPart(d).render())
         rq=join(rq,'\n')
@@ -480,7 +478,7 @@ class MultiPart:
             elif dt==FileType or hasattr(val,'read'):
                 if hasattr(val,'name'):
-                    fn=gsub('\\\\','/',val.name)
+                    fn=replace(val.name, '\\', '/')
                     fn=fn[(rfind(fn,'/')+1):]
                     ex=fn[(rfind(fn,'.')+1):]
                     if self._extmap.has_key(ex): ct=self._extmap[ex]
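
The filename handling normalises Windows path separators and then keeps only the base name and extension of the uploaded file. The same steps in modern Python (helper name is illustrative):

    def upload_filename(path):
        fn = path.replace('\\', '/')         # normalise Windows separators
        fn = fn[fn.rfind('/') + 1:]          # keep the base name
        ex = fn[fn.rfind('.') + 1:]          # extension used for the content-type lookup
        return fn, ex

    print(upload_filename('C:\\docs\\report.pdf'))   # ('report.pdf', 'pdf')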
...
 """HTTP 1.1 / WebDAV client library."""

-__version__='$Revision: 1.9 $'[11:-2]
+__version__='$Revision: 1.10 $'[11:-2]

 import sys, os, string, regex, time, types
 import socket, httplib, mimetools
@@ -10,7 +10,7 @@ from base64 import encodestring
 from cStringIO import StringIO
 from whrandom import random
 from urllib import quote
-from regsub import gsub
@@ -88,9 +88,8 @@ class Resource:
             return headers
         if atype=='Basic':
             headers['Authorization']=(
-                "Basic %s" %
-                gsub('\012','',encodestring('%s:%s' % (
-                    self.username,self.password))))
+                "Basic %s" % string.replace(encodestring('%s:%s' % (self.username,self.password)),
+                                            '\012',''))
             return headers
         raise ValueError, 'Unknown authentication scheme: %s' % atype
@@ -503,7 +502,7 @@ class MultiPart:
                 if hasattr(val,'name'):
                     ct, enc=guess_type(val.name)
                     if not ct: ct='application/octet-stream'
-                    fn=gsub('\\\\','/',val.name)
+                    fn=string.replace(val.name,'\\','/')
                     fn=fn[(string.rfind(fn,'/')+1):]
                 else:
                     ct='application/octet-stream'
...