Commit fd7121a6 authored by Andreas Jung's avatar Andreas Jung

Yeah...Zope is now a regex|ts_regex|regsub free zone. Replaced and removed all old...

Yeah...Zope is now a regex|ts_regex|regsub free zone. Replaced and removed all old regex stuff. No one survived.
parent cedef143
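
The hunks below apply a small set of mechanical translations from the removed regex, ts_regex and regsub modules to the standard re module. A rough sketch of the recurring correspondences, purely as an illustration and not part of the commit:

    import re

    # regsub.gsub(pattern, repl, s) becomes re.sub(pattern, repl, s)
    # (same argument order), e.g. stripping the trailing newline from a
    # base64 string as in the FTPRequest hunk:
    assert re.sub('\012', '', 'dXNlcjpwdw==\n') == 'dXNlcjpwdw=='

    # Compiled regex/ts_regex patterns returned the matched length (or -1);
    # re returns a match object or None, so integer comparisons turn into
    # "is not None" checks plus mo.end(0) / mo.group(...) calls:
    addr_match = re.compile(r'[\d.]*').match
    mo = addr_match('10.0.0.')
    assert mo is not None and mo.end(0) == len('10.0.0.')

    # The ts_regex.allocate_lock() used by DT_String.cook() is swapped for
    # thread.allocate_lock(); compiled re patterns can be shared between
    # threads without a module-level lock.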
...@@ -93,9 +93,8 @@ from ZPublisher.HTTPRequest import HTTPRequest ...@@ -93,9 +93,8 @@ from ZPublisher.HTTPRequest import HTTPRequest
from cStringIO import StringIO from cStringIO import StringIO
import os import os
from regsub import gsub
from base64 import encodestring from base64 import encodestring
import string import string,re
class FTPRequest(HTTPRequest): class FTPRequest(HTTPRequest):
...@@ -141,7 +140,7 @@ class FTPRequest(HTTPRequest): ...@@ -141,7 +140,7 @@ class FTPRequest(HTTPRequest):
env['REQUEST_METHOD']='GET' # XXX what should this be? env['REQUEST_METHOD']='GET' # XXX what should this be?
env['SERVER_SOFTWARE']=channel.server.SERVER_IDENT env['SERVER_SOFTWARE']=channel.server.SERVER_IDENT
if channel.userid != 'anonymous': if channel.userid != 'anonymous':
env['HTTP_AUTHORIZATION']='Basic %s' % gsub('\012','', env['HTTP_AUTHORIZATION']='Basic %s' % re.sub('\012','',
encodestring('%s:%s' % (channel.userid, channel.password))) encodestring('%s:%s' % (channel.userid, channel.password)))
env['SERVER_NAME']=channel.server.hostname env['SERVER_NAME']=channel.server.hostname
env['SERVER_PORT']=str(channel.server.port) env['SERVER_PORT']=str(channel.server.port)
......
...@@ -84,9 +84,9 @@ ...@@ -84,9 +84,9 @@
############################################################################## ##############################################################################
"""Access control package""" """Access control package"""
__version__='$Revision: 1.147 $'[11:-2] __version__='$Revision: 1.148 $'[11:-2]
import Globals, socket, ts_regex, SpecialUsers import Globals, socket, SpecialUsers,re
import os import os
from Globals import DTMLFile, MessageDialog, Persistent, PersistentMapping from Globals import DTMLFile, MessageDialog, Persistent, PersistentMapping
from string import join, strip, split, lower, upper from string import join, strip, split, lower, upper
...@@ -1009,14 +1009,14 @@ def rolejoin(roles, other): ...@@ -1009,14 +1009,14 @@ def rolejoin(roles, other):
roles.sort() roles.sort()
return roles return roles
addr_match=ts_regex.compile('[0-9\.\*]*').match #TS addr_match=re.compile(r'[\d.]*').match
host_match=ts_regex.compile('[-A-Za-z0-9\.\*]*').match #TS host_match=re.compile(r'[-\w.]*').match
def domainSpecMatch(spec, request): def domainSpecMatch(spec, request):
host='' host=''
addr='' addr=''
# Fast exit for the match-all case # Fast exit for the match-all case
if len(spec) == 1 and spec[0] == '*': if len(spec) == 1 and spec[0] == '*':
return 1 return 1
...@@ -1037,6 +1037,7 @@ def domainSpecMatch(spec, request): ...@@ -1037,6 +1037,7 @@ def domainSpecMatch(spec, request):
try: addr=socket.gethostbyname(host) try: addr=socket.gethostbyname(host)
except: pass except: pass
_host=split(host, '.') _host=split(host, '.')
_addr=split(addr, '.') _addr=split(addr, '.')
_hlen=len(_host) _hlen=len(_host)
...@@ -1047,35 +1048,39 @@ def domainSpecMatch(spec, request): ...@@ -1047,35 +1048,39 @@ def domainSpecMatch(spec, request):
_ob=split(ob, '.') _ob=split(ob, '.')
_sz=len(_ob) _sz=len(_ob)
if addr_match(ob)==sz: mo = addr_match(ob)
fail=0 if mo is not None:
for i in range(_sz): if mo.end(0)==sz:
a=_addr[i] fail=0
o=_ob[i] for i in range(_sz):
if (o != a) and (o != '*'): a=_addr[i]
fail=1 o=_ob[i]
break if (o != a) and (o != '*'):
if fail: fail=1
continue break
return 1 if fail:
continue
return 1
if host_match(ob)==sz: mo = host_match(ob)
if _hlen < _sz: if mo is not None:
continue if mo.end(0)==sz:
elif _hlen > _sz: if _hlen < _sz:
_item=_host[-_sz:] continue
else: elif _hlen > _sz:
_item=_host _item=_host[-_sz:]
fail=0 else:
for i in range(_sz): _item=_host
h=_item[i] fail=0
o=_ob[i] for i in range(_sz):
if (o != h) and (o != '*'): h=_item[i]
fail=1 o=_ob[i]
break if (o != h) and (o != '*'):
if fail: fail=1
continue break
return 1 if fail:
continue
return 1
return 0 return 0
......
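
The User.py hunk above shows the recurring full-match idiom: the old addr_match(ob)==sz length comparison becomes a two-step test on the match object. A minimal sketch of the same idiom, assuming the new [\d.]* pattern and using the string length as the target size:

    import re

    addr_match = re.compile(r'[\d.]*').match

    def is_full_match(ob):
        # old regex API: addr_match(ob) == len(ob)
        mo = addr_match(ob)
        return mo is not None and mo.end(0) == len(ob)

    assert is_full_match('10.0.0.1')
    assert not is_full_match('example.com')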
...@@ -107,7 +107,7 @@ ...@@ -107,7 +107,7 @@
import Globals, OFS.Folder, OFS.SimpleItem, os, string, Acquisition, Products import Globals, OFS.Folder, OFS.SimpleItem, os, string, Acquisition, Products
import regex, zlib, Globals, cPickle, marshal, rotor import re, zlib, Globals, cPickle, marshal, rotor
import ZClasses, ZClasses.ZClass, AccessControl.Owned import ZClasses, ZClasses.ZClass, AccessControl.Owned
from OFS.Folder import Folder from OFS.Folder import Folder
...@@ -157,12 +157,12 @@ class Product(Folder, PermissionManager): ...@@ -157,12 +157,12 @@ class Product(Folder, PermissionManager):
_isBeingUsedAsAMethod_=1 _isBeingUsedAsAMethod_=1
def new_version(self, def new_version(self,
_intending=regex.compile("[.]?[0-9]+$").search, #TS _intending=re.compile(r"[.]?[0-9]+$").search, #TS
): ):
# Return a new version number based on the existing version. # Return a new version number based on the existing version.
v=str(self.version) v=str(self.version)
if not v: return '1.0' if not v: return '1.0'
if _intending(v) < 0: return v if _intending(v) is None: return v
l=rfind(v,'.') l=rfind(v,'.')
return v[:l+1]+str(1+atoi(v[l+1:])) return v[:l+1]+str(1+atoi(v[l+1:]))
......
...@@ -84,10 +84,10 @@ ...@@ -84,10 +84,10 @@
############################################################################## ##############################################################################
"""Encapsulation of date/time values""" """Encapsulation of date/time values"""
__version__='$Revision: 1.64 $'[11:-2] __version__='$Revision: 1.65 $'[11:-2]
import sys, os, math, regex, ts_regex, DateTimeZone import re,sys, os, math, DateTimeZone
from string import strip,split,upper,lower,atoi,atof,find,join from string import strip,split,upper,lower,atoi,atof,find,join
from time import time, gmtime, localtime, asctime from time import time, gmtime, localtime, asctime
from time import timezone, strftime, mktime from time import timezone, strftime, mktime
...@@ -108,7 +108,7 @@ EPOCH =(to_year+to_month+dy+(hr/24.0+mn/1440.0+sc/86400.0))*86400 ...@@ -108,7 +108,7 @@ EPOCH =(to_year+to_month+dy+(hr/24.0+mn/1440.0+sc/86400.0))*86400
jd1901 =2415385L jd1901 =2415385L
numericTimeZoneMatch=regex.compile('[+-][\0-\9][\0-\9][\0-\9][\0-\9]').match #TS numericTimeZoneMatch=re.compile(r'[+-][0-9][0-9][0-9][0-9]').match #TS
...@@ -306,7 +306,7 @@ class _cache: ...@@ -306,7 +306,7 @@ class _cache:
def __getitem__(self,k): def __getitem__(self,k):
try: n=self._zmap[lower(k)] try: n=self._zmap[lower(k)]
except KeyError: except KeyError:
if numericTimeZoneMatch(k) <= 0: if numericTimeZoneMatch(k) == None:
raise 'DateTimeError','Unrecognized timezone: %s' % k raise 'DateTimeError','Unrecognized timezone: %s' % k
return k return k
try: return self._d[n] try: return self._d[n]
...@@ -436,7 +436,7 @@ def _tzoffset(tz, t): ...@@ -436,7 +436,7 @@ def _tzoffset(tz, t):
try: try:
return DateTime._tzinfo[tz].info(t)[0] return DateTime._tzinfo[tz].info(t)[0]
except: except:
if numericTimeZoneMatch(tz) > 0: if numericTimeZoneMatch(tz) is not None:
return atoi(tz[1:3])*3600+atoi(tz[3:5])*60 return atoi(tz[1:3])*3600+atoi(tz[3:5])*60
else: else:
return 0 # ?? return 0 # ??
...@@ -717,7 +717,7 @@ class DateTime: ...@@ -717,7 +717,7 @@ class DateTime:
if tz: if tz:
try: tz=self._tzinfo._zmap[lower(tz)] try: tz=self._tzinfo._zmap[lower(tz)]
except KeyError: except KeyError:
if numericTimeZoneMatch(tz) <= 0: if numericTimeZoneMatch(tz) is None:
raise self.DateTimeError, \ raise self.DateTimeError, \
'Unknown time zone in date: %s' % arg 'Unknown time zone in date: %s' % arg
else: else:
...@@ -785,7 +785,7 @@ class DateTime: ...@@ -785,7 +785,7 @@ class DateTime:
if tz: if tz:
try: tz=self._tzinfo._zmap[lower(tz)] try: tz=self._tzinfo._zmap[lower(tz)]
except KeyError: except KeyError:
if numericTimeZoneMatch(tz) <= 0: if numericTimeZoneMatch(tz) is None:
raise self.DateTimeError, \ raise self.DateTimeError, \
'Unknown time zone: %s' % tz 'Unknown time zone: %s' % tz
else: else:
...@@ -817,9 +817,9 @@ class DateTime: ...@@ -817,9 +817,9 @@ class DateTime:
DateTimeError='DateTimeError' DateTimeError='DateTimeError'
SyntaxError ='Invalid Date-Time String' SyntaxError ='Invalid Date-Time String'
DateError ='Invalid Date Components' DateError ='Invalid Date Components'
int_pattern =ts_regex.compile('\([0-9]+\)') #TS int_pattern =re.compile(r'([0-9]+)') #AJ
flt_pattern =ts_regex.compile(':\([0-9]+\.[0-9]+\)') #TS flt_pattern =re.compile(r':([0-9]+\.[0-9]+)') #AJ
name_pattern =ts_regex.compile('\([a-z][a-z]+\)', ts_regex.casefold) #TS name_pattern =re.compile(r'([a-zA-Z]+)', re.I) #AJ
space_chars =' \t\n' space_chars =' \t\n'
delimiters ='-/.:,+' delimiters ='-/.:,+'
_month_len =((0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31), _month_len =((0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31),
...@@ -938,19 +938,17 @@ class DateTime: ...@@ -938,19 +938,17 @@ class DateTime:
if i > 0: b=i-1 if i > 0: b=i-1
else: b=i else: b=i
ts_results = fltpat.match_group(string, (1,), b) ts_results = fltpat.match(string, b)
if ts_results: if ts_results:
#s=ts_results[1][0] s=ts_results.group(1)
s=ts_results[1]
i=i+len(s) i=i+len(s)
ints.append(atof(s)) ints.append(atof(s))
continue continue
#TS #AJ
ts_results = intpat.match_group(string, (1,), i) ts_results = intpat.match(string, i)
if ts_results: if ts_results:
#s=ts_results[1][0] s=ts_results.group(0)
s=ts_results[1]
ls=len(s) ls=len(s)
i=i+ls i=i+ls
...@@ -963,9 +961,9 @@ class DateTime: ...@@ -963,9 +961,9 @@ class DateTime:
continue continue
ts_results = wordpat.match_group(string, (1,), i) ts_results = wordpat.match(string, i)
if ts_results: if ts_results:
o,s=ts_results[1],lower(ts_results[1]) o,s=ts_results.group(0),lower(ts_results.group(0))
i=i+len(s) i=i+len(s)
if i < l and string[i]=='.': i=i+1 if i < l and string[i]=='.': i=i+1
# Check for month name: # Check for month name:
...@@ -1386,10 +1384,7 @@ class DateTime: ...@@ -1386,10 +1384,7 @@ class DateTime:
def strftime(self, format): def strftime(self, format):
# Format the date/time using the *current timezone representation*. # Format the date/time using the *current timezone representation*.
diff = _tzoffset(self._tz, self._t) diff = _tzoffset(self._tz, self._t)
format = ts_regex.gsub('\(^\|[^%]\)%Z', format = re.sub('(^\|[^%])%z',
'\\1' + self._tz,
format)
format = ts_regex.gsub('\(^\|[^%]\)%z',
'\\1%+05d' % (diff / 36), '\\1%+05d' % (diff / 36),
format) format)
return strftime(format, safegmtime(self.timeTime() + diff)) return strftime(format, safegmtime(self.timeTime() + diff))
......
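
The DateTime.py hunk also illustrates the pattern-syntax translation: the old Emacs-style syntax used \( \) for groups, \| for alternation and regex.casefold for case-insensitive matching, while re uses bare ( ), | and the re.I flag. A short sketch with the converted patterns, illustration only:

    import re

    int_pattern  = re.compile(r'([0-9]+)')            # was '\([0-9]+\)'
    flt_pattern  = re.compile(r':([0-9]+\.[0-9]+)')   # was ':\([0-9]+\.[0-9]+\)'
    name_pattern = re.compile(r'([a-zA-Z]+)', re.I)   # was '\([a-z][a-z]+\)' + casefold

    assert int_pattern.match('2001/04/27').group(0) == '2001'
    assert flt_pattern.match(':30.5 GMT').group(1) == '30.5'
    assert name_pattern.match('GMT+0100').group(0) == 'GMT'

    # In re, alternation is a bare '|'; an escaped '\|' matches a literal
    # pipe character instead:
    assert re.match(r'(^|[^%])%z', 'a%z') is not None
    assert re.match(r'(^\|[^%])%z', 'a%z') is None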
...@@ -139,3 +139,6 @@ class DateTimeTests (unittest.TestCase): ...@@ -139,3 +139,6 @@ class DateTimeTests (unittest.TestCase):
def test_suite(): def test_suite():
return unittest.makeSuite(DateTimeTests) return unittest.makeSuite(DateTimeTests)
if __name__=="__main__":
unittest.TextTestRunner().run(test_suite())
...@@ -84,37 +84,37 @@ ...@@ -84,37 +84,37 @@
############################################################################## ##############################################################################
"""HTML formated DocumentTemplates """HTML formated DocumentTemplates
$Id: DT_HTML.py,v 1.24 2000/08/17 14:03:42 brian Exp $""" $Id: DT_HTML.py,v 1.25 2001/04/27 18:07:09 andreas Exp $"""
from DT_String import String, FileMixin from DT_String import String, FileMixin
import DT_String, regex import DT_String, re
from DT_Util import ParseError, str from DT_Util import ParseError, str
from string import strip, find, split, join, rfind, replace from string import strip, find, split, join, rfind, replace
class dtml_re_class: class dtml_re_class:
""" This needs to be replaced before 2.4. It's a hackaround. """
def search(self, text, start=0, def search(self, text, start=0,
name_match=regex.compile('[\0- ]*[a-zA-Z]+[\0- ]*').match, name_match=re.compile(r'[\0- ]*[a-zA-Z]+[\0- ]*').match,
end_match=regex.compile('[\0- ]*\(/\|end\)', end_match=re.compile(r'[\0- ]*(/|end)', re.I).match,
regex.casefold).match, start_search=re.compile(r'[<&]').search,
start_search=regex.compile('[<&]').search, ent_name=re.compile(r'[-a-zA-Z0-9_.]+').match,
ent_name=regex.compile('[-a-zA-Z0-9_.]+').match,
find=find, find=find,
strip=strip, strip=strip,
replace=replace, replace=replace,
): ):
while 1: while 1:
s=start_search(text, start) mo = start_search(text,start)
if s < 0: return -1 if mo is None: return None
s = mo.start(0)
if text[s:s+5] == '<!--#': if text[s:s+5] == '<!--#':
n=s+5 n=s+5
e=find(text,'-->',n) e=find(text,'-->',n)
if e < 0: return -1 if e < 0: return None
en=3 en=3
l=end_match(text,n) mo =end_match(text,n)
if l > 0: if mo is not None:
l = mo.end(0) - mo.start(0)
end=strip(text[n:n+l]) end=strip(text[n:n+l])
n=n+l n=n+l
else: end='' else: end=''
...@@ -123,7 +123,7 @@ class dtml_re_class: ...@@ -123,7 +123,7 @@ class dtml_re_class:
e=n=s+6 e=n=s+6
while 1: while 1:
e=find(text,'>',e+1) e=find(text,'>',e+1)
if e < 0: return -1 if e < 0: return None
if len(split(text[n:e],'"'))%2: if len(split(text[n:e],'"'))%2:
# check for even number of "s inside # check for even number of "s inside
break break
...@@ -135,7 +135,7 @@ class dtml_re_class: ...@@ -135,7 +135,7 @@ class dtml_re_class:
e=n=s+7 e=n=s+7
while 1: while 1:
e=find(text,'>',e+1) e=find(text,'>',e+1)
if e < 0: return -1 if e < 0: return None
if len(split(text[n:e],'"'))%2: if len(split(text[n:e],'"'))%2:
# check for even number of "s inside # check for even number of "s inside
break break
...@@ -150,32 +150,38 @@ class dtml_re_class: ...@@ -150,32 +150,38 @@ class dtml_re_class:
if e >= 0: if e >= 0:
args=text[n:e] args=text[n:e]
l=len(args) l=len(args)
if ent_name(args) == l: mo = ent_name(args)
d=self.__dict__ if mo is not None:
if text[s+5]=='-': if mo.end(0)-mo.start(0) == l:
d[1]=d['end']='' d=self.__dict__
d[2]=d['name']='var' if text[s+5]=='-':
d[0]=text[s:e+1]
d[3]=d['args']=args+' html_quote'
return s
else:
nn=find(args,'-')
if nn >= 0 and nn < l-1:
d[1]=d['end']='' d[1]=d['end']=''
d[2]=d['name']='var' d[2]=d['name']='var'
d[0]=text[s:e+1] d[0]=text[s:e+1]
args=(args[nn+1:]+' '+ d[3]=d['args']=args+' html_quote'
replace(args[:nn],'.',' ')) self._start = s
d[3]=d['args']=args return self
return s else:
nn=find(args,'-')
if nn >= 0 and nn < l-1:
d[1]=d['end']=''
d[2]=d['name']='var'
d[0]=text[s:e+1]
args=(args[nn+1:]+' '+
replace(args[:nn],'.',' '))
d[3]=d['args']=args
self._start = s
return self
start=s+1 start=s+1
continue continue
break break
l=name_match(text,n) mo = name_match(text,n)
if l < 0: return l if mo is None: return None
l = mo.end(0) - mo.start(0)
a=n+l a=n+l
name=strip(text[n:a]) name=strip(text[n:a])
...@@ -186,8 +192,8 @@ class dtml_re_class: ...@@ -186,8 +192,8 @@ class dtml_re_class:
d[1]=d['end']=end d[1]=d['end']=end
d[2]=d['name']=name d[2]=d['name']=name
d[3]=d['args']=args d[3]=d['args']=args
self._start = s
return s return self
def group(self, *args): def group(self, *args):
get=self.__dict__.get get=self.__dict__.get
...@@ -195,7 +201,8 @@ class dtml_re_class: ...@@ -195,7 +201,8 @@ class dtml_re_class:
return get(args[0]) return get(args[0])
return tuple(map(get, args)) return tuple(map(get, args))
def start(self, *args):
return self._start
class HTML(DT_String.String): class HTML(DT_String.String):
"""HTML Document Templates """HTML Document Templates
......
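
In DT_HTML.py the hand-rolled dtml_re_class used to return integer offsets the way the regex module did; after the change its search() returns the instance itself, which then answers group() and start() like a match object. A stripped-down sketch of that shape, using a hypothetical simplified tag pattern rather than the real DTML parsing above:

    import re

    class fake_match_object:
        # search() returns self (or None), so callers can use the familiar
        # mo = pat.search(...); mo.group('name'); mo.start() protocol.
        def search(self, text, start=0,
                   tag_search=re.compile(r'<([a-zA-Z]+)([^>]*)>').search):
            mo = tag_search(text, start)
            if mo is None:
                return None              # the old class returned -1
            d = self.__dict__
            d['name'] = mo.group(1)
            d['args'] = mo.group(2)
            self._start = mo.start(0)
            return self                  # the old class returned the offset

        def group(self, *args):
            get = self.__dict__.get
            if len(args) == 1:
                return get(args[0])
            return tuple(map(get, args))

        def start(self, *args):
            return self._start

    pat = fake_match_object()
    mo = pat.search('text <var name=x> more')
    assert mo is pat and mo.start() == 5 and mo.group('name') == 'var'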
...@@ -402,13 +402,13 @@ ...@@ -402,13 +402,13 @@
''' #' ''' #'
__rcs_id__='$Id: DT_In.py,v 1.48 2001/04/13 19:31:42 brian Exp $' __rcs_id__='$Id: DT_In.py,v 1.49 2001/04/27 18:07:09 andreas Exp $'
__version__='$Revision: 1.48 $'[11:-2] __version__='$Revision: 1.49 $'[11:-2]
from DT_Util import ParseError, parse_params, name_param, str from DT_Util import ParseError, parse_params, name_param, str
from DT_Util import render_blocks, InstanceDict, ValidationError, VSEval, expr_globals from DT_Util import render_blocks, InstanceDict, ValidationError, VSEval, expr_globals
from string import find, atoi, join, split, lower from string import find, atoi, join, split, lower
import ts_regex import re
from DT_InSV import sequence_variables, opt from DT_InSV import sequence_variables, opt
TupleType=type(()) TupleType=type(())
...@@ -471,11 +471,12 @@ class InClass: ...@@ -471,11 +471,12 @@ class InClass:
if type(v)==type(''): if type(v)==type(''):
try: atoi(v) try: atoi(v)
except: except:
self.start_name_re=ts_regex.compile(
self.start_name_re=re.compile(
'&+'+ '&+'+
join(map(lambda c: "[%s]" % c, v),'')+ join(map(lambda c: "[%s]" % c, v),'')+
'=[0-9]+&+') '=[0-9]+&+')
name,expr=name_param(args,'in',1) name,expr=name_param(args,'in',1)
if expr is not None: expr=expr.eval if expr is not None: expr=expr.eval
self.__name__, self.expr = name, expr self.__name__, self.expr = name, expr
......
...@@ -85,11 +85,12 @@ ...@@ -85,11 +85,12 @@
__doc__='''Sequence variables support __doc__='''Sequence variables support
$Id: DT_InSV.py,v 1.18 2001/01/16 21:57:19 chrism Exp $''' $Id: DT_InSV.py,v 1.19 2001/04/27 18:07:10 andreas Exp $'''
__version__='$Revision: 1.18 $'[11:-2] __version__='$Revision: 1.19 $'[11:-2]
from string import lower, rfind, split, join from string import lower, rfind, split, join
from math import sqrt from math import sqrt
import re
TupleType=type(()) TupleType=type(())
try: try:
import Missing import Missing
...@@ -199,6 +200,7 @@ class sequence_variables: ...@@ -199,6 +200,7 @@ class sequence_variables:
return l return l
def query(self, *ignored): def query(self, *ignored):
if self.start_name_re is None: raise KeyError, 'sequence-query' if self.start_name_re is None: raise KeyError, 'sequence-query'
query_string=self.query_string query_string=self.query_string
while query_string and query_string[:1] in '?&': while query_string and query_string[:1] in '?&':
...@@ -207,16 +209,26 @@ class sequence_variables: ...@@ -207,16 +209,26 @@ class sequence_variables:
query_string=query_string[:-1] query_string=query_string[:-1]
if query_string: if query_string:
query_string='&%s&' % query_string query_string='&%s&' % query_string
re=self.start_name_re reg=self.start_name_re
l=re.search_group(query_string, (0,))
if l: if type(reg)==type(re.compile(r"")):
v=l[1] mo = reg.search(query_string)
l=l[0] if mo is not None:
query_string=(query_string[:l]+ v = mo.group(0)
query_string[l+len(v)-1:]) l = mo.start(0)
query_string=(query_string[:l]+ query_string[l+len(v)-1:])
else:
l=reg.search_group(query_string, (0,))
if l:
v=l[1]
l=l[0]
query_string=(query_string[:l]+ query_string[l+len(v)-1:])
query_string='?'+query_string[1:] query_string='?'+query_string[1:]
else: query_string='?' else: query_string='?'
self.data['sequence-query']=query_string self.data['sequence-query']=query_string
return query_string return query_string
......
...@@ -112,8 +112,9 @@ ...@@ -112,8 +112,9 @@
as desired. as desired.
''' '''
from DT_Util import render_blocks, Eval, expr_globals, ParseError, regex, strip from DT_Util import render_blocks, Eval, expr_globals, ParseError, strip
from DT_Util import str # Probably needed due to hysterical pickles. from DT_Util import str # Probably needed due to hysterical pickles.
import re
class Let: class Let:
...@@ -149,29 +150,33 @@ class Let: ...@@ -149,29 +150,33 @@ class Let:
__call__ = render __call__ = render
def parse_let_params(text, def parse_let_params(text,
result=None, result=None,
tag='let', tag='let',
parmre=regex.compile( parmre=re.compile(
'\([\0- ]*\([^\0- =\"]+\)=\([^\0- =\"]+\)\)'), r'([\0- ]*([^\0- =\"]+)=([^\0- =\"]+))'),
qparmre=regex.compile( qparmre=re.compile(
'\([\0- ]*\([^\0- =\"]+\)="\([^"]*\)\"\)'), r'([\0- ]*([^\0- =\"]+)="([^"]*)\")'),
**parms): **parms):
result=result or [] result=result or []
if parmre.match(text) >= 0: mo = parmre.match(text)
name=parmre.group(2) mo1= qparmre.match(text)
value=parmre.group(3)
l=len(parmre.group(1)) if mo is not None:
elif qparmre.match(text) >= 0: name=mo.group(2)
name=qparmre.group(2) value=mo.group(3)
value='"%s"' % qparmre.group(3) l=len(mo.group(1))
l=len(qparmre.group(1)) elif mo1 is not None:
name=mo1.group(2)
value='"%s"' % mo1.group(3)
l=len(mo1.group(1))
else: else:
if not text or not strip(text): return result if not text or not strip(text): return result
raise ParseError, ('invalid parameter: "%s"' % text, tag) raise ParseError, ('invalid parameter: "%s"' % text, tag)
result.append((name,value)) result.append((name,value))
text=strip(text[l:]) text=strip(text[l:])
......
...@@ -82,10 +82,10 @@ ...@@ -82,10 +82,10 @@
# attributions are listed in the accompanying credits file. # attributions are listed in the accompanying credits file.
# #
############################################################################## ##############################################################################
__version__='$Revision: 1.1 $'[11:-2] __version__='$Revision: 1.2 $'[11:-2]
from DT_Util import parse_params, name_param, html_quote, str from DT_Util import parse_params, name_param, html_quote, str
import regex, string, sys, regex import string, sys
from string import find, split, join, atoi, rfind from string import find, split, join, atoi, rfind
class ReturnTag: class ReturnTag:
......
############################################################################## ##############################################################################
# #
# Zope Public License (ZPL) Version 1.0 # Zope Public License (ZPL) Version 1.0
# ------------------------------------- # -------------------------------------
# #
# Copyright (c) Digital Creations. All rights reserved. # Copyright (c) Digital Creations. All rights reserved.
# #
# This license has been certified as Open Source(tm). # This license has been certified as Open Source(tm).
# #
...@@ -82,10 +83,10 @@ ...@@ -82,10 +83,10 @@
# attributions are listed in the accompanying credits file. # attributions are listed in the accompanying credits file.
# #
############################################################################## ##############################################################################
"$Id: DT_String.py,v 1.39 2000/12/12 21:20:25 shane Exp $" "$Id: DT_String.py,v 1.40 2001/04/27 18:07:10 andreas Exp $"
from string import split, strip from string import split, strip
import regex, ts_regex import thread,re
from DT_Util import ParseError, InstanceDict, TemplateDict, render_blocks, str from DT_Util import ParseError, InstanceDict, TemplateDict, render_blocks, str
from DT_Var import Var, Call, Comment from DT_Var import Var, Call, Comment
...@@ -154,15 +155,15 @@ class String: ...@@ -154,15 +155,15 @@ class String:
tagre__roles__=() tagre__roles__=()
def tagre(self): def tagre(self):
return regex.symcomp( return re.compile(
'%(' # beginning r'%(' # beginning
'\(<name>[a-zA-Z0-9_/.-]+\)' # tag name '(?P<name>[a-zA-Z0-9_/.-]+)' # tag name
'\(' '('
'[\0- ]+' # space after tag name '[\0- ]+' # space after tag name
'\(<args>\([^)"]+\("[^"]*"\)?\)*\)' # arguments '(?P<args>([^)"]+("[^"]*")?\)*)' # arguments
'\)?' ')?'
')\(<fmt>[0-9]*[.]?[0-9]*[a-z]\|[]![]\)' # end ')(?P<fmt>[0-9]*[.]?[0-9]*[a-z]\|[]![])' # end
, regex.casefold) , re.I)
_parseTag__roles__=() _parseTag__roles__=()
def _parseTag(self, tagre, command=None, sargs='', tt=type(())): def _parseTag(self, tagre, command=None, sargs='', tt=type(())):
...@@ -227,8 +228,9 @@ class String: ...@@ -227,8 +228,9 @@ class String:
def parse(self,text,start=0,result=None,tagre=None): def parse(self,text,start=0,result=None,tagre=None):
if result is None: result=[] if result is None: result=[]
if tagre is None: tagre=self.tagre() if tagre is None: tagre=self.tagre()
l=tagre.search(text,start) mo =tagre.search(text,start)
while l >= 0: while mo :
l = mo.start(0)
try: tag, args, command, coname = self._parseTag(tagre) try: tag, args, command, coname = self._parseTag(tagre)
except ParseError, m: self.parse_error(m[0],m[1],text,l) except ParseError, m: self.parse_error(m[0],m[1],text,l)
...@@ -248,17 +250,19 @@ class String: ...@@ -248,17 +250,19 @@ class String:
result.append(r) result.append(r)
except ParseError, m: self.parse_error(m[0],tag,text,l) except ParseError, m: self.parse_error(m[0],tag,text,l)
l=tagre.search(text,start) mo = tagre.search(text,start)
text=text[start:] text=text[start:]
if text: result.append(text) if text: result.append(text)
return result return result
skip_eol__roles__=() skip_eol__roles__=()
def skip_eol(self, text, start, eol=regex.compile('[ \t]*\n')): def skip_eol(self, text, start, eol=re.compile(r'[ \t]*\n')):
# if block open is followed by newline, then skip past newline # if block open is followed by newline, then skip past newline
l=eol.match(text,start) mo =eol.match(text,start)
if l > 0: start=start+l if mo is not None:
start = start + mo.end(0) - mo.start(0)
return start return start
parse_block__roles__=() parse_block__roles__=()
...@@ -274,8 +278,9 @@ class String: ...@@ -274,8 +278,9 @@ class String:
sa=sargs sa=sargs
while 1: while 1:
l=tagre.search(text,start) mo = tagre.search(text,start)
if l < 0: self.parse_error('No closing tag', stag, text, sloc) if mo is None: self.parse_error('No closing tag', stag, text, sloc)
l = mo.start(0)
try: tag, args, command, coname= self._parseTag(tagre,scommand,sa) try: tag, args, command, coname= self._parseTag(tagre,scommand,sa)
except ParseError, m: self.parse_error(m[0],m[1], text, l) except ParseError, m: self.parse_error(m[0],m[1], text, l)
...@@ -312,8 +317,9 @@ class String: ...@@ -312,8 +317,9 @@ class String:
parse_close__roles__=() parse_close__roles__=()
def parse_close(self, text, start, tagre, stag, sloc, scommand, sa): def parse_close(self, text, start, tagre, stag, sloc, scommand, sa):
while 1: while 1:
l=tagre.search(text,start) mo = tagre.search(text,start)
if l < 0: self.parse_error('No closing tag', stag, text, sloc) if mo is None: self.parse_error('No closing tag', stag, text, sloc)
l = mo.start(0)
try: tag, args, command, coname= self._parseTag(tagre,scommand,sa) try: tag, args, command, coname= self._parseTag(tagre,scommand,sa)
except ParseError, m: self.parse_error(m[0],m[1], text, l) except ParseError, m: self.parse_error(m[0],m[1], text, l)
...@@ -401,7 +407,7 @@ class String: ...@@ -401,7 +407,7 @@ class String:
cook__roles__=() cook__roles__=()
def cook(self, def cook(self,
cooklock=ts_regex.allocate_lock(), cooklock=thread.allocate_lock(),
): ):
cooklock.acquire() cooklock.acquire()
try: try:
......
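
DT_String.tagre() above shows the named-group translation: regex.symcomp spelled named groups as \(<name>...\), which re writes as (?P<name>...), with regex.casefold again becoming re.I. A reduced sketch of a named-group pattern for the %(name)fmt form, illustration only (the real tagre pattern is more involved):

    import re

    tag = re.compile(
        r'%\('                               # literal '%('
        r'(?P<name>[a-zA-Z0-9_/.-]+)'        # tag name
        r'\)'                                # literal ')'
        r'(?P<fmt>[0-9]*[.]?[0-9]*[a-z])',   # format code
        re.I)

    mo = tag.search('total: %(amount).2f widgets')
    assert mo is not None
    assert mo.group('name') == 'amount'
    assert mo.group('fmt') == '.2f'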
...@@ -82,10 +82,11 @@ ...@@ -82,10 +82,11 @@
# attributions are listed in the accompanying credits file. # attributions are listed in the accompanying credits file.
# #
############################################################################## ##############################################################################
'''$Id: DT_Util.py,v 1.72 2001/01/22 16:36:16 brian Exp $''' '''$Id: DT_Util.py,v 1.73 2001/04/27 18:07:11 andreas Exp $'''
__version__='$Revision: 1.72 $'[11:-2] __version__='$Revision: 1.73 $'[11:-2]
import regex, string, math, os import string, math, os
import re
from string import strip, join, atoi, lower, split, find from string import strip, join, atoi, lower, split, find
import VSEval import VSEval
...@@ -449,14 +450,14 @@ ListType=type([]) ...@@ -449,14 +450,14 @@ ListType=type([])
def parse_params(text, def parse_params(text,
result=None, result=None,
tag='', tag='',
unparmre=regex.compile( unparmre=re.compile(
'\([\0- ]*\([^\0- =\"]+\)\)'), r'([\0- ]*([^\0- =\"]+))'),
qunparmre=regex.compile( qunparmre=re.compile(
'\([\0- ]*\("[^"]*"\)\)'), r'([\0- ]*("[^"]*"))'),
parmre=regex.compile( parmre=re.compile(
'\([\0- ]*\([^\0- =\"]+\)=\([^\0- =\"]+\)\)'), r'([\0- ]*([^\0- =\"]+)=([^\0- =\"]+))'),
qparmre=regex.compile( qparmre=re.compile(
'\([\0- ]*\([^\0- =\"]+\)="\([^"]*\)\"\)'), r'([\0- ]*([^\0- =\"]+)="([^"]*)\")'),
**parms): **parms):
"""Parse tag parameters """Parse tag parameters
...@@ -482,17 +483,25 @@ def parse_params(text, ...@@ -482,17 +483,25 @@ def parse_params(text,
result=result or {} result=result or {}
if parmre.match(text) >= 0: # HACK - we precalculate all matches. Maybe we don't need them
name=lower(parmre.group(2)) # all. This should be fixed for performance issues
value=parmre.group(3)
l=len(parmre.group(1)) mo_p = parmre.match(text)
elif qparmre.match(text) >= 0: mo_q = qparmre.match(text)
name=lower(qparmre.group(2)) mo_unp = unparmre.match(text)
value=qparmre.group(3) mo_unq = qunparmre.match(text)
l=len(qparmre.group(1))
elif unparmre.match(text) >= 0: if mo_p:
name=unparmre.group(2) name=lower(mo_p.group(2))
l=len(unparmre.group(1)) value=mo_p.group(3)
l=len(mo_p.group(1))
elif mo_q:
name=lower(mo_q.group(2))
value=mo_q.group(3)
l=len(mo_q.group(1))
elif mo_unp:
name=mo_unp.group(2)
l=len(mo_unp.group(1))
if result: if result:
if parms.has_key(name): if parms.has_key(name):
if parms[name] is None: raise ParseError, ( if parms[name] is None: raise ParseError, (
...@@ -504,9 +513,9 @@ def parse_params(text, ...@@ -504,9 +513,9 @@ def parse_params(text,
else: else:
result['']=name result['']=name
return apply(parse_params,(text[l:],result),parms) return apply(parse_params,(text[l:],result),parms)
elif qunparmre.match(text) >= 0: elif mo_unq:
name=qunparmre.group(2) name=mo_unq.group(2)
l=len(qunparmre.group(1)) l=len(mo_unq.group(1))
if result: raise ParseError, ( if result: raise ParseError, (
'Invalid attribute name, "%s"' % name, tag) 'Invalid attribute name, "%s"' % name, tag)
else: result['']=name else: result['']=name
......
...@@ -217,11 +217,11 @@ Evaluating expressions without rendering results ...@@ -217,11 +217,11 @@ Evaluating expressions without rendering results
''' # ' ''' # '
__rcs_id__='$Id: DT_Var.py,v 1.37 2000/09/05 22:03:12 amos Exp $' __rcs_id__='$Id: DT_Var.py,v 1.38 2001/04/27 18:07:11 andreas Exp $'
__version__='$Revision: 1.37 $'[11:-2] __version__='$Revision: 1.38 $'[11:-2]
from DT_Util import parse_params, name_param, html_quote, str from DT_Util import parse_params, name_param, html_quote, str
import regex, string, sys, regex import re, string, sys
from string import find, split, join, atoi, rfind from string import find, split, join, atoi, rfind
from urllib import quote, quote_plus from urllib import quote, quote_plus
...@@ -373,8 +373,8 @@ def dollars_and_cents(v, name='(Unknown name)', md={}): ...@@ -373,8 +373,8 @@ def dollars_and_cents(v, name='(Unknown name)', md={}):
except: return '' except: return ''
def thousands_commas(v, name='(Unknown name)', md={}, def thousands_commas(v, name='(Unknown name)', md={},
thou=regex.compile( thou=re.compile(
"\([0-9]\)\([0-9][0-9][0-9]\([,.]\|$\)\)").search): r"([0-9])([0-9][0-9][0-9]([,.]\|$))").search):
v=str(v) v=str(v)
vl=split(v,'.') vl=split(v,'.')
if not vl: return v if not vl: return v
...@@ -382,8 +382,9 @@ def thousands_commas(v, name='(Unknown name)', md={}, ...@@ -382,8 +382,9 @@ def thousands_commas(v, name='(Unknown name)', md={},
del vl[0] del vl[0]
if vl: s='.'+join(vl,'.') if vl: s='.'+join(vl,'.')
else: s='' else: s=''
l=thou(v) mo=thou(v)
while l >= 0: while mo is not None:
l = mo.start(0)
v=v[:l+1]+','+v[l+1:] v=v[:l+1]+','+v[l+1:]
l=thou(v) l=thou(v)
return v+s return v+s
......
...@@ -84,12 +84,12 @@ ...@@ -84,12 +84,12 @@
############################################################################## ##############################################################################
"""Help system support module""" """Help system support module"""
__version__='$Revision: 1.7 $'[11:-2] __version__='$Revision: 1.8 $'[11:-2]
import Globals, Acquisition import Globals, Acquisition
import StructuredText.StructuredText import StructuredText.StructuredText
import sys, os, string, regex import sys, os, string, re
stx_class=StructuredText.StructuredText.HTML stx_class=StructuredText.StructuredText.HTML
...@@ -282,9 +282,9 @@ class classobject(object): ...@@ -282,9 +282,9 @@ class classobject(object):
# needs to be tested !!! The conversion of reconvert.convert looks suspicious
pre_match=regex.compile('[A-Za-z0-9_]*([^)]*)[ -]*').match #TS sig_match=re.compile(r'[\w]*\([^)]*\)').match # matches "f(arg1, arg2)"
sig_match=regex.compile('[A-Za-z0-9_]*([^)]*)').match #TS pre_match=re.compile(r'[\w]*\([^)]*\)[ -]*').match # with ' ' or '-' included
class methodobject(object): class methodobject(object):
...@@ -309,9 +309,9 @@ class methodobject(object): ...@@ -309,9 +309,9 @@ class methodobject(object):
if hasattr(func, 'func_code'): if hasattr(func, 'func_code'):
if hasattr(func.func_code, 'co_varnames'): if hasattr(func.func_code, 'co_varnames'):
return doc return doc
n=pre_match(doc) mo=pre_match(doc)
if n > -1: if mo is not None:
return doc[n:] return doc[mo.end(0):]
return doc return doc
def get_signaturex(self): def get_signaturex(self):
...@@ -348,9 +348,9 @@ class methodobject(object): ...@@ -348,9 +348,9 @@ class methodobject(object):
doc=func.__doc__ doc=func.__doc__
if not doc: doc='' if not doc: doc=''
doc=string.strip(doc) doc=string.strip(doc)
n=sig_match(doc) mo=sig_match(doc)
if n > -1: if mo is not None:
return doc[:n] return doc[:mo.end(0)]
return '%s()' % name return '%s()' % name
......
...@@ -84,7 +84,7 @@ ...@@ -84,7 +84,7 @@
############################################################################## ##############################################################################
"""DTML Method objects.""" """DTML Method objects."""
__version__='$Revision: 1.62 $'[11:-2] __version__='$Revision: 1.63 $'[11:-2]
import History import History
from Globals import HTML, DTMLFile, MessageDialog from Globals import HTML, DTMLFile, MessageDialog
...@@ -402,7 +402,7 @@ class DTMLMethod(HTML, Acquisition.Implicit, RoleManager, ...@@ -402,7 +402,7 @@ class DTMLMethod(HTML, Acquisition.Implicit, RoleManager,
import re import re
from string import find, strip from string import find, strip
token = "[a-zA-Z0-9!#$%&'*+\-.\\\\^_`|~]+" token = "[a-zA-Z0-9!#$%&'*+\-.\\\\^_`|~]+"
hdr_start = re.compile('(%s):(.*)' % token).match hdr_start = re.compile(r'(%s):(.*)' % token).match
def decapitate(html, RESPONSE=None): def decapitate(html, RESPONSE=None):
headers = [] headers = []
......
...@@ -84,12 +84,12 @@ ...@@ -84,12 +84,12 @@
############################################################################## ##############################################################################
__doc__="""Object Manager __doc__="""Object Manager
$Id: ObjectManager.py,v 1.135 2001/04/26 00:14:15 andreas Exp $""" $Id: ObjectManager.py,v 1.136 2001/04/27 18:07:12 andreas Exp $"""
__version__='$Revision: 1.135 $'[11:-2] __version__='$Revision: 1.136 $'[11:-2]
import App.Management, Acquisition, Globals, CopySupport, Products import App.Management, Acquisition, Globals, CopySupport, Products
import os, App.FactoryDispatcher, ts_regex, Products import os, App.FactoryDispatcher, re, Products
from OFS.Traversable import Traversable from OFS.Traversable import Traversable
from Globals import DTMLFile, Persistent from Globals import DTMLFile, Persistent
from Globals import MessageDialog, default__class_init__ from Globals import MessageDialog, default__class_init__
...@@ -109,7 +109,7 @@ customImporters={ ...@@ -109,7 +109,7 @@ customImporters={
XMLExportImport.magic: XMLExportImport.importXML, XMLExportImport.magic: XMLExportImport.importXML,
} }
bad_id=ts_regex.compile('[^a-zA-Z0-9-_~\,\. ]').search #TS bad_id=re.compile(r'[^a-zA-Z0-9-_~,. ]').search #TS
# Global constants: __replaceable__ flags: # Global constants: __replaceable__ flags:
NOT_REPLACEABLE = 0 NOT_REPLACEABLE = 0
...@@ -126,7 +126,7 @@ def checkValidId(self, id, allow_dup=0): ...@@ -126,7 +126,7 @@ def checkValidId(self, id, allow_dup=0):
# set to false before the object is added. # set to false before the object is added.
if not id or (type(id) != type('')): if not id or (type(id) != type('')):
raise BadRequestException, 'Empty or invalid id specified.' raise BadRequestException, 'Empty or invalid id specified.'
if bad_id(id) != -1: if bad_id(id) is not None:
raise BadRequestException, ( raise BadRequestException, (
'The id "%s" contains characters illegal in URLs.' % id) 'The id "%s" contains characters illegal in URLs.' % id)
if id[0]=='_': raise BadRequestException, ( if id[0]=='_': raise BadRequestException, (
......
...@@ -89,10 +89,10 @@ Aqueduct database adapters, etc. ...@@ -89,10 +89,10 @@ Aqueduct database adapters, etc.
This module can also be used as a simple template for implementing new This module can also be used as a simple template for implementing new
item types. item types.
$Id: SimpleItem.py,v 1.88 2001/04/18 18:00:07 chrism Exp $''' $Id: SimpleItem.py,v 1.89 2001/04/27 18:07:13 andreas Exp $'''
__version__='$Revision: 1.88 $'[11:-2] __version__='$Revision: 1.89 $'[11:-2]
import ts_regex, sys, Globals, App.Management, Acquisition, App.Undo import re, sys, Globals, App.Management, Acquisition, App.Undo
import AccessControl.Role, AccessControl.Owned, App.Common import AccessControl.Role, AccessControl.Owned, App.Common
from webdav.Resource import Resource from webdav.Resource import Resource
from ExtensionClass import Base from ExtensionClass import Base
...@@ -213,7 +213,7 @@ class Item(Base, Resource, CopySource, App.Management.Tabs, Traversable, ...@@ -213,7 +213,7 @@ class Item(Base, Resource, CopySource, App.Management.Tabs, Traversable,
self, client=None, REQUEST={}, self, client=None, REQUEST={},
error_type=None, error_value=None, tb=None, error_type=None, error_value=None, tb=None,
error_tb=None, error_message='', error_tb=None, error_message='',
tagSearch=ts_regex.compile('[a-zA-Z]>').search): tagSearch=re.compile(r'[a-zA-Z]>').search):
try: try:
if error_type is None: error_type =sys.exc_info()[0] if error_type is None: error_type =sys.exc_info()[0]
...@@ -245,10 +245,10 @@ class Item(Base, Resource, CopySource, App.Management.Tabs, Traversable, ...@@ -245,10 +245,10 @@ class Item(Base, Resource, CopySource, App.Management.Tabs, Traversable,
except: except:
pass pass
else: else:
if tagSearch(s) >= 0: if tagSearch(s) is not None:
error_message=error_value error_message=error_value
elif (type(error_value) is StringType elif (type(error_value) is StringType
and tagSearch(error_value) >= 0): and tagSearch(error_value) is not None):
error_message=error_value error_message=error_value
if client is None: client=self if client is None: client=self
......
...@@ -83,13 +83,13 @@ ...@@ -83,13 +83,13 @@
# #
############################################################################## ##############################################################################
"""A utility module for content-type handling.""" """A utility module for content-type handling."""
__version__='$Revision: 1.13 $'[11:-2] __version__='$Revision: 1.14 $'[11:-2]
from string import split, strip, lower, find from string import split, strip, lower, find
import ts_regex, mimetypes import re, mimetypes
find_binary=ts_regex.compile('[\0-\7]').search find_binary=re.compile('[\0-\7]').search
def text_type(s): def text_type(s):
# Yuk. See if we can figure out the type by content. # Yuk. See if we can figure out the type by content.
...@@ -151,7 +151,7 @@ def guess_content_type(name='', body='', default=None): ...@@ -151,7 +151,7 @@ def guess_content_type(name='', body='', default=None):
type, enc=mimetypes.guess_type(name) type, enc=mimetypes.guess_type(name)
if type is None: if type is None:
if body: if body:
if find_binary(body) >= 0: if find_binary(body) is not None:
type=default or 'application/octet-stream' type=default or 'application/octet-stream'
else: else:
type=(default or text_type(body) type=(default or text_type(body)
......
...@@ -86,9 +86,8 @@ ...@@ -86,9 +86,8 @@
from Persistence import Persistent from Persistence import Persistent
import Acquisition import Acquisition
import ExtensionClass import ExtensionClass
from SearchIndex import UnIndex, UnTextIndex, UnKeywordIndex, Query from SearchIndex import UnIndex, UnTextIndex, UnKeywordIndex
from SearchIndex.Lexicon import Lexicon from SearchIndex.Lexicon import Lexicon
import regex, pdb
from MultiMapping import MultiMapping from MultiMapping import MultiMapping
from string import lower from string import lower
import Record import Record
...@@ -106,19 +105,6 @@ from SearchIndex.randid import randid ...@@ -106,19 +105,6 @@ from SearchIndex.randid import randid
import time import time
def orify(seq,
query_map={
type(regex.compile('')): Query.Regex,
type(''): Query.String,
}):
subqueries=[]
for q in seq:
try: q=query_map[type(q)](q)
except KeyError: q=Query.Cmp(q)
subqueries.append(q)
return apply(Query.Or,tuple(subqueries))
class Catalog(Persistent, Acquisition.Implicit, ExtensionClass.Base): class Catalog(Persistent, Acquisition.Implicit, ExtensionClass.Base):
""" An Object Catalog """ An Object Catalog
...@@ -607,13 +593,7 @@ class Catalog(Persistent, Acquisition.Implicit, ExtensionClass.Base): ...@@ -607,13 +593,7 @@ class Catalog(Persistent, Acquisition.Implicit, ExtensionClass.Base):
return used return used
def searchResults(self, REQUEST=None, used=None, def searchResults(self, REQUEST=None, used=None, **kw):
query_map={
type(regex.compile('')): Query.Regex,
type([]): orify,
type(''): Query.String,
}, **kw):
# Get search arguments: # Get search arguments:
if REQUEST is None and not kw: if REQUEST is None and not kw:
try: REQUEST=self.REQUEST try: REQUEST=self.REQUEST
......
...@@ -90,17 +90,15 @@ from OFS.Folder import Folder ...@@ -90,17 +90,15 @@ from OFS.Folder import Folder
from OFS.FindSupport import FindSupport from OFS.FindSupport import FindSupport
from DateTime import DateTime from DateTime import DateTime
from SearchIndex import Query from SearchIndex import Query
import string, regex, urlparse, urllib, os, sys, time import string, urlparse, urllib, os, sys, time
import Products import Products
from Acquisition import Implicit from Acquisition import Implicit
from Persistence import Persistent from Persistence import Persistent
from DocumentTemplate.DT_Util import InstanceDict, TemplateDict from DocumentTemplate.DT_Util import InstanceDict, TemplateDict
from DocumentTemplate.DT_Util import Eval, expr_globals from DocumentTemplate.DT_Util import Eval, expr_globals
from AccessControl.Permission import name_trans from AccessControl.Permission import name_trans
from Catalog import Catalog, orify, CatalogError from Catalog import Catalog, CatalogError
from SearchIndex import UnIndex, UnTextIndex
from Vocabulary import Vocabulary from Vocabulary import Vocabulary
from Shared.DC.ZRDB.TM import TM
from AccessControl import getSecurityManager from AccessControl import getSecurityManager
from zLOG import LOG, ERROR from zLOG import LOG, ERROR
...@@ -518,21 +516,14 @@ class ZCatalog(Folder, Persistent, Implicit): ...@@ -518,21 +516,14 @@ class ZCatalog(Folder, Persistent, Implicit):
'width': 8}) 'width': 8})
return r return r
def searchResults(self, REQUEST=None, used=None, def searchResults(self, REQUEST=None, used=None, **kw):
query_map={
type(regex.compile('')): Query.Regex,
type([]): orify,
type(()): orify,
type(''): Query.String,
}, **kw):
""" """
Search the catalog according to the ZTables search interface. Search the catalog according to the ZTables search interface.
Search terms can be passed in the REQUEST or as keyword Search terms can be passed in the REQUEST or as keyword
arguments. arguments.
""" """
return apply(self._catalog.searchResults, return apply(self._catalog.searchResults, (REQUEST,used), kw)
(REQUEST,used, query_map), kw)
__call__=searchResults __call__=searchResults
......
...@@ -46,7 +46,7 @@ ...@@ -46,7 +46,7 @@
</td> </td>
<td align="left" valign="top"> <td align="left" valign="top">
<div class="form-text"> <div class="form-text">
<dtml-with name="aq_self" only> <dtml-with sequence-item only>
<dtml-if name="meta_type"> <dtml-if name="meta_type">
<dtml-var name="meta_type" size="15"> <dtml-var name="meta_type" size="15">
<dtml-else> <dtml-else>
......
...@@ -202,7 +202,7 @@ Notes on a new text index design ...@@ -202,7 +202,7 @@ Notes on a new text index design
space. space.
""" """
__version__='$Revision: 1.28 $'[11:-2] __version__='$Revision: 1.29 $'[11:-2]
#XXX I strongly suspect that this is broken, but I'm not going to fix it. :( #XXX I strongly suspect that this is broken, but I'm not going to fix it. :(
...@@ -212,7 +212,7 @@ from BTrees.IIBTree import IISet, IIBucket ...@@ -212,7 +212,7 @@ from BTrees.IIBTree import IISet, IIBucket
import operator import operator
from Splitter import Splitter from Splitter import Splitter
from string import strip from string import strip
import string, ts_regex, regex import string, re
from Lexicon import Lexicon, stop_word_dict from Lexicon import Lexicon, stop_word_dict
from ResultList import ResultList from ResultList import ResultList
...@@ -463,7 +463,7 @@ QueryError='TextIndex.QueryError' ...@@ -463,7 +463,7 @@ QueryError='TextIndex.QueryError'
def query(s, index, default_operator = Or, def query(s, index, default_operator = Or,
ws = (string.whitespace,)): ws = (string.whitespace,)):
# First replace any occurences of " and not " with " andnot " # First replace any occurences of " and not " with " andnot "
s = ts_regex.gsub('[%s]+and[%s]+not[%s]+' % (ws * 3), ' andnot ', s) s = re.sub('[%s]+and[%s]+not[%s]+' % (ws * 3), ' andnot ', s)
q = parse(s) q = parse(s)
q = parse2(q, default_operator) q = parse2(q, default_operator)
return evaluate(q, index) return evaluate(q, index)
...@@ -515,13 +515,13 @@ def parse2(q, default_operator, ...@@ -515,13 +515,13 @@ def parse2(q, default_operator,
return q return q
def parens(s, parens_re = regex.compile('(\|)').search): def parens(s, parens_re = re.compile(r'(\|)').search):
index=open_index=paren_count = 0 index=open_index=paren_count = 0
while 1: while 1:
index = parens_re(s, index) index = parens_re(s, index)
if index < 0 : break if index is None : break
if s[index] == '(': if s[index] == '(':
paren_count = paren_count + 1 paren_count = paren_count + 1
...@@ -543,7 +543,7 @@ def parens(s, parens_re = regex.compile('(\|)').search): ...@@ -543,7 +543,7 @@ def parens(s, parens_re = regex.compile('(\|)').search):
def quotes(s, ws = (string.whitespace,)): def quotes(s, ws = (string.whitespace,)):
# split up quoted regions # split up quoted regions
splitted = ts_regex.split(s, '[%s]*\"[%s]*' % (ws * 2)) splitted = re.split( '[%s]*\"[%s]*' % (ws * 2),s)
split=string.split split=string.split
if (len(splitted) > 1): if (len(splitted) > 1):
......
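
The text-index hunks also swap the argument order of split: ts_regex.split(s, pattern) becomes re.split(pattern, s), while gsub and re.sub already agree on (pattern, repl, s). A small sketch, illustration only:

    import re, string

    ws = (string.whitespace,)

    # was: ts_regex.split(s, '[%s]*\"[%s]*' % (ws * 2))   -- string first
    s = 'foo "bar baz" qux'
    assert re.split('[%s]*\"[%s]*' % (ws * 2), s) == ['foo', 'bar baz', 'qux']

    # was: ts_regex.gsub('[%s]+and[%s]+not[%s]+' % (ws * 3), ' andnot ', s)
    assert re.sub('[%s]+and[%s]+not[%s]+' % (ws * 3), ' andnot ',
                  'spam and not eggs') == 'spam andnot eggs'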
...@@ -91,10 +91,10 @@ undo information so that objects can be unindexed when the old value ...@@ -91,10 +91,10 @@ undo information so that objects can be unindexed when the old value
is no longer known. is no longer known.
""" """
__version__ = '$Revision: 1.46 $'[11:-2] __version__ = '$Revision: 1.47 $'[11:-2]
import string, regex, regsub, ts_regex import string, re
import operator import operator
from Globals import Persistent from Globals import Persistent
...@@ -558,7 +558,7 @@ class UnTextIndex(Persistent, Implicit): ...@@ -558,7 +558,7 @@ class UnTextIndex(Persistent, Implicit):
parsed again, then the whole thing is 'evaluated'. """ parsed again, then the whole thing is 'evaluated'. """
# First replace any occurences of " and not " with " andnot " # First replace any occurences of " and not " with " andnot "
s = ts_regex.gsub( s = re.sub(
'[%s]+[aA][nN][dD][%s]*[nN][oO][tT][%s]+' % (ws * 3), '[%s]+[aA][nN][dD][%s]*[nN][oO][tT][%s]+' % (ws * 3),
' andnot ', s) ' andnot ', s)
...@@ -700,13 +700,13 @@ def parse2(q, default_operator, ...@@ -700,13 +700,13 @@ def parse2(q, default_operator,
return q return q
def parens(s, parens_re=regex.compile('(\|)').search): def parens(s, parens_re=re.compile(r'(\|)').search):
index = open_index = paren_count = 0 index = open_index = paren_count = 0
while 1: while 1:
index = parens_re(s, index) index = parens_re(s, index)
if index < 0 : break if index is None : break
if s[index] == '(': if s[index] == '(':
paren_count = paren_count + 1 paren_count = paren_count + 1
...@@ -728,7 +728,7 @@ def parens(s, parens_re=regex.compile('(\|)').search): ...@@ -728,7 +728,7 @@ def parens(s, parens_re=regex.compile('(\|)').search):
def quotes(s, ws=(string.whitespace,)): def quotes(s, ws=(string.whitespace,)):
# split up quoted regions # split up quoted regions
splitted = ts_regex.split(s, '[%s]*\"[%s]*' % (ws * 2)) splitted = re.split( '[%s]*\"[%s]*' % (ws * 2),s)
split=string.split split=string.split
if (len(splitted) > 1): if (len(splitted) > 1):
...@@ -752,3 +752,4 @@ def quotes(s, ws=(string.whitespace,)): ...@@ -752,3 +752,4 @@ def quotes(s, ws=(string.whitespace,)):
splitted = filter(None, split(s)) splitted = filter(None, split(s))
return splitted return splitted
...@@ -109,7 +109,7 @@ class NameAssignments: ...@@ -109,7 +109,7 @@ class NameAssignments:
('name_subpath', 'self._getTraverseSubpath()'), ('name_subpath', 'self._getTraverseSubpath()'),
) )
_isLegalName = re.compile('_$|[a-zA-Z][a-zA-Z0-9_]*$').match _isLegalName = re.compile(r'_$|[a-zA-Z][a-zA-Z0-9_]*$').match
_asgns = {} _asgns = {}
__allow_access_to_unprotected_subobjects__ = 1 __allow_access_to_unprotected_subobjects__ = 1
......
...@@ -84,12 +84,12 @@ ...@@ -84,12 +84,12 @@
############################################################################## ##############################################################################
__doc__='''Shared classes and functions __doc__='''Shared classes and functions
$Id: Aqueduct.py,v 1.44 2001/01/15 16:07:39 brian Exp $''' $Id: Aqueduct.py,v 1.45 2001/04/27 18:07:16 andreas Exp $'''
__version__='$Revision: 1.44 $'[11:-2] __version__='$Revision: 1.45 $'[11:-2]
import Globals, os import Globals, os
from Globals import Persistent from Globals import Persistent
import DocumentTemplate, DateTime, ts_regex, regex, string import DocumentTemplate, DateTime, re, string
import binascii, Acquisition import binascii, Acquisition
DateTime.now=DateTime.DateTime DateTime.now=DateTime.DateTime
from cStringIO import StringIO from cStringIO import StringIO
...@@ -275,7 +275,7 @@ custom_default_report_src=DocumentTemplate.File( ...@@ -275,7 +275,7 @@ custom_default_report_src=DocumentTemplate.File(
os.path.join(dtml_dir,'customDefaultReport.dtml')) os.path.join(dtml_dir,'customDefaultReport.dtml'))
def custom_default_report(id, result, action='', no_table=0, def custom_default_report(id, result, action='', no_table=0,
goofy=regex.compile('[^a-zA-Z0-9_]').search goofy=re.compile(r'\W').search
): ):
columns=result._searchable_result_columns() columns=result._searchable_result_columns()
__traceback_info__=columns __traceback_info__=columns
...@@ -294,7 +294,8 @@ def custom_default_report(id, result, action='', no_table=0, ...@@ -294,7 +294,8 @@ def custom_default_report(id, result, action='', no_table=0,
row=[] row=[]
for c in columns: for c in columns:
n=c['name'] n=c['name']
if goofy(n) >= 0: n='expr="_[\'%s]"' % (`'"'+n`[2:]) if goofy(n) is not None:
n='expr="_[\'%s]"' % (`'"'+n`[2:])
row.append(' %s<dtml-var %s%s>%s' row.append(' %s<dtml-var %s%s>%s'
% (td,n,c['type']!='s' and ' null=""' or '',_td)) % (td,n,c['type']!='s' and ' null=""' or '',_td))
...@@ -342,12 +343,12 @@ class Args: ...@@ -342,12 +343,12 @@ class Args:
def parse(text, def parse(text,
result=None, result=None,
keys=None, keys=None,
unparmre=ts_regex.compile( unparmre=re.compile(
'\([\0- ]*\([^\0- =\"]+\)\)'), r'([\0- ]*([^\0- =\"]+))'),
parmre=ts_regex.compile( parmre=re.compile(
'\([\0- ]*\([^\0- =\"]+\)=\([^\0- =\"]+\)\)'), r'([\0- ]*([^\0- =\"]+)=([^\0- =\"]+))'),
qparmre=ts_regex.compile( qparmre=re.compile(
'\([\0- ]*\([^\0- =\"]+\)="\([^"]*\)\"\)'), r'([\0- ]*([^\0- =\"]+)="([^"]*)\")'),
): ):
if result is None: if result is None:
...@@ -356,25 +357,22 @@ def parse(text, ...@@ -356,25 +357,22 @@ def parse(text,
__traceback_info__=text __traceback_info__=text
ts_results = parmre.match_group(text, (1,2,3)) mo = parmre.match(text)
if ts_results: if mo:
start, grps = ts_results name=mo.group(1)
name=grps[1] value={'default':mo.group(2)}
value={'default':grps[2]} l=len(mo.group(0))
l=len(grps[0])
else: else:
ts_results = qparmre.match_group(text, (1,2,3)) mo = qparmre.match(text)
if ts_results: if mo:
start, grps = ts_results name=mo.group(0)
name=grps[1] value={'default':mo.group(2)}
value={'default':grps[2]} l=len(mo.group(0))
l=len(grps[0])
else: else:
ts_results = unparmre.match_group(text, (1,2)) mo = unparmre.match(text)
if ts_results: if ts_results:
start, grps = ts_results name=mo.group(1)
name=grps[1] l=len(mo.group(0))
l=len(grps[0])
value={} value={}
else: else:
if not text or not strip(text): return Args(result,keys) if not text or not strip(text): return Args(result,keys)
...@@ -409,22 +407,22 @@ def nicify(name): ...@@ -409,22 +407,22 @@ def nicify(name):
return string.upper(name[:1])+name[1:] return string.upper(name[:1])+name[1:]
def decapitate(html, RESPONSE=None, def decapitate(html, RESPONSE=None,
header_re=ts_regex.compile( header_re=re.compile(
'\(\(' r'(('
'[^\0- <>:]+:[^\n]*\n' '[^\0- <>:]+:[^\n]*\n'
'\|' '\|'
'[ \t]+[^\0- ][^\n]*\n' '[ \t]+[^\0- ][^\n]*\n'
'\)+\)[ \t]*\n\([\0-\377]+\)' ')+)[ \t]*\n([\0-\377]+)'
), ),
space_re=ts_regex.compile('\([ \t]+\)'), space_re=re.compile(r'([ \t]+)'),
name_re=ts_regex.compile('\([^\0- <>:]+\):\([^\n]*\)'), name_re=re.compile(r'([^\0- <>:]+):([^\n]*)'),
): ):
ts_results = header_re.match_group(html, (1,3)) mo = header_re.match(html)
if not ts_results: return html if mo is None: return html
headers, html = ts_results[1] headers, html = mo.group(1,3)
headers=string.split(headers,'\n') headers=string.split(headers,'\n')
...@@ -433,18 +431,18 @@ def decapitate(html, RESPONSE=None, ...@@ -433,18 +431,18 @@ def decapitate(html, RESPONSE=None,
if not headers[i]: if not headers[i]:
del headers[i] del headers[i]
else: else:
ts_results = space_re.match_group(headers[i], (1,)) mo = space_re.match(headers[i])
if ts_results: if mo:
headers[i-1]="%s %s" % (headers[i-1], headers[i-1]="%s %s" % (headers[i-1],
headers[i][len(ts_reults[1]):]) headers[i][len(mo.group(1)):])
del headers[i] del headers[i]
else: else:
i=i+1 i=i+1
for i in range(len(headers)): for i in range(len(headers)):
ts_results = name_re.match_group(headers[i], (1,2)) mo = name_re.match(headers[i])
if ts_reults: if mo:
k, v = ts_reults[1] k,v = mo.group(1,2)
v=string.strip(v) v=string.strip(v)
else: else:
raise ValueError, 'Invalid Header (%d): %s ' % (i,headers[i]) raise ValueError, 'Invalid Header (%d): %s ' % (i,headers[i])
......
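
The decapitate() rewrite above also leans on re match objects accepting several group numbers at once: mo.group(1, 3) returns a tuple, standing in for the old match_group(text, (1, 3)) helper. A small sketch using the converted name_re pattern from the hunk, illustration only:

    import re

    name_re = re.compile(r'([^\0- <>:]+):([^\n]*)')

    mo = name_re.match('Content-Type: text/html')
    assert mo is not None
    k, v = mo.group(1, 2)          # several groups at once -> tuple
    assert k == 'Content-Type' and v.strip() == 'text/html'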
...@@ -85,11 +85,10 @@ ...@@ -85,11 +85,10 @@
__doc__='''Class for reading RDB files __doc__='''Class for reading RDB files
$Id: RDB.py,v 1.29 2000/12/21 17:12:00 brian Exp $''' $Id: RDB.py,v 1.30 2001/04/27 18:07:16 andreas Exp $'''
__version__='$Revision: 1.29 $'[11:-2] __version__='$Revision: 1.30 $'[11:-2]
import regex, regsub from string import split, strip, lower, upper, atof, atoi, atol, find, join,find
from string import split, strip, lower, upper, atof, atoi, atol, find, join
import DateTime import DateTime
from Missing import MV from Missing import MV
from array import array from array import array
...@@ -136,8 +135,8 @@ class DatabaseResults: ...@@ -136,8 +135,8 @@ class DatabaseResults:
self._parent=parent self._parent=parent
if zbrains is None: zbrains=NoBrains if zbrains is None: zbrains=NoBrains
comment_pattern=regex.compile('#')
while line and comment_pattern.match(line) >= 0: line=readline() while line and line.find('#') != -1 : line=readline()
line=line[:-1] line=line[:-1]
if line and line[-1:] in '\r\n': line=line[:-1] if line and line[-1:] in '\r\n': line=line[:-1]
...@@ -174,14 +173,14 @@ class DatabaseResults: ...@@ -174,14 +173,14 @@ class DatabaseResults:
i=0 i=0
self._parsers=parsers=[] self._parsers=parsers=[]
defre=regex.compile('\([0-9]*\)\([a-zA-Z]\)?') defre=re.compile(r'([0-9]*)([a-zA-Z])?')
self._data_dictionary=dd={} self._data_dictionary=dd={}
self.__items__=items=[] self.__items__=items=[]
for _def in defs: for _def in defs:
_def=strip(_def) _def=strip(_def)
if not _def: if not _def:
raise ValueError, ('Empty column definition for %s' % names[i]) raise ValueError, ('Empty column definition for %s' % names[i])
if defre.match(_def) < 0: if defre.match(_def) is None:
raise ValueError, ( raise ValueError, (
'Invalid column definition for, %s, for %s' 'Invalid column definition for, %s, for %s'
% _def, names[i]) % _def, names[i])
......
...@@ -93,7 +93,6 @@ from string import replace ...@@ -93,7 +93,6 @@ from string import replace
import struct import struct
import base64 import base64
import string import string
import regex
import pickle import pickle
import tempfile import tempfile
import marshal import marshal
......
...@@ -85,6 +85,7 @@ ...@@ -85,6 +85,7 @@
import re, ST, STDOM import re, ST, STDOM
from string import split, join, replace, expandtabs, strip, find from string import split, join, replace, expandtabs, strip, find
from STletters import letters
StringType=type('') StringType=type('')
ListType=type([]) ListType=type([])
...@@ -500,7 +501,7 @@ class DocumentClass: ...@@ -500,7 +501,7 @@ class DocumentClass:
def doc_numbered( def doc_numbered(
self, paragraph, self, paragraph,
expr = re.compile('(\s*[a-zA-Z]+\.)|(\s*[0-9]+\.)|(\s*[0-9]+\s+)').match): expr = re.compile('(\s*[%s]+\.)|(\s*[0-9]+\.)|(\s*[0-9]+\s+)' % letters).match):
# This is the old expression. It had a nasty habit # This is the old expression. It had a nasty habit
# of grabbing paragraphs that began with a single # of grabbing paragraphs that began with a single
...@@ -549,7 +550,7 @@ class DocumentClass: ...@@ -549,7 +550,7 @@ class DocumentClass:
delim=d) delim=d)
def doc_header(self, paragraph, def doc_header(self, paragraph,
expr = re.compile('[ a-zA-Z0-9.:/,-_*<>\?\'\"]+').match expr = re.compile('[ %s0-9.:/,-_*<>\?\'\"]+' % letters).match
): ):
subs=paragraph.getSubparagraphs() subs=paragraph.getSubparagraphs()
if not subs: return None if not subs: return None
...@@ -583,7 +584,7 @@ class DocumentClass: ...@@ -583,7 +584,7 @@ class DocumentClass:
def doc_emphasize( def doc_emphasize(
self, s, self, s,
expr = re.compile('\s*\*([ \na-zA-Z0-9.:/;,\'\"\?\=\-\>\<\(\)]+)\*(?!\*|-)').search expr = re.compile('\s*\*([ \n%s0-9.:/;,\'\"\?\=\-\>\<\(\)]+)\*(?!\*|-)' % letters).search
): ):
r=expr(s) r=expr(s)
...@@ -596,7 +597,7 @@ class DocumentClass: ...@@ -596,7 +597,7 @@ class DocumentClass:
def doc_inner_link(self, def doc_inner_link(self,
s, s,
expr1 = re.compile("\.\.\s*").search, expr1 = re.compile("\.\.\s*").search,
expr2 = re.compile("\[[a-zA-Z0-9]+\]").search): expr2 = re.compile("\[[%s0-9]+\]" % letters).search):
# make sure we dont grab a named link # make sure we dont grab a named link
if expr2(s) and expr1(s): if expr2(s) and expr1(s):
...@@ -616,7 +617,7 @@ class DocumentClass: ...@@ -616,7 +617,7 @@ class DocumentClass:
def doc_named_link(self, def doc_named_link(self,
s, s,
expr=re.compile("(\.\.\s)(\[[a-zA-Z0-9]+\])").search): expr=re.compile("(\.\.\s)(\[[%s0-9]+\])" % letters).search):
result = expr(s) result = expr(s)
if result: if result:
...@@ -631,7 +632,7 @@ class DocumentClass: ...@@ -631,7 +632,7 @@ class DocumentClass:
def doc_underline(self, def doc_underline(self,
s, s,
expr=re.compile("\_([a-zA-Z0-9\s\.,\?\/]+)\_").search): expr=re.compile("\_([%s0-9\s\.,\?\/]+)\_" % letters).search):
result = expr(s) result = expr(s)
if result: if result:
...@@ -643,7 +644,7 @@ class DocumentClass: ...@@ -643,7 +644,7 @@ class DocumentClass:
def doc_strong(self, def doc_strong(self,
s, s,
expr = re.compile('\s*\*\*([ \na-zA-Z0-9.:/;\-,!\?\'\"]+)\*\*').search expr = re.compile('\s*\*\*([ \n%s0-9.:/;\-,!\?\'\"]+)\*\*' % letters).search
): ):
r=expr(s) r=expr(s)
...@@ -656,8 +657,8 @@ class DocumentClass: ...@@ -656,8 +657,8 @@ class DocumentClass:
def doc_href( def doc_href(
self, s, self, s,
expr1 = re.compile("(\"[ a-zA-Z0-9\n\-\.\,\;\(\)\/\:\/\*\']+\")(:)([a-zA-Z0-9\:\/\.\~\-]+)([,]*\s*)").search, expr1 = re.compile("(\"[ %s0-9\n\-\.\,\;\(\)\/\:\/\*\']+\")(:)([a-zA-Z0-9\:\/\.\~\-]+)([,]*\s*)" % letters).search,
expr2 = re.compile('(\"[ a-zA-Z0-9\n\-\.\:\;\(\)\/\*\']+\")([,]+\s+)([a-zA-Z0-9\@\.\,\?\!\/\:\;\-\#]+)(\s*)').search): expr2 = re.compile('(\"[ %s0-9\n\-\.\:\;\(\)\/\*\']+\")([,]+\s+)([a-zA-Z0-9\@\.\,\?\!\/\:\;\-\#]+)(\s*)' % letters).search):
#expr1=re.compile('\"([ a-zA-Z0-9.:/;,\n\~\(\)\-]+)\"' #expr1=re.compile('\"([ a-zA-Z0-9.:/;,\n\~\(\)\-]+)\"'
# ':' # ':'
......
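On the letters substitution used throughout these patterns: instead of hard-coding a-zA-Z, each character class now splices in the letters string from the new STletters module, presumably so locale-specific letters match as well. A minimal sketch, assuming letters is simply a run of plain characters safe to embed in a character class (the real STletters module is not part of this diff):

    import re
    import string

    # Assumption: roughly what STletters.letters provides.
    letters = string.ascii_letters

    inner_link = re.compile(r"\[[%s0-9]+\]" % letters).search

    mo = inner_link('see [12] and [note3]')
    if mo:
        print(mo.group(0))       # -> [12]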
...@@ -85,6 +85,8 @@ ...@@ -85,6 +85,8 @@
import re, ST, STDOM import re, ST, STDOM
from string import split, join, replace, expandtabs, strip, find, rstrip from string import split, join, replace, expandtabs, strip, find, rstrip
from STletters import letters
StringType=type('') StringType=type('')
ListType=type([]) ListType=type([])
...@@ -784,7 +786,7 @@ class DocumentClass: ...@@ -784,7 +786,7 @@ class DocumentClass:
def doc_numbered( def doc_numbered(
self, paragraph, self, paragraph,
expr = re.compile(r'(\s*[a-zA-Z]+\.)|(\s*[0-9]+\.)|(\s*[0-9]+\s+)').match): expr = re.compile(r'(\s*[%s]+\.)|(\s*[0-9]+\.)|(\s*[0-9]+\s+)' % letters).match):
# This is the old expression. It had a nasty habit # This is the old expression. It had a nasty habit
# of grabbing paragraphs that began with a single # of grabbing paragraphs that began with a single
...@@ -833,7 +835,7 @@ class DocumentClass: ...@@ -833,7 +835,7 @@ class DocumentClass:
delim=d) delim=d)
def doc_header(self, paragraph, def doc_header(self, paragraph,
expr = re.compile(r'[ a-zA-Z0-9.:/,-_*<>\?\'\"]+').match expr = re.compile(r'[ %s0-9.:/,-_*<>\?\'\"]+' % letters).match
): ):
subs=paragraph.getSubparagraphs() subs=paragraph.getSubparagraphs()
if not subs: return None if not subs: return None
...@@ -865,7 +867,7 @@ class DocumentClass: ...@@ -865,7 +867,7 @@ class DocumentClass:
def doc_emphasize( def doc_emphasize(
self, s, self, s,
expr = re.compile(r'\s*\*([ \na-zA-Z0-9.:/;,\'\"\?\-\_\/\=\-\>\<\(\)]+)\*(?!\*|-)').search expr = re.compile(r'\s*\*([ \n%s0-9.:/;,\'\"\?\-\_\/\=\-\>\<\(\)]+)\*(?!\*|-)' % letters).search
): ):
r=expr(s) r=expr(s)
...@@ -878,7 +880,7 @@ class DocumentClass: ...@@ -878,7 +880,7 @@ class DocumentClass:
def doc_inner_link(self, def doc_inner_link(self,
s, s,
expr1 = re.compile(r"\.\.\s*").search, expr1 = re.compile(r"\.\.\s*").search,
expr2 = re.compile(r"\[[a-zA-Z0-9]+\]").search): expr2 = re.compile(r"\[[%s0-9]+\]" % letters ).search):
# make sure we dont grab a named link # make sure we dont grab a named link
if expr2(s) and expr1(s): if expr2(s) and expr1(s):
...@@ -898,7 +900,7 @@ class DocumentClass: ...@@ -898,7 +900,7 @@ class DocumentClass:
def doc_named_link(self, def doc_named_link(self,
s, s,
expr=re.compile(r"(\.\.\s)(\[[a-zA-Z0-9]+\])").search): expr=re.compile(r"(\.\.\s)(\[[%s0-9]+\])" % letters).search):
result = expr(s) result = expr(s)
if result: if result:
...@@ -912,7 +914,7 @@ class DocumentClass: ...@@ -912,7 +914,7 @@ class DocumentClass:
def doc_underline(self, def doc_underline(self,
s, s,
expr=re.compile(r"\_([a-zA-Z0-9\s\.,\?]+)\_").search): expr=re.compile(r"\_([%s0-9\s\.,\?]+)\_" % letters).search):
result = expr(s) result = expr(s)
if result: if result:
...@@ -924,7 +926,7 @@ class DocumentClass: ...@@ -924,7 +926,7 @@ class DocumentClass:
def doc_strong(self, def doc_strong(self,
s, s,
expr = re.compile(r'\s*\*\*([ \na-zA-Z0-9.:/;\-,!\?\'\"]+)\*\*').search expr = re.compile(r'\s*\*\*([ \n%s0-9.:/;\-,!\?\'\"]+)\*\*' % letters).search
): ):
r=expr(s) r=expr(s)
...@@ -935,8 +937,8 @@ class DocumentClass: ...@@ -935,8 +937,8 @@ class DocumentClass:
return None return None
## Some constants to make the doc_href() regex easier to read. ## Some constants to make the doc_href() regex easier to read.
_DQUOTEDTEXT = r'("[ a-zA-Z0-9\n\-\.\,\;\(\)\/\:\/\*\']+")' ## double quoted text _DQUOTEDTEXT = r'("[%s0-9\n\-\.\,\;\(\)\/\:\/\*\']+")' % letters ## double quoted text
_URL_AND_PUNC = r'([a-zA-Z0-9\@\.\,\?\!\/\:\;\-\#\~]+)' _URL_AND_PUNC = r'([%s0-9\@\.\,\?\!\/\:\;\-\#\~]+)' % letters
_SPACES = r'(\s*)' _SPACES = r'(\s*)'
def doc_href(self, s, def doc_href(self, s,
...@@ -970,7 +972,7 @@ class DocumentClass: ...@@ -970,7 +972,7 @@ class DocumentClass:
else: else:
return None return None
def doc_sgml(self,s,expr=re.compile(r"\<[a-zA-Z0-9\.\=\'\"\:\/\-\#\+\s\*]+\>").search): def doc_sgml(self,s,expr=re.compile(r"\<[%s0-9\.\=\'\"\:\/\-\#\+\s\*]+\>" % letters).search):
""" """
SGML text is ignored and outputed as-is SGML text is ignored and outputed as-is
""" """
...@@ -982,7 +984,7 @@ class DocumentClass: ...@@ -982,7 +984,7 @@ class DocumentClass:
def doc_xref(self, s, def doc_xref(self, s,
expr = re.compile('\[([a-zA-Z0-9\-.:/;,\n\~]+)\]').search expr = re.compile('\[([%s0-9\-.:/;,\n\~]+)\]' % letters).search
): ):
r = expr(s) r = expr(s)
if r: if r:
......
...@@ -101,6 +101,9 @@ def HTML(aStructuredString, level=0): ...@@ -101,6 +101,9 @@ def HTML(aStructuredString, level=0):
doc = Document(st) doc = Document(st)
return HTMLNG(doc) return HTMLNG(doc)
def StructuredText(aStructuredString, level=0):
return HTML(aStructuredString,level)
def html_with_references(text, level=1): def html_with_references(text, level=1):
text = re.sub( text = re.sub(
r'[\0\n]\.\. \[([0-9_%s-]+)\]' % letters, r'[\0\n]\.\. \[([0-9_%s-]+)\]' % letters,
......
...@@ -85,7 +85,7 @@ ...@@ -85,7 +85,7 @@
"""Zope Classes """Zope Classes
""" """
import Globals, string, OFS.SimpleItem, OFS.PropertySheets, Products import Globals, string, OFS.SimpleItem, OFS.PropertySheets, Products
import Method, Basic, Property, AccessControl.Role, ts_regex import Method, Basic, Property, AccessControl.Role, re
from ZPublisher.mapply import mapply from ZPublisher.mapply import mapply
from ExtensionClass import Base from ExtensionClass import Base
...@@ -190,14 +190,14 @@ def dbVersionEquals(ver): ...@@ -190,14 +190,14 @@ def dbVersionEquals(ver):
Globals.DatabaseVersion == ver Globals.DatabaseVersion == ver
bad_id=ts_regex.compile('[^a-zA-Z0-9_]').search bad_id=re.compile('[^a-zA-Z0-9_]').search
def manage_addZClass(self, id, title='', baseclasses=[], def manage_addZClass(self, id, title='', baseclasses=[],
meta_type='', CreateAFactory=0, REQUEST=None, meta_type='', CreateAFactory=0, REQUEST=None,
zope_object=0): zope_object=0):
"""Add a Z Class """Add a Z Class
""" """
if bad_id(id) != -1: if bad_id(id) is not None:
raise 'Bad Request', ( raise 'Bad Request', (
'The id %s is invalid as a class name.' % id) 'The id %s is invalid as a class name.' % id)
if not meta_type: meta_type=id if not meta_type: meta_type=id
......
...@@ -93,9 +93,8 @@ from ZPublisher.HTTPRequest import HTTPRequest ...@@ -93,9 +93,8 @@ from ZPublisher.HTTPRequest import HTTPRequest
from cStringIO import StringIO from cStringIO import StringIO
import os import os
from regsub import gsub
from base64 import encodestring from base64 import encodestring
import string import string,re
class FTPRequest(HTTPRequest): class FTPRequest(HTTPRequest):
...@@ -141,7 +140,7 @@ class FTPRequest(HTTPRequest): ...@@ -141,7 +140,7 @@ class FTPRequest(HTTPRequest):
env['REQUEST_METHOD']='GET' # XXX what should this be? env['REQUEST_METHOD']='GET' # XXX what should this be?
env['SERVER_SOFTWARE']=channel.server.SERVER_IDENT env['SERVER_SOFTWARE']=channel.server.SERVER_IDENT
if channel.userid != 'anonymous': if channel.userid != 'anonymous':
env['HTTP_AUTHORIZATION']='Basic %s' % gsub('\012','', env['HTTP_AUTHORIZATION']='Basic %s' % re.sub('\012','',
encodestring('%s:%s' % (channel.userid, channel.password))) encodestring('%s:%s' % (channel.userid, channel.password)))
env['SERVER_NAME']=channel.server.hostname env['SERVER_NAME']=channel.server.hostname
env['SERVER_PORT']=str(channel.server.port) env['SERVER_PORT']=str(channel.server.port)
......
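The FTPRequest hunk is the standard regsub-to-re translation: regsub.gsub(pat, repl, s) becomes re.sub(pat, repl, s), here used to strip the newlines that base64.encodestring inserts before the value goes into the Basic auth header. The same idiom in isolation (Python 2-era base64 API, as in the file above):

    import re
    from base64 import encodestring

    def basic_auth(userid, password):
        # re.sub replaces every occurrence, matching the old regsub.gsub behaviour
        token = re.sub('\012', '', encodestring('%s:%s' % (userid, password)))
        return 'Basic %s' % token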
...@@ -82,68 +82,75 @@ ...@@ -82,68 +82,75 @@
# attributions are listed in the accompanying credits file. # attributions are listed in the accompanying credits file.
# #
############################################################################## ##############################################################################
"""Provide a thread-safe interface to regex
""" """
import regex, regsub #, Sync Replacement of the old ts_regex module using the standard re module
from regex import * """
from regsub import split, sub, gsub, splitx, capwords
import re,reconvert
try: import sys
import thread
except: import ts_regex_old as OLD
class allocate_lock: import ts_regex_new as NEW
def acquire(*args): pass
def release(*args): pass
def _rcCV(s):
else:
class SafeFunction: cs = reconvert.convert(s)
_l=thread.allocate_lock() if cs != s:
_a=_l.acquire print 'Warning: "%s" must be converted to "%s"' % (s,cs)
_r=_l.release
return cs
def __init__(self, f):
self._f=f
def __call__(self, *args, **kw):
self._a()
try: return apply(self._f, args, kw)
finally: self._r()
split=SafeFunction(split)
sub=SafeFunction(sub)
gsub=SafeFunction(gsub)
splitx=SafeFunction(splitx)
capwords=SafeFunction(capwords)
allocate_lock=thread.allocate_lock
class compile:
_r=None def sub(pat,repl,str):
groupindex=None x = OLD.sub(pat,repl,str)
y = NEW.sub(pat,repl,str)
if x!=y: print 'Warning: sub():',pat,repl,str
return x
def gsub(pat,repl,str):
x = OLD.gsub(pat,repl,str)
y = NEW.gsub(pat,repl,str)
if x!=y: print 'Warning: gsub():',pat,repl,str
return x
def split(str,pat,maxsplit=0):
x = OLD.split(str,pat,maxsplit)
y = NEW.split(str,pat,maxsplit)
if x!=y: print 'Warning: split():',str,pat,maxsplit
return x
def splitx(str,pat,maxsplit=0):
x = OLD.splitx(str,pat,maxsplit)
y = NEW.splitx(str,pat,maxsplit)
if x!=y: print 'Warning: splitx():',str,pat,maxsplit
return x
class compile:
def __init__(self, *args): def __init__(self, *args):
self._r=r=apply(regex.compile,args) print>>sys.stderr, args
self._init(r) self._old = apply(OLD.compile,args)
self._new = apply(NEW.compile,args)
def _init(self, r):
lock=allocate_lock()
self.__a=lock.acquire
self.__r=lock.release
self.translate=r.translate
self.givenpat=r.givenpat
self.realpat=r.realpat
def match(self, string, pos=0): def match(self, string, pos=0):
self.__a() x = self._old.match(string,pos)
try: return self._r.match(string, pos) y = self._new.match(string,pos)
finally: self.__r() if x!=y: print 'Warning: match():',string,pos
return x
def search(self, string, pos=0): def search(self, string, pos=0):
self.__a() x = self._old.search(string,pos)
try: return self._r.search(string, pos) y = self._new.search(string,pos)
finally: self.__r() if x!=y: print 'Warning: search():',string,pos
return x
def search_group(self, str, group, pos=0): def search_group(self, str, group, pos=0):
"""Search a string for a pattern. """Search a string for a pattern.
...@@ -151,13 +158,11 @@ class compile: ...@@ -151,13 +158,11 @@ class compile:
otherwise, the location where the pattern was found, otherwise, the location where the pattern was found,
as well as any specified group are returned. as well as any specified group are returned.
""" """
self.__a() x = self._old.search_group(str,group,pos)
try: y = self._new.search_group(str,group,pos)
r=self._r if x!=y: print 'Warning: search_group(%s,%s,%s) %s vs %s' % (str,group,pos,x,y)
l=r.search(str, pos) return x
if l < 0: return None
return l, apply(r.group, group)
finally: self.__r()
def match_group(self, str, group, pos=0): def match_group(self, str, group, pos=0):
"""Match a pattern against a string """Match a pattern against a string
...@@ -166,50 +171,53 @@ class compile: ...@@ -166,50 +171,53 @@ class compile:
returned, otherwise, the length of the match, as well returned, otherwise, the length of the match, as well
as any specified group are returned. as any specified group are returned.
""" """
self.__a() x = self._old.match_group(str,group,pos)
try: y = self._new.match_group(str,group,pos)
r=self._r if x!=y:
l=r.match(str, pos) print 'Warning: match_group(%s,%s,%s) %s vs %s' % (str,group,pos,x,y)
if l < 0: return None print self._old.givenpat
return l, apply(r.group, group) print self._new.givenpat
finally: self.__r() return x
def search_regs(self, str, pos=0):
"""Search a string for a pattern.
If the pattern was not found, then None is returned,
otherwise, the 'regs' attribute of the expression is
returned.
"""
self.__a()
try:
r=self._r
r.search(str, pos)
return r.regs
finally: self.__r()
def match_regs(self, str, pos=0):
"""Match a pattern against a string
If the string does not match the pattern, then None is if __name__=='__main__':
returned, otherwise, the 'regs' attribute of the expression is
returned.
"""
self.__a()
try:
r=self._r
r.match(str, pos)
return r.regs
finally: self.__r()
class symcomp(compile): import sys
def __init__(self, *args): s1 = 'The quick brown fox jumps of The lazy dog'
self._r=r=apply(regex.symcomp,args) s2 = '892 The quick brown 123 fox jumps over 3454 21 The lazy dog'
self._init(r)
self.groupindex=r.groupindex r1 = ' [a-zA-Z][a-zA-Z] '
r2 = '[0-9][0-9]'
print 'new:',split(s1,' ')
print 'new:',splitx(s2,' ')
print 'new:',split(s2,' ',2)
print 'new:',splitx(s2,' ',2)
print 'new:',sub('The','###',s1)
print 'new:',gsub('The','###',s1)
p1 = compile(r1)
p2 = compile(r2)
for s in [s1,s2]:
print 'search'
print 'new:',p1.search(s)
print 'new:',p2.search(s)
print 'match'
print 'new:',p1.match(s)
print 'new:',p2.match(s)
print 'match_group'
print 'new:',p1.match_group(s,(0,))
print 'new:',p2.match_group(s,(0,))
print 'search_group'
print 'new:',p1.match_group(s,(0,1))
print 'new:',p2.match_group(s,(0,1))
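The rewritten ts_regex above is a transitional shim: every call runs both the old implementation (ts_regex_old) and the new re-based one (ts_regex_new), returns the old result, and prints a warning when the two disagree. Reduced to a sketch, the comparison pattern looks roughly like this (checked is illustrative, not a function in the module):

    def checked(old_func, new_func, label):
        # Run old and new implementations side by side and flag divergences,
        # while still returning the old (trusted) result during the migration.
        def call(*args):
            x = old_func(*args)
            y = new_func(*args)
            if x != y:
                print('Warning: %s%r: %r vs %r' % (label, args, x, y))
            return x
        return call

    # e.g. sub = checked(OLD.sub, NEW.sub, 'sub()')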
"""HTTP 1.1 / WebDAV client library.""" """HTTP 1.1 / WebDAV client library."""
__version__='$Revision: 1.16 $'[11:-2] __version__='$Revision: 1.17 $'[11:-2]
import sys, os, string, time, types,re import sys, os, string, time, types,re
import socket, httplib, mimetools import socket, httplib, mimetools
...@@ -57,8 +57,9 @@ class Resource: ...@@ -57,8 +57,9 @@ class Resource:
self.username=username self.username=username
self.password=password self.password=password
self.url=url self.url=url
mo = urlregex(match(url))
if mo: mo = urlreg.match(url)
if mo:
host,port,uri=mo.group(1,2,3) host,port,uri=mo.group(1,2,3)
self.host=host self.host=host
self.port=port and string.atoi(port[1:]) or 80 self.port=port and string.atoi(port[1:]) or 80
...@@ -157,7 +158,7 @@ class Resource: ...@@ -157,7 +158,7 @@ class Resource:
return self.__snd_request('POST', self.uri, headers, body) return self.__snd_request('POST', self.uri, headers, body)
def put(self, file='', content_type='', content_enc='', def put(self, file='', content_type='', content_enc='',
isbin=re.compile('[\0-\6\177-\277]').search, isbin=re.compile(r'[\000-\006\177-\277]').search,
**kw): **kw):
headers=self.__get_headers(kw) headers=self.__get_headers(kw)
filetype=type(file) filetype=type(file)
...@@ -425,7 +426,7 @@ find_xml="""<?xml version="1.0" encoding="utf-8" ?> ...@@ -425,7 +426,7 @@ find_xml="""<?xml version="1.0" encoding="utf-8" ?>
# Implementation details below here # Implementation details below here
urlregex=re.compile('http://([^:/]+)(:[0-9]+)?(/.+)?', re.I) urlreg=re.compile(r'http://([^:/]+)(:[0-9]+)?(/.+)?', re.I)
def marshal_string(name, val): def marshal_string(name, val):
return '%s=%s' % (name, quote(str(val))) return '%s=%s' % (name, quote(str(val)))
......
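For orientation, the renamed urlreg pattern splits an http URL into hostname, an optional :port group (colon included, hence port[1:] above) and the path. A quick usage sketch with a made-up URL:

    import re

    urlreg = re.compile(r'http://([^:/]+)(:[0-9]+)?(/.+)?', re.I)

    mo = urlreg.match('http://localhost:8080/Zope')
    if mo:
        host, port, uri = mo.group(1, 2, 3)   # 'localhost', ':8080', '/Zope'
        port = port and int(port[1:]) or 80   # the port group keeps its leading colon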
...@@ -85,9 +85,9 @@ ...@@ -85,9 +85,9 @@
"""WebDAV xml request objects.""" """WebDAV xml request objects."""
__version__='$Revision: 1.12 $'[11:-2] __version__='$Revision: 1.13 $'[11:-2]
import sys, os, string, regex import sys, os, string
from common import absattr, aq_base, urlfix, urlbase from common import absattr, aq_base, urlfix, urlbase
from OFS.PropertySheets import DAVProperties from OFS.PropertySheets import DAVProperties
from LockItem import LockItem from LockItem import LockItem
......
# Implement the "hookable PUT" hook. # Implement the "hookable PUT" hook.
import re, OFS.DTMLMethod import re, OFS.DTMLMethod
TEXT_PATTERN = re.compile( '^text/.*$' ) TEXT_PATTERN = re.compile( r'^text/.*$' )
def PUT_factory( self, name, typ, body ): def PUT_factory( self, name, typ, body ):
""" """
......
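TEXT_PATTERN only gains a raw string here; it tests whether a PUT body's content type is textual. A trivial sketch of such a check (is_text is illustrative, not part of the hook):

    import re

    TEXT_PATTERN = re.compile(r'^text/.*$')

    def is_text(content_type):
        # Match object for 'text/plain', 'text/html', ...; None otherwise.
        return TEXT_PATTERN.match(content_type) is not None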