Commit d5684ea4 authored by Jérome Perrin

corporate_identity: py3

parent dbdc33b2
@@ -128,7 +128,7 @@ def populateOrganisationDict(my_organisation_list):
   if organisation_default_image:
     output_dict["logo_url"] = organisation_default_image.getRelativeUrl()
     output_dict["logo_data_url"] = 'data:image/png;;base64,%s' % (
-      b64encode(organisation_default_image.convert(format="png", display="thumbnail")[1])
+      b64encode(organisation_default_image.convert(format="png", display="thumbnail")[1]).decode()
     )
   else:
     output_dict["logo_url"] = err("logo_url")
...
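This hunk shows the pattern that repeats throughout the commit: on Python 3, `base64.b64encode()` takes and returns `bytes`, so the result has to be decoded back to `str` before it can be interpolated into a text template. A minimal standalone sketch (the payload and variable names are made up for illustration, not ERP5 code):

```python
from base64 import b64encode

png_bytes = b"\x89PNG\r\n\x1a\n..."  # pretend thumbnail payload, illustrative only
encoded = b64encode(png_bytes)       # bytes on Python 3, e.g. b'iVBOR...'
# .decode() turns it into str so it can be embedded in an HTML template
logo_data_url = 'data:image/png;base64,%s' % encoded.decode()
```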
@@ -256,9 +256,9 @@ if letter_format == "pdf":
   )
   # ================ encode and build cloudoo elements =========================
-  embedded_html_data = letter.Base_convertHtmlToSingleFile(letter_content, allow_script=True)
-  header_embedded_html_data = letter.Base_convertHtmlToSingleFile(letter_head, allow_script=True)
-  footer_embedded_html_data = letter.Base_convertHtmlToSingleFile(letter_foot, allow_script=True)
+  embedded_html_data = letter.Base_convertHtmlToSingleFile(letter_content, allow_script=True).encode('utf-8')
+  header_embedded_html_data = letter.Base_convertHtmlToSingleFile(letter_head, allow_script=True).encode('utf-8')
+  footer_embedded_html_data = letter.Base_convertHtmlToSingleFile(letter_foot, allow_script=True).encode('utf-8')
   pdf_file = letter.Base_cloudoooDocumentConvert(embedded_html_data, "html", "pdf", conversion_kw=dict(
     encoding="utf8",
     margin_top=letter_header_margin_to_top,
@@ -266,8 +266,8 @@ if letter_format == "pdf":
     margin_left=0,
     margin_right=0,
     header_spacing=1,
-    header_html_data=b64encode(header_embedded_html_data),
-    footer_html_data=b64encode(footer_embedded_html_data),
+    header_html_data=b64encode(header_embedded_html_data).decode(),
+    footer_html_data=b64encode(footer_embedded_html_data).decode(),
     )
   )
...
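The fix here has two halves: the single-file HTML returned by `Base_convertHtmlToSingleFile()` is encoded to UTF-8 bytes before use, and the base64 values passed in `conversion_kw` are decoded back to text. A sketch of that round-trip with an illustrative helper (`build_header_footer_kw` is not a real ERP5 API, just a way to isolate the pattern):

```python
from base64 import b64encode

def build_header_footer_kw(header_html, footer_html):
    # header_html / footer_html are native str (single-file HTML documents)
    header_bytes = header_html.encode('utf-8')  # b64encode needs bytes on Python 3
    footer_bytes = footer_html.encode('utf-8')
    return dict(
        header_html_data=b64encode(header_bytes).decode(),  # back to str for the conversion kwargs
        footer_html_data=b64encode(footer_bytes).decode(),
    )
```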
@@ -36,6 +36,7 @@ MAIN FILE: generate report (book header/footer and report content)
 # report_title report title
 # report_header custom report header
+from Products.ERP5Type.Utils import str2bytes
 from Products.PythonScripts.standard import html_quote
 from base64 import b64encode
@@ -258,21 +259,21 @@ if doc_format == "pdf":
   )
   # ================ encode and build cloudoo elements =========================
-  header_embedded_html_data = doc.Base_convertHtmlToSingleFile(doc_head, allow_script=True)
+  header_embedded_html_data = str2bytes(doc.Base_convertHtmlToSingleFile(doc_head, allow_script=True))
   before_toc_data_list = []
-  xsl_style_sheet_data = blank
-  embedded_html_data = doc.Base_convertHtmlToSingleFile(doc_content, allow_script=True)
-  footer_embedded_html_data = doc.Base_convertHtmlToSingleFile(doc_foot, allow_script=True)
+  xsl_style_sheet_data = blank.encode()
+  embedded_html_data = str2bytes(doc.Base_convertHtmlToSingleFile(doc_content, allow_script=True))
+  footer_embedded_html_data = str2bytes(doc.Base_convertHtmlToSingleFile(doc_foot, allow_script=True))
   default_conversion_kw = dict(
     encoding="utf8",
     margin_top=40,
     margin_bottom=20,
     toc=False,
     before_toc_data_list=before_toc_data_list,
-    xsl_style_sheet_data=b64encode(xsl_style_sheet_data),
-    header_html_data=b64encode(header_embedded_html_data),
+    xsl_style_sheet_data=b64encode(xsl_style_sheet_data).decode(),
+    header_html_data=b64encode(header_embedded_html_data).decode(),
     header_spacing=10,
-    footer_html_data=b64encode(footer_embedded_html_data),
+    footer_html_data=b64encode(footer_embedded_html_data).decode(),
     footer_spacing=3,
   )
   default_conversion_kw.update(conversion_dict)
...
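`str2bytes` is imported from `Products.ERP5Type.Utils` and used here instead of an inline `.encode('utf-8')`. A rough local equivalent, for readers without the ERP5 source at hand (a sketch of the idea, not the actual implementation):

```python
def str2bytes_sketch(s):
    # identity when the value is already bytes (the Python 2 case),
    # otherwise encode native str to UTF-8 bytes (the Python 3 case)
    if isinstance(s, bytes):
        return s
    return s.encode('utf-8')
```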
@@ -11,6 +11,8 @@ Try to convert old OpenOffice presentations into slideshows
 # ------------------------------------------------------------------------------
 import re
+from io import BytesIO
+from zipfile import ZipFile
 blank = ''
 flags = re.MULTILINE|re.DOTALL|re.IGNORECASE
@@ -18,8 +20,21 @@ flags = re.MULTILINE|re.DOTALL|re.IGNORECASE
 def getHeaderSlideTitle(my_doc):
   return '<h1>' + my_doc.getTitle() + '</h1>'
-def getSlideList(content):
-  return re.findall(r'<html>(.*?)</html>', content, flags=flags)
+def getSlideList(zip_content):
+  slide_list = []
+  with ZipFile(BytesIO(zip_content)) as zf:
+    for name in sorted(
+        zf.namelist(),
+        # iterate in order: 'tmpczlzod7e.impr.html', 'img1.html', 'text1.html', 'img2.html', 'text2.html'
+        key=lambda name: (
+          not name.endswith('impr.html'),
+          'img' not in name,
+          name.replace('img', '').replace('text', ''))):
+      if name.endswith('.html'):
+        slide_list.extend(
+          re.findall(r'<html>(.*?)</html>', zf.read(name).decode('utf-8'), flags=flags)
+        )
+  return slide_list
 def getKey(item):
   return int(item[0])
@@ -29,7 +44,11 @@ if context.getPortalType() in ["Presentation"]:
   portal = context.getPortalObject()
   mimetype = 'text/html'
   content_type = context.getContentType()
-  raw_data = portal.portal_transforms.convertToData(mimetype, bytes(context.getData() or b""), context=context, mimetype=content_type)
+  raw_data = portal.portal_transforms.convertToData(
+    mimetype,
+    bytes(context.getData() or b""),
+    context=context,
+    mimetype=content_type)
   if raw_data is None:
     raise ValueError("Failed to convert to %r" % mimetype)
   if context.REQUEST is not None:
@@ -38,6 +57,8 @@ if context.getPortalType() in ["Presentation"]:
   # get a list of slides
   content = getSlideList(raw_data)
+  # ( comment below might be obsolete, this was before fixing a bug that we iterated
+  # directly in the binary data from .zip raw content, which was somehow OK on python2 )
   # every slide is in the raw_data twice, once with the title and image as text,
   # once with the slidecontent without title. All slides are mixed randomly, so
   # we need to find out which slide contains what and then put them in their
...
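The rewritten `getSlideList()` no longer scans one big HTML string; it expects the raw `.zip` archive returned by `portal_transforms` (as bytes) and pulls the `<html>…</html>` fragments out of each `*.html` member. A self-contained way to see that contract, using an in-memory zip built on the fly (purely illustrative data, and the custom ordering key from the script is omitted here):

```python
import re
from io import BytesIO
from zipfile import ZipFile

flags = re.MULTILINE | re.DOTALL | re.IGNORECASE

# build a tiny fake conversion result: two HTML members inside a zip
buf = BytesIO()
with ZipFile(buf, 'w') as zf:
    zf.writestr('img1.html', '<html><p>slide one</p></html>')
    zf.writestr('text1.html', '<html><p>slide one, text only</p></html>')
zip_content = buf.getvalue()  # bytes, like raw_data in the script above

with ZipFile(BytesIO(zip_content)) as zf:
    fragments = [
        frag
        for name in sorted(zf.namelist())
        if name.endswith('.html')
        for frag in re.findall(r'<html>(.*?)</html>', zf.read(name).decode('utf-8'), flags=flags)
    ]
print(fragments)  # ['<p>slide one</p>', '<p>slide one, text only</p>']
```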
@@ -15,7 +15,7 @@ import re
 from Products.PythonScripts.standard import html_quote
 blank = ""
-header_current = 1
+header_current = '0'
 header_initial = None
 table_of_content = blank
 index = 0
...
@@ -10,6 +10,7 @@ Insert reports linked to in a document (including backcompat handling)
 # doc_format output format being generated
 import re
+import six
 document = context
@@ -28,10 +29,14 @@ def getReportViaFancyName(my_report_name, follow_up):
   if method_call is not None:
     # extra curl: Coverage report requires parameter details (1|0)
     if coverage_name:
-      return method_call(display_comment=True)[0].encode(encoding='UTF-8')
-    if detail_name:
-      return method_call(format='detailed',display_detail = 1)[0].encode(encoding='UTF-8')
-    return method_call(display_comment=True)[0].encode(encoding='UTF-8')
+      result = method_call(display_comment=True)[0]
+    elif detail_name:
+      result = method_call(format='detailed',display_detail = 1)[0]
+    else:
+      result = method_call(display_comment=True)[0]
+    if six.PY2:
+      result = result.encode(encoding='UTF-8')
+    return result
 if doc_content.find('${WebPage_') != -1:
   document_required_follow_up_list = [x.getObject() for x in document.portal_catalog(
@@ -82,8 +87,10 @@ for link in re.findall('([^[]<a.*?</a>[^]])', doc_content):
   if target_context is not None:
     target_caller = getattr(target_context, report_name, None)
     if target_caller is not None:
-      substitution_content = target_caller(**link_param_dict)
+      substitution_content = target_caller(**link_param_dict)[0]
+      if six.PY2:
+        substitution_content = substitution_content.encode("utf-8")
       # Note: switched to report returning a tuple with (content, header-title, header-subtitle)
-      doc_content = doc_content.replace(link, substitution_content[0].encode("utf-8").strip())
+      doc_content = doc_content.replace(link, substitution_content.strip())
 return doc_content
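The inserted reports now stay as native `str` on Python 3 and are only re-encoded to UTF-8 on Python 2, where the surrounding string handling still works on byte strings. The rule, isolated into an illustrative helper (not part of the commit):

```python
import six

def normalize_report_text(text):
    # Python 3: keep the report as str so .replace()/.strip() on the
    # surrounding document operate on text.
    # Python 2: downstream code mixes byte strings, so encode to UTF-8.
    if six.PY2:
        text = text.encode('UTF-8')
    return text
```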
@@ -33,6 +33,7 @@ MAIN FILE: generate book in different output formats
 # display_svg format for svg images (svg, png*)
 import re
+import six
 from Products.PythonScripts.standard import html_quote
 from base64 import b64encode
@@ -83,8 +84,7 @@ book_version = html_quote(override_document_version) if override_document_versio
 book_description = html_quote(override_document_description) if override_document_description else book.getDescription()
 book_title = html_quote(override_document_title) if override_document_title else book.getTitle()
-# unicode
-if isinstance(book_content, unicode):
+if six.PY2 and isinstance(book_content, unicode):
   book_content = book_content.encode("UTF-8")
 # backcompat
@@ -197,7 +197,9 @@ if book_include_reference_table:
   #else:
   #  book_content = book_content.replace("${WebPage_insertTableOfReferences}", book_references.encode('UTF-8').strip())
   book_references = book.Base_unescape(book_references)
-  book_content = book_content.replace("${WebPage_insertTableOfReferences}", book_references.encode('UTF-8').strip())
+  if six.PY2:
+    book_references = book_references.encode('utf-8')
+  book_content = book_content.replace("${WebPage_insertTableOfReferences}", book_references.strip())
 else:
   book_content = book_content.replace("${WebPage_insertTableOfReferences}", blank)
@@ -209,7 +211,7 @@ if book_include_content_table:
   book_table_of_content = book.WebPage_createBookXslTableOfContent(
     book_toc_title=book_translated_toc_title,
     margin_15mm = margin_15mm
-  ).encode('UTF-8').strip()
+  ).strip()
 elif book_format == "html":
   book_content, book_table_of_content = book.WebPage_createTableOfContent(
     doc_content=book_content,
@@ -357,22 +359,22 @@ elif book_format == "pdf":
   )
   # ================ encode and build cloudoo elements =========================
-  header_embedded_html_data = book.Base_convertHtmlToSingleFile(book_head, allow_script=True)
+  header_embedded_html_data = book.Base_convertHtmlToSingleFile(book_head, allow_script=True).encode('utf-8')
   before_toc_data_list = [
-    b64encode(book.Base_convertHtmlToSingleFile(book_cover, allow_script=True)),
+    b64encode(book.Base_convertHtmlToSingleFile(book_cover, allow_script=True).encode('utf-8')).decode(),
   ]
   after_toc_data_list = []
   if book_include_history_table:
     before_toc_data_list.append(
-      b64encode(book.Base_convertHtmlToSingleFile(book_history, allow_script=True))
+      b64encode(book.Base_convertHtmlToSingleFile(book_history, allow_script=True).encode('utf-8')).decode()
     )
   #if book_include_reference_table:
   #  after_toc_data_list.append(
-  #    b64encode(book.Base_convertHtmlToSingleFile(book_references, allow_script=True))
+  #    b64encode(book.Base_convertHtmlToSingleFile(book_references, allow_script=True).encode('utf-8')).decode()
   #  )
-  xsl_style_sheet_data = book_table_of_content
-  embedded_html_data = book.Base_convertHtmlToSingleFile(book_content, allow_script=True)
-  footer_embedded_html_data = book.Base_convertHtmlToSingleFile(book_foot, allow_script=True)
+  xsl_style_sheet_data = book_table_of_content.encode('utf-8')
+  embedded_html_data = book.Base_convertHtmlToSingleFile(book_content, allow_script=True).encode('utf-8')
+  footer_embedded_html_data = book.Base_convertHtmlToSingleFile(book_foot, allow_script=True).encode('utf-8')
   if margin_15mm:
     margin_top = 50
     margin_bottom = 25
@@ -385,11 +387,11 @@ elif book_format == "pdf":
     margin_bottom=margin_bottom,
     toc=True if book_include_content_table else False,
     before_toc_data_list=before_toc_data_list,
-    xsl_style_sheet_data=b64encode(xsl_style_sheet_data),
+    xsl_style_sheet_data=b64encode(xsl_style_sheet_data).decode(),
     after_toc_data_list=after_toc_data_list,
-    header_html_data=b64encode(header_embedded_html_data),
+    header_html_data=b64encode(header_embedded_html_data).decode(),
     header_spacing=10,
-    footer_html_data=b64encode(footer_embedded_html_data),
+    footer_html_data=b64encode(footer_embedded_html_data).decode(),
     footer_spacing=3,
     )
   )
...
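`isinstance(book_content, unicode)` can stay in the source even though the name `unicode` does not exist on Python 3: the `six.PY2 and …` guard short-circuits, so the right-hand side, including the reference to the py2-only name, is never evaluated there. A minimal standalone demonstration of that idiom (not the ERP5 script itself):

```python
import six

book_content = "content"
# On Python 3, six.PY2 is False, so isinstance(...) is never evaluated
# and the py2-only name `unicode` is never looked up.
if six.PY2 and isinstance(book_content, unicode):  # noqa: F821
    book_content = book_content.encode("UTF-8")
```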
 import re
+import six
 from base64 import b64encode
-blank = ''
+blank = b''
 pref = context.getPortalObject().portal_preferences
 contract_format = kw.get('format') or 'html'
@@ -35,7 +36,7 @@ contract_version = context.getVersion() or "001"
 contract_description = context.getDescription()
 contract_title = context.getTitle()
-if isinstance(contract_content, unicode):
+if six.PY2 and isinstance(contract_content, unicode):
   contract_content = contract_content.encode("UTF-8")
 contract_history_section_list = re.findall('<section.+?>.+?</section>', contract_content, re.S)
@@ -289,22 +290,22 @@ elif contract_format == "pdf":
   )
   # ================ encode and build cloudoo elements =========================
-  header_embedded_html_data = context.Base_convertHtmlToSingleFile(contract_head, allow_script=True)
+  header_embedded_html_data = context.Base_convertHtmlToSingleFile(contract_head, allow_script=True).encode('utf-8')
   before_toc_data_list = [
-    b64encode(context.Base_convertHtmlToSingleFile(contract_cover, allow_script=True)),
+    b64encode(context.Base_convertHtmlToSingleFile(contract_cover, allow_script=True).encode('utf-8')).decode(),
   ]
   after_toc_data_list = []
   if contract_include_history_table:
     before_toc_data_list.append(
-      b64encode(context.Base_convertHtmlToSingleFile(contract_history, allow_script=True))
+      b64encode(context.Base_convertHtmlToSingleFile(contract_history, allow_script=True).encode('utf-8')).decode()
     )
   #if contract_include_reference_table:
   #  after_toc_data_list.append(
-  #    b64encode(context.Base_convertHtmlToSingleFile(contract_references, allow_script=True))
+  #    b64encode(context.Base_convertHtmlToSingleFile(contract_references, allow_script=True)).decode()
   #  )
   xsl_style_sheet_data = contract_table_of_content
-  embedded_html_data = context.Base_convertHtmlToSingleFile(contract_content, allow_script=True)
-  footer_embedded_html_data = context.Base_convertHtmlToSingleFile(contract_foot, allow_script=True)
+  embedded_html_data = context.Base_convertHtmlToSingleFile(contract_content, allow_script=True).encode('utf-8')
+  footer_embedded_html_data = context.Base_convertHtmlToSingleFile(contract_foot, allow_script=True).encode('utf-8')
   margin_top = 40
   margin_bottom = 20
   pdf_file = context.Base_cloudoooDocumentConvert(embedded_html_data, "html", "pdf", conversion_kw=dict(
@@ -313,11 +314,11 @@ elif contract_format == "pdf":
     margin_bottom=margin_bottom,
     toc=True if contract_include_content_table else False,
     before_toc_data_list=before_toc_data_list,
-    xsl_style_sheet_data=b64encode(xsl_style_sheet_data),
+    xsl_style_sheet_data=b64encode(xsl_style_sheet_data).decode(),
     after_toc_data_list=after_toc_data_list,
-    header_html_data=b64encode(header_embedded_html_data),
+    header_html_data=b64encode(header_embedded_html_data).decode(),
     header_spacing=10,
-    footer_html_data=b64encode(footer_embedded_html_data),
+    footer_html_data=b64encode(footer_embedded_html_data).decode(),
     footer_spacing=3,
     )
   )
...
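The change specific to this file is `blank = ''` becoming `blank = b''`. Presumably the value is used as a default for data that later flows into `b64encode()` (for example via `xsl_style_sheet_data`), and on Python 3 `b64encode()` rejects `str`; that reading is an inference from the surrounding hunks, not stated in the commit. A quick standalone check of the underlying behaviour:

```python
from base64 import b64encode

blank = b''
print(b64encode(blank))  # b'' (an empty bytes payload is accepted)
try:
    b64encode('')        # str is not accepted on Python 3
except TypeError as e:
    print(e)             # prints the "bytes-like object is required" message
```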
@@ -22,6 +22,7 @@ MAIN FILE: render two pager in different output formats
 # document_save: save file in document module (default None)
 import re
+import six
 from Products.PythonScripts.standard import html_quote
 from base64 import b64encode
@@ -146,6 +147,7 @@ if leaflet_display_side:
   )
   #leaflet_content = leaflet_legalese.decode() + leaflet_content.decode()
+  if six.PY2:
     if isinstance(leaflet_legalese, unicode):
       leaflet_legalese = leaflet_legalese.encode("UTF-8")
     if isinstance(leaflet_content, unicode):
@@ -255,9 +257,9 @@ if leaflet_format == "pdf":
   )
   # ================ encode and build cloudoo elements =========================
-  embedded_html_data = leaflet.Base_convertHtmlToSingleFile(leaflet_content, allow_script=True)
-  header_embedded_html_data = leaflet.Base_convertHtmlToSingleFile(leaflet_head, allow_script=True)
-  footer_embedded_html_data = leaflet.Base_convertHtmlToSingleFile(leaflet_foot, allow_script=True)
+  embedded_html_data = leaflet.Base_convertHtmlToSingleFile(leaflet_content, allow_script=True).encode('utf-8')
+  header_embedded_html_data = leaflet.Base_convertHtmlToSingleFile(leaflet_head, allow_script=True).encode('utf-8')
+  footer_embedded_html_data = leaflet.Base_convertHtmlToSingleFile(leaflet_foot, allow_script=True).encode('utf-8')
   pdf_file = leaflet.Base_cloudoooDocumentConvert(embedded_html_data, "html", "pdf", conversion_kw=dict(
     encoding="utf8",
     orientation="portrait",
@@ -265,9 +267,9 @@ if leaflet_format == "pdf":
     margin_bottom=20,
     margin_left=0,
     margin_right=0,
-    header_html_data=b64encode(header_embedded_html_data),
+    header_html_data=b64encode(header_embedded_html_data).decode(),
     header_spacing=10,
-    footer_html_data=b64encode(footer_embedded_html_data),
+    footer_html_data=b64encode(footer_embedded_html_data).decode(),
     footer_spacing=3
     )
   )
...
@@ -21,6 +21,7 @@ MAIN FILE: render press release in different output formats
 # document_save: save file in document module (default None)
 import re
+import six
 from base64 import b64encode
 blank = ''
@@ -137,6 +138,7 @@ if release_display_about:
     release_relative_url=release_relative_url,
   )
   #release_content = release_content.decode() + release_about.decode()
+  if six.PY2:
     if isinstance(release_content, unicode):
       release_content = release_content.encode("UTF-8")
     if isinstance(release_about, unicode):
@@ -226,9 +228,9 @@ if release_format == "pdf":
   )
   # ================ encode and build cloudoo elements =========================
-  embedded_html_data = release.Base_convertHtmlToSingleFile(release_content, allow_script=True)
-  header_embedded_html_data = release.Base_convertHtmlToSingleFile(release_head, allow_script=True)
-  footer_embedded_html_data = release.Base_convertHtmlToSingleFile(release_foot, allow_script=True)
+  embedded_html_data = release.Base_convertHtmlToSingleFile(release_content, allow_script=True).encode('utf-8')
+  header_embedded_html_data = release.Base_convertHtmlToSingleFile(release_head, allow_script=True).encode('utf-8')
+  footer_embedded_html_data = release.Base_convertHtmlToSingleFile(release_foot, allow_script=True).encode('utf-8')
   pdf_file = release.Base_cloudoooDocumentConvert(embedded_html_data, "html", "pdf", conversion_kw=dict(
     encoding="utf8",
     orientation="portrait",
@@ -236,9 +238,9 @@ if release_format == "pdf":
     margin_bottom=20,
     margin_left=0,
     margin_right=0,
-    header_html_data=b64encode(header_embedded_html_data),
+    header_html_data=b64encode(header_embedded_html_data).decode(),
     header_spacing=10,
-    footer_html_data=b64encode(footer_embedded_html_data),
+    footer_html_data=b64encode(footer_embedded_html_data).decode(),
     footer_spacing=3
     )
   )
...
@@ -420,25 +420,25 @@ if doc_format == "pdf" or doc_format == "mhtml":
   )
   # ================ encode and build cloudoo elements =========================
-  footer_embedded_html_data = doc.Base_convertHtmlToSingleFile(doc_slideshow_footer, allow_script=True)
-  #embedded_html_data = doc.Base_convertHtmlToSingleFile(doc_slideshow_content, allow_script=True)
+  footer_embedded_html_data = doc.Base_convertHtmlToSingleFile(doc_slideshow_footer, allow_script=True).encode('utf-8')
+  #embedded_html_data = doc.Base_convertHtmlToSingleFile(doc_slideshow_content, allow_script=True).encode('utf-8')
+  cover = doc.Base_convertHtmlToSingleFile(doc_slideshow_cover, allow_script=True).encode('utf-8')
   before_body_data_list = [
-    b64encode(doc.Base_convertHtmlToSingleFile(doc_slideshow_cover, allow_script=True)),
+    b64encode(cover).decode(),
   ]
   if doc_format == "mhtml":
     context.REQUEST.RESPONSE.setHeader("Content-Type", "text/html;")
     return doc.Base_convertHtmlToSingleFile(doc_slideshow_cover, allow_script=True)
   if doc_display_notes:
     #after_body_data_list = [
-    #  b64encode(doc.Base_convertHtmlToSingleFile(doc_slideshow_notes, allow_script=True)),
+    #  b64encode(doc.Base_convertHtmlToSingleFile(doc_slideshow_notes, allow_script=True).encode('utf-8')).decode(),
     #]
-    embedded_html_data = doc.Base_convertHtmlToSingleFile(doc_slideshow_notes, allow_script=True)
+    embedded_html_data = doc.Base_convertHtmlToSingleFile(doc_slideshow_notes, allow_script=True).encode('utf-8')
     after_body_data_list = []
   else:
-    embedded_html_data = doc.Base_convertHtmlToSingleFile(doc_slideshow_content, allow_script=True)
+    embedded_html_data = doc.Base_convertHtmlToSingleFile(doc_slideshow_content, allow_script=True).encode('utf-8')
     after_body_data_list = []
     #after_body_data_list = []
   pdf_file = doc.Base_cloudoooDocumentConvert(embedded_html_data, "html", "pdf", conversion_kw=dict(
     encoding="utf8",
     orientation= "portrait" if doc_display_notes else "landscape",
@@ -447,7 +447,7 @@ if doc_format == "pdf" or doc_format == "mhtml":
     before_body_data_list=before_body_data_list,
     after_body_data_list=after_body_data_list,
     header_spacing=10,
-    footer_html_data=b64encode(footer_embedded_html_data),
+    footer_html_data=b64encode(footer_embedded_html_data).decode(),
     footer_spacing=3
     )
   )
...