PR #552 + black all files
parent f3ba468927
commit a50b52b317
@@ -4,7 +4,7 @@ Created : 2015-03-12
 
 @author: Eric Lapouyade
 """
-__version__ = '0.17.0'
+__version__ = "0.17.0"
 
 # flake8: noqa
 from .inline_image import InlineImage
@@ -4,32 +4,41 @@ import os
 
 from .template import DocxTemplate, TemplateError
 
-TEMPLATE_ARG = 'template_path'
-JSON_ARG = 'json_path'
-OUTPUT_ARG = 'output_filename'
-OVERWRITE_ARG = 'overwrite'
-QUIET_ARG = 'quiet'
+TEMPLATE_ARG = "template_path"
+JSON_ARG = "json_path"
+OUTPUT_ARG = "output_filename"
+OVERWRITE_ARG = "overwrite"
+QUIET_ARG = "quiet"
 
 
 def make_arg_parser():
     parser = argparse.ArgumentParser(
-        usage='python -m docxtpl [-h] [-o] [-q] {} {} {}'.format(TEMPLATE_ARG, JSON_ARG, OUTPUT_ARG),
-        description='Make docx file from existing template docx and json data.')
-    parser.add_argument(TEMPLATE_ARG,
-                        type=str,
-                        help='The path to the template docx file.')
-    parser.add_argument(JSON_ARG,
-                        type=str,
-                        help='The path to the json file with the data.')
-    parser.add_argument(OUTPUT_ARG,
-                        type=str,
-                        help='The filename to save the generated docx.')
-    parser.add_argument('-' + OVERWRITE_ARG[0], '--' + OVERWRITE_ARG,
-                        action='store_true',
-                        help='If output file already exists, overwrites without asking for confirmation')
-    parser.add_argument('-' + QUIET_ARG[0], '--' + QUIET_ARG,
-                        action='store_true',
-                        help='Do not display unnecessary messages')
+        usage="python -m docxtpl [-h] [-o] [-q] {} {} {}".format(
+            TEMPLATE_ARG, JSON_ARG, OUTPUT_ARG
+        ),
+        description="Make docx file from existing template docx and json data.",
+    )
+    parser.add_argument(
+        TEMPLATE_ARG, type=str, help="The path to the template docx file."
+    )
+    parser.add_argument(
+        JSON_ARG, type=str, help="The path to the json file with the data."
+    )
+    parser.add_argument(
+        OUTPUT_ARG, type=str, help="The filename to save the generated docx."
+    )
+    parser.add_argument(
+        "-" + OVERWRITE_ARG[0],
+        "--" + OVERWRITE_ARG,
+        action="store_true",
+        help="If output file already exists, overwrites without asking for confirmation",
+    )
+    parser.add_argument(
+        "-" + QUIET_ARG[0],
+        "--" + QUIET_ARG,
+        action="store_true",
+        help="Do not display unnecessary messages",
+    )
     return parser
 
 
@@ -43,18 +52,21 @@ def get_args(parser):
         if e.code == 0:
             raise SystemExit
         else:
-            raise RuntimeError('Correct usage is:\n{parser.usage}'.format(parser=parser))
+            raise RuntimeError(
+                "Correct usage is:\n{parser.usage}".format(parser=parser)
+            )
 
 
 def is_argument_valid(arg_name, arg_value, overwrite):
     # Basic checks for the arguments
     if arg_name == TEMPLATE_ARG:
-        return os.path.isfile(arg_value) and arg_value.endswith('.docx')
+        return os.path.isfile(arg_value) and arg_value.endswith(".docx")
     elif arg_name == JSON_ARG:
-        return os.path.isfile(arg_value) and arg_value.endswith('.json')
+        return os.path.isfile(arg_value) and arg_value.endswith(".json")
     elif arg_name == OUTPUT_ARG:
-        return arg_value.endswith('.docx') and check_exists_ask_overwrite(
-            arg_value, overwrite)
+        return arg_value.endswith(".docx") and check_exists_ask_overwrite(
+            arg_value, overwrite
+        )
     elif arg_name in [OVERWRITE_ARG, QUIET_ARG]:
         return arg_value in [True, False]
 
@@ -65,13 +77,18 @@ def check_exists_ask_overwrite(arg_value, overwrite):
     # confirmed returns True, else raises OSError.
     if os.path.exists(arg_value) and not overwrite:
         try:
-            msg = 'File %s already exists, would you like to overwrite the existing file? (y/n)' % arg_value
-            if input(msg).lower() == 'y':
+            msg = (
+                "File %s already exists, would you like to overwrite the existing file? (y/n)"
+                % arg_value
+            )
+            if input(msg).lower() == "y":
                 return True
             else:
                 raise OSError
         except OSError:
-            raise RuntimeError('File %s already exists, please choose a different name.' % arg_value)
+            raise RuntimeError(
+                "File %s already exists, please choose a different name." % arg_value
+            )
     else:
         return True
 
@@ -87,7 +104,8 @@ def validate_all_args(parsed_args):
             raise RuntimeError(
                 'The specified {arg_name} "{arg_value}" is not valid.'.format(
                     arg_name=arg_name, arg_value=arg_value
-                ))
+                )
+            )
 
 
 def get_json_data(json_path):
@@ -97,17 +115,18 @@ def get_json_data(json_path):
             return json_data
     except json.JSONDecodeError as e:
         print(
-            'There was an error on line {e.lineno}, column {e.colno} while trying to parse file {json_path}'.format(
+            "There was an error on line {e.lineno}, column {e.colno} while trying to parse file {json_path}".format(
                 e=e, json_path=json_path
-            ))
-        raise RuntimeError('Failed to get json data.')
+            )
+        )
+        raise RuntimeError("Failed to get json data.")
 
 
 def make_docxtemplate(template_path):
     try:
         return DocxTemplate(template_path)
     except TemplateError:
-        raise RuntimeError('Could not create docx template.')
+        raise RuntimeError("Could not create docx template.")
 
 
 def render_docx(doc, json_data):
@@ -115,7 +134,7 @@ def render_docx(doc, json_data):
         doc.render(json_data)
         return doc
     except TemplateError:
-        raise RuntimeError('An error ocurred while trying to render the docx')
+        raise RuntimeError("An error ocurred while trying to render the docx")
 
 
 def save_file(doc, parsed_args):
@@ -123,10 +142,14 @@ def save_file(doc, parsed_args):
         output_path = parsed_args[OUTPUT_ARG]
         doc.save(output_path)
         if not parsed_args[QUIET_ARG]:
-            print('Document successfully generated and saved at {output_path}'.format(output_path=output_path))
+            print(
+                "Document successfully generated and saved at {output_path}".format(
+                    output_path=output_path
+                )
+            )
     except OSError as e:
-        print('{e.strerror}. Could not save file {e.filename}.'.format(e=e))
-        raise RuntimeError('Failed to save file.')
+        print("{e.strerror}. Could not save file {e.filename}.".format(e=e))
+        raise RuntimeError("Failed to save file.")
 
 
 def main():
@@ -142,12 +165,12 @@ def main():
         doc = render_docx(doc, json_data)
         save_file(doc, parsed_args)
     except RuntimeError as e:
-        print('Error: '+e.__str__())
+        print("Error: " + e.__str__())
         return
     finally:
         if not parsed_args[QUIET_ARG]:
-            print('Exiting program!')
+            print("Exiting program!")
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
@@ -7,11 +7,13 @@ Created : 2021-07-30
 from docx.oxml import OxmlElement, parse_xml
 from docx.oxml.ns import qn
 
+
 class InlineImage(object):
     """Class to generate an inline image
 
     This is much faster than using Subdoc class.
     """
+
     tpl = None
     image_descriptor = None
     width = None
@@ -25,17 +27,21 @@ class InlineImage(object):
 
     def _add_hyperlink(self, run, url, part):
         # Create a relationship for the hyperlink
-        r_id = part.relate_to(url, 'http://schemas.openxmlformats.org/officeDocument/2006/relationships/hyperlink', is_external=True)
+        r_id = part.relate_to(
+            url,
+            "http://schemas.openxmlformats.org/officeDocument/2006/relationships/hyperlink",
+            is_external=True,
+        )
 
         # Find the <wp:docPr> and <pic:cNvPr> element
-        docPr = run.xpath('.//wp:docPr')[0]
-        cNvPr = run.xpath('.//pic:cNvPr')[0]
+        docPr = run.xpath(".//wp:docPr")[0]
+        cNvPr = run.xpath(".//pic:cNvPr")[0]
 
         # Create the <a:hlinkClick> element
-        hlinkClick1 = OxmlElement('a:hlinkClick')
-        hlinkClick1.set(qn('r:id'), r_id)
-        hlinkClick2 = OxmlElement('a:hlinkClick')
-        hlinkClick2.set(qn('r:id'), r_id)
+        hlinkClick1 = OxmlElement("a:hlinkClick")
+        hlinkClick1.set(qn("r:id"), r_id)
+        hlinkClick2 = OxmlElement("a:hlinkClick")
+        hlinkClick2.set(qn("r:id"), r_id)
 
         # Insert the <a:hlinkClick> element right after the <wp:docPr> element
         docPr.append(hlinkClick1)
@@ -51,12 +57,16 @@ class InlineImage(object):
         ).xml
         if self.anchor:
             run = parse_xml(pic)
-            if run.xpath('.//a:blip'):
-                hyperlink = self._add_hyperlink(run, self.anchor, self.tpl.current_rendering_part)
+            if run.xpath(".//a:blip"):
+                hyperlink = self._add_hyperlink(
+                    run, self.anchor, self.tpl.current_rendering_part
+                )
                 pic = hyperlink.xml
 
-        return '</w:t></w:r><w:r><w:drawing>%s</w:drawing></w:r><w:r>' \
-            '<w:t xml:space="preserve">' % pic
+        return (
+            "</w:t></w:r><w:r><w:drawing>%s</w:drawing></w:r><w:r>"
+            '<w:t xml:space="preserve">' % pic
+        )
 
     def __unicode__(self):
         return self._insert_image()
@@ -5,6 +5,7 @@ Created : 2021-07-30
 @author: Eric Lapouyade
 """
 import six
+
 try:
     from html import escape
 except ImportError:
@@ -19,6 +20,7 @@ class Listing(object):
     use {{ mylisting }} in your template and
     context={ mylisting:Listing(the_listing_with_newlines) }
     """
+
     def __init__(self, text):
         # If not a string : cast to string (ex: int, dict etc...)
         if not isinstance(text, (six.text_type, six.binary_type)):
@@ -5,6 +5,7 @@ Created : 2021-07-30
 @author: Eric Lapouyade
 """
 import six
+
 try:
     from html import escape
 except ImportError:
@@ -13,29 +14,33 @@ except ImportError:
 
 
 class RichText(object):
-    """ class to generate Rich Text when using templates variables
+    """class to generate Rich Text when using templates variables
 
     This is much faster than using Subdoc class,
     but this only for texts INSIDE an existing paragraph.
     """
+
     def __init__(self, text=None, **text_prop):
-        self.xml = ''
+        self.xml = ""
         if text:
             self.add(text, **text_prop)
 
-    def add(self, text,
-            style=None,
-            color=None,
-            highlight=None,
-            size=None,
-            subscript=None,
-            superscript=None,
-            bold=False,
-            italic=False,
-            underline=False,
-            strike=False,
-            font=None,
-            url_id=None):
+    def add(
+        self,
+        text,
+        style=None,
+        color=None,
+        highlight=None,
+        size=None,
+        subscript=None,
+        superscript=None,
+        bold=False,
+        italic=False,
+        underline=False,
+        strike=False,
+        font=None,
+        url_id=None,
+    ):
 
         # If a RichText is added
         if isinstance(text, RichText):
@@ -46,55 +51,65 @@ class RichText(object):
         if not isinstance(text, (six.text_type, six.binary_type)):
             text = six.text_type(text)
         if not isinstance(text, six.text_type):
-            text = text.decode('utf-8', errors='ignore')
+            text = text.decode("utf-8", errors="ignore")
         text = escape(text)
 
-        prop = u''
+        prop = ""
 
         if style:
-            prop += u'<w:rStyle w:val="%s"/>' % style
+            prop += '<w:rStyle w:val="%s"/>' % style
         if color:
-            if color[0] == '#':
+            if color[0] == "#":
                 color = color[1:]
-            prop += u'<w:color w:val="%s"/>' % color
+            prop += '<w:color w:val="%s"/>' % color
         if highlight:
-            if highlight[0] == '#':
+            if highlight[0] == "#":
                 highlight = highlight[1:]
-            prop += u'<w:shd w:fill="%s"/>' % highlight
+            prop += '<w:shd w:fill="%s"/>' % highlight
         if size:
-            prop += u'<w:sz w:val="%s"/>' % size
-            prop += u'<w:szCs w:val="%s"/>' % size
+            prop += '<w:sz w:val="%s"/>' % size
+            prop += '<w:szCs w:val="%s"/>' % size
         if subscript:
-            prop += u'<w:vertAlign w:val="subscript"/>'
+            prop += '<w:vertAlign w:val="subscript"/>'
         if superscript:
-            prop += u'<w:vertAlign w:val="superscript"/>'
+            prop += '<w:vertAlign w:val="superscript"/>'
         if bold:
-            prop += u'<w:b/>'
+            prop += "<w:b/>"
         if italic:
-            prop += u'<w:i/>'
+            prop += "<w:i/>"
         if underline:
-            if underline not in ['single', 'double', 'thick', 'dotted', 'dash', 'dotDash', 'dotDotDash', 'wave']:
-                underline = 'single'
-            prop += u'<w:u w:val="%s"/>' % underline
+            if underline not in [
+                "single",
+                "double",
+                "thick",
+                "dotted",
+                "dash",
+                "dotDash",
+                "dotDotDash",
+                "wave",
+            ]:
+                underline = "single"
+            prop += '<w:u w:val="%s"/>' % underline
         if strike:
-            prop += u'<w:strike/>'
+            prop += "<w:strike/>"
         if font:
-            regional_font = u''
-            if ':' in font:
-                region, font = font.split(':', 1)
-                regional_font = u' w:{region}="{font}"'.format(font=font, region=region)
-            prop += (
-                u'<w:rFonts w:ascii="{font}" w:hAnsi="{font}" w:cs="{font}"{regional_font}/>'
-                .format(font=font, regional_font=regional_font)
+            regional_font = ""
+            if ":" in font:
+                region, font = font.split(":", 1)
+                regional_font = ' w:{region}="{font}"'.format(font=font, region=region)
+            prop += '<w:rFonts w:ascii="{font}" w:hAnsi="{font}" w:cs="{font}"{regional_font}/>'.format(
+                font=font, regional_font=regional_font
             )
 
-        xml = u'<w:r>'
+        xml = "<w:r>"
         if prop:
-            xml += u'<w:rPr>%s</w:rPr>' % prop
-        xml += u'<w:t xml:space="preserve">%s</w:t></w:r>' % text
+            xml += "<w:rPr>%s</w:rPr>" % prop
+        xml += '<w:t xml:space="preserve">%s</w:t></w:r>' % text
         if url_id:
-            xml = (u'<w:hyperlink r:id="%s" w:tgtFrame="_blank">%s</w:hyperlink>'
-                   % (url_id, xml))
+            xml = '<w:hyperlink r:id="%s" w:tgtFrame="_blank">%s</w:hyperlink>' % (
+                url_id,
+                xml,
+            )
         self.xml += xml
 
     def __unicode__(self):
@@ -18,8 +18,8 @@ import re
 
 class SubdocComposer(Composer):
     def attach_parts(self, doc, remove_property_fields=True):
-        """ Attach docx parts instead of appending the whole document
-        thus subdoc insertion can be delegated to jinja2 """
+        """Attach docx parts instead of appending the whole document
+        thus subdoc insertion can be delegated to jinja2"""
         self.reset_reference_mapping()
 
         # Remove custom property fields but keep the values
@@ -51,22 +51,23 @@ class SubdocComposer(Composer):
 
     def add_diagrams(self, doc, element):
         # While waiting docxcompose 1.3.3
-        dgm_rels = xpath(element, './/dgm:relIds[@r:dm]')
+        dgm_rels = xpath(element, ".//dgm:relIds[@r:dm]")
         for dgm_rel in dgm_rels:
             for item, rt_type in (
-                    ('dm', RT.DIAGRAM_DATA),
-                    ('lo', RT.DIAGRAM_LAYOUT),
-                    ('qs', RT.DIAGRAM_QUICK_STYLE),
-                    ('cs', RT.DIAGRAM_COLORS)
+                ("dm", RT.DIAGRAM_DATA),
+                ("lo", RT.DIAGRAM_LAYOUT),
+                ("qs", RT.DIAGRAM_QUICK_STYLE),
+                ("cs", RT.DIAGRAM_COLORS),
             ):
-                dm_rid = dgm_rel.get('{%s}%s' % (NS['r'], item))
+                dm_rid = dgm_rel.get("{%s}%s" % (NS["r"], item))
                 dm_part = doc.part.rels[dm_rid].target_part
                 new_rid = self.doc.part.relate_to(dm_part, rt_type)
-                dgm_rel.set('{%s}%s' % (NS['r'], item), new_rid)
+                dgm_rel.set("{%s}%s" % (NS["r"], item), new_rid)
 
 
 class Subdoc(object):
-    """ Class for subdocument to insert into master document """
+    """Class for subdocument to insert into master document"""
+
     def __init__(self, tpl, docpath=None):
         self.tpl = tpl
         self.docx = tpl.get_docx()
@@ -83,8 +84,13 @@ class Subdoc(object):
     def _get_xml(self):
         if self.subdocx.element.body.sectPr is not None:
             self.subdocx.element.body.remove(self.subdocx.element.body.sectPr)
-        xml = re.sub(r'</?w:body[^>]*>', '', etree.tostring(
-            self.subdocx.element.body, encoding='unicode', pretty_print=False))
+        xml = re.sub(
+            r"</?w:body[^>]*>",
+            "",
+            etree.tostring(
+                self.subdocx.element.body, encoding="unicode", pretty_print=False
+            ),
+        )
         return xml
 
     def __unicode__(self):
@@ -18,6 +18,7 @@ import docx.oxml.ns
 from docx.opc.constants import RELATIONSHIP_TYPE as REL_TYPE
 from jinja2 import Environment, Template, meta
 from jinja2.exceptions import TemplateError
+
 try:
     from html import escape  # noqa: F401
 except ImportError:
@@ -31,10 +32,14 @@ import zipfile
 
 
 class DocxTemplate(object):
-    """ Class for managing docx files as they were jinja2 templates """
+    """Class for managing docx files as they were jinja2 templates"""
 
-    HEADER_URI = "http://schemas.openxmlformats.org/officeDocument/2006/relationships/header"
-    FOOTER_URI = "http://schemas.openxmlformats.org/officeDocument/2006/relationships/footer"
+    HEADER_URI = (
+        "http://schemas.openxmlformats.org/officeDocument/2006/relationships/header"
+    )
+    FOOTER_URI = (
+        "http://schemas.openxmlformats.org/officeDocument/2006/relationships/footer"
+    )
 
     def __init__(self, template_file: Union[IO[bytes], str, PathLike]) -> None:
         self.template_file = template_file
@@ -58,10 +63,10 @@ class DocxTemplate(object):
     def __getattr__(self, name):
         return getattr(self.docx, name)
 
-    def xml_to_string(self, xml, encoding='unicode'):
+    def xml_to_string(self, xml, encoding="unicode"):
         # Be careful : pretty_print MUST be set to False, otherwise patch_xml()
         # won't work properly
-        return etree.tostring(xml, encoding='unicode', pretty_print=False)
+        return etree.tostring(xml, encoding="unicode", pretty_print=False)
 
     def get_docx(self):
         self.init_docx()
@@ -71,121 +76,178 @@
         return self.xml_to_string(self.docx._element.body)
 
     def write_xml(self, filename):
-        with open(filename, 'w') as fh:
+        with open(filename, "w") as fh:
             fh.write(self.get_xml())
 
     def patch_xml(self, src_xml):
-        """ Make a lots of cleaning to have a raw xml understandable by jinja2 :
+        """Make a lots of cleaning to have a raw xml understandable by jinja2 :
         strip all unnecessary xml tags, manage table cell background color and colspan,
-        unescape html entities, etc... """
+        unescape html entities, etc..."""
 
         # replace {<something>{ by {{ ( works with {{ }} {% and %} {# and #})
-        src_xml = re.sub(r'(?<={)(<[^>]*>)+(?=[\{%\#])|(?<=[%\}\#])(<[^>]*>)+(?=\})', '',
-                         src_xml, flags=re.DOTALL)
+        src_xml = re.sub(
+            r"(?<={)(<[^>]*>)+(?=[\{%\#])|(?<=[%\}\#])(<[^>]*>)+(?=\})",
+            "",
+            src_xml,
+            flags=re.DOTALL,
+        )
 
         # replace {{<some tags>jinja2 stuff<some other tags>}} by {{jinja2 stuff}}
         # same thing with {% ... %} and {# #}
         # "jinja2 stuff" could a variable, a 'if' etc... anything jinja2 will understand
         def striptags(m):
-            return re.sub('</w:t>.*?(<w:t>|<w:t [^>]*>)', '',
-                          m.group(0), flags=re.DOTALL)
-        src_xml = re.sub(r'{%(?:(?!%}).)*|{#(?:(?!#}).)*|{{(?:(?!}}).)*', striptags,
-                         src_xml, flags=re.DOTALL)
+            return re.sub(
+                "</w:t>.*?(<w:t>|<w:t [^>]*>)", "", m.group(0), flags=re.DOTALL
+            )
+
+        src_xml = re.sub(
+            r"{%(?:(?!%}).)*|{#(?:(?!#}).)*|{{(?:(?!}}).)*",
+            striptags,
+            src_xml,
+            flags=re.DOTALL,
+        )
 
         # manage table cell colspan
         def colspan(m):
             cell_xml = m.group(1) + m.group(3)
-            cell_xml = re.sub(r'<w:r[ >](?:(?!<w:r[ >]).)*<w:t></w:t>.*?</w:r>',
-                              '', cell_xml, flags=re.DOTALL)
-            cell_xml = re.sub(r'<w:gridSpan[^/]*/>', '', cell_xml, count=1)
-            return re.sub(r'(<w:tcPr[^>]*>)', r'\1<w:gridSpan w:val="{{%s}}"/>'
-                          % m.group(2), cell_xml)
-        src_xml = re.sub(r'(<w:tc[ >](?:(?!<w:tc[ >]).)*){%\s*colspan\s+([^%]*)\s*%}(.*?</w:tc>)',
-                         colspan, src_xml, flags=re.DOTALL)
+            cell_xml = re.sub(
+                r"<w:r[ >](?:(?!<w:r[ >]).)*<w:t></w:t>.*?</w:r>",
+                "",
+                cell_xml,
+                flags=re.DOTALL,
+            )
+            cell_xml = re.sub(r"<w:gridSpan[^/]*/>", "", cell_xml, count=1)
+            return re.sub(
+                r"(<w:tcPr[^>]*>)",
+                r'\1<w:gridSpan w:val="{{%s}}"/>' % m.group(2),
+                cell_xml,
+            )
+
+        src_xml = re.sub(
+            r"(<w:tc[ >](?:(?!<w:tc[ >]).)*){%\s*colspan\s+([^%]*)\s*%}(.*?</w:tc>)",
+            colspan,
+            src_xml,
+            flags=re.DOTALL,
+        )
 
         # manage table cell background color
         def cellbg(m):
             cell_xml = m.group(1) + m.group(3)
-            cell_xml = re.sub(r'<w:r[ >](?:(?!<w:r[ >]).)*<w:t></w:t>.*?</w:r>',
-                              '', cell_xml, flags=re.DOTALL)
-            cell_xml = re.sub(r'<w:shd[^/]*/>', '', cell_xml, count=1)
-            return re.sub(r'(<w:tcPr[^>]*>)',
-                          r'\1<w:shd w:val="clear" w:color="auto" w:fill="{{%s}}"/>'
-                          % m.group(2), cell_xml)
-        src_xml = re.sub(r'(<w:tc[ >](?:(?!<w:tc[ >]).)*){%\s*cellbg\s+([^%]*)\s*%}(.*?</w:tc>)',
-                         cellbg, src_xml, flags=re.DOTALL)
+            cell_xml = re.sub(
+                r"<w:r[ >](?:(?!<w:r[ >]).)*<w:t></w:t>.*?</w:r>",
+                "",
+                cell_xml,
+                flags=re.DOTALL,
+            )
+            cell_xml = re.sub(r"<w:shd[^/]*/>", "", cell_xml, count=1)
+            return re.sub(
+                r"(<w:tcPr[^>]*>)",
+                r'\1<w:shd w:val="clear" w:color="auto" w:fill="{{%s}}"/>' % m.group(2),
+                cell_xml,
+            )
+
+        src_xml = re.sub(
+            r"(<w:tc[ >](?:(?!<w:tc[ >]).)*){%\s*cellbg\s+([^%]*)\s*%}(.*?</w:tc>)",
+            cellbg,
+            src_xml,
+            flags=re.DOTALL,
+        )
 
         # ensure space preservation
-        src_xml = re.sub(r'<w:t>((?:(?!<w:t>).)*)({{.*?}}|{%.*?%})',
-                         r'<w:t xml:space="preserve">\1\2',
-                         src_xml, flags=re.DOTALL)
-        src_xml = re.sub(r'({{r\s.*?}}|{%r\s.*?%})',
-                         r'</w:t></w:r><w:r><w:t xml:space="preserve">\1</w:t></w:r><w:r><w:t xml:space="preserve">',
-                         src_xml, flags=re.DOTALL)
+        src_xml = re.sub(
+            r"<w:t>((?:(?!<w:t>).)*)({{.*?}}|{%.*?%})",
+            r'<w:t xml:space="preserve">\1\2',
+            src_xml,
+            flags=re.DOTALL,
+        )
+        src_xml = re.sub(
+            r"({{r\s.*?}}|{%r\s.*?%})",
+            r'</w:t></w:r><w:r><w:t xml:space="preserve">\1</w:t></w:r><w:r><w:t xml:space="preserve">',
+            src_xml,
+            flags=re.DOTALL,
+        )
 
         # {%- will merge with previous paragraph text
-        src_xml = re.sub(r'</w:t>(?:(?!</w:t>).)*?{%-', '{%', src_xml, flags=re.DOTALL)
+        src_xml = re.sub(r"</w:t>(?:(?!</w:t>).)*?{%-", "{%", src_xml, flags=re.DOTALL)
         # -%} will merge with next paragraph text
-        src_xml = re.sub(r'-%}(?:(?!<w:t[ >]|{%|{{).)*?<w:t[^>]*?>', '%}', src_xml, flags=re.DOTALL)
+        src_xml = re.sub(
+            r"-%}(?:(?!<w:t[ >]|{%|{{).)*?<w:t[^>]*?>", "%}", src_xml, flags=re.DOTALL
+        )
 
-        for y in ['tr', 'tc', 'p', 'r']:
+        for y in ["tr", "tc", "p", "r"]:
             # replace into xml code the row/paragraph/run containing
             # {%y xxx %} or {{y xxx}} template tag
             # by {% xxx %} or {{ xx }} without any surrounding <w:y> tags :
             # This is mandatory to have jinja2 generating correct xml code
-            pat = r'<w:%(y)s[ >](?:(?!<w:%(y)s[ >]).)*({%%|{{)%(y)s ([^}%%]*(?:%%}|}})).*?</w:%(y)s>' % {'y': y}
-            src_xml = re.sub(pat, r'\1 \2', src_xml, flags=re.DOTALL)
+            pat = (
+                r"<w:%(y)s[ >](?:(?!<w:%(y)s[ >]).)*({%%|{{)%(y)s ([^}%%]*(?:%%}|}})).*?</w:%(y)s>"
+                % {"y": y}
+            )
+            src_xml = re.sub(pat, r"\1 \2", src_xml, flags=re.DOTALL)
 
-        for y in ['tr', 'tc', 'p']:
+        for y in ["tr", "tc", "p"]:
             # same thing, but for {#y xxx #} (but not where y == 'r', since that
             # makes less sense to use comments in that context
-            pat = r'<w:%(y)s[ >](?:(?!<w:%(y)s[ >]).)*({#)%(y)s ([^}#]*(?:#})).*?</w:%(y)s>' % {'y': y}
-            src_xml = re.sub(pat, r'\1 \2', src_xml, flags=re.DOTALL)
+            pat = (
+                r"<w:%(y)s[ >](?:(?!<w:%(y)s[ >]).)*({#)%(y)s ([^}#]*(?:#})).*?</w:%(y)s>"
+                % {"y": y}
+            )
+            src_xml = re.sub(pat, r"\1 \2", src_xml, flags=re.DOTALL)
 
         # add vMerge
         # use {% vm %} to make this table cell and its copies be vertically merged within a {% for %}
         def v_merge_tc(m):
             def v_merge(m1):
                 return (
-                    '<w:vMerge w:val="{% if loop.first %}restart{% else %}continue{% endif %}"/>' +
-                    m1.group(1) +  # Everything between ``</w:tcPr>`` and ``<w:t>``.
-                    "{% if loop.first %}" +
-                    m1.group(2) +  # Everything before ``{% vm %}``.
-                    m1.group(3) +  # Everything after ``{% vm %}``.
-                    "{% endif %}" +
-                    m1.group(4)  # ``</w:t>``.
+                    '<w:vMerge w:val="{% if loop.first %}restart{% else %}continue{% endif %}"/>'
+                    + m1.group(1)  # Everything between ``</w:tcPr>`` and ``<w:t>``.
+                    + "{% if loop.first %}"
+                    + m1.group(2)  # Everything before ``{% vm %}``.
+                    + m1.group(3)  # Everything after ``{% vm %}``.
+                    + "{% endif %}"
+                    + m1.group(4)  # ``</w:t>``.
                 )
 
             return re.sub(
-                r'(</w:tcPr[ >].*?<w:t(?:.*?)>)(.*?)(?:{%\s*vm\s*%})(.*?)(</w:t>)',
+                r"(</w:tcPr[ >].*?<w:t(?:.*?)>)(.*?)(?:{%\s*vm\s*%})(.*?)(</w:t>)",
                 v_merge,
                 m.group(),  # Everything between ``</w:tc>`` and ``</w:tc>`` with ``{% vm %}`` inside.
                 flags=re.DOTALL,
             )
-        src_xml = re.sub(r'<w:tc[ >](?:(?!<w:tc[ >]).)*?{%\s*vm\s*%}.*?</w:tc[ >]',
-                         v_merge_tc, src_xml, flags=re.DOTALL)
+
+        src_xml = re.sub(
+            r"<w:tc[ >](?:(?!<w:tc[ >]).)*?{%\s*vm\s*%}.*?</w:tc[ >]",
+            v_merge_tc,
+            src_xml,
+            flags=re.DOTALL,
+        )
 
         # Use ``{% hm %}`` to make table cell become horizontally merged within
         # a ``{% for %}``.
         def h_merge_tc(m):
-            xml_to_patch = m.group()  # Everything between ``</w:tc>`` and ``</w:tc>`` with ``{% hm %}`` inside.
+            xml_to_patch = (
+                m.group()
+            )  # Everything between ``</w:tc>`` and ``</w:tc>`` with ``{% hm %}`` inside.
 
             def with_gridspan(m1):
                 return (
-                    m1.group(1) +  # ``w:gridSpan w:val="``.
-                    '{{ ' + m1.group(2) + ' * loop.length }}' +  # Content of ``w:val``, multiplied by loop length.
-                    m1.group(3)  # Closing quotation mark.
+                    m1.group(1)  # ``w:gridSpan w:val="``.
+                    + "{{ "
+                    + m1.group(2)
+                    + " * loop.length }}"  # Content of ``w:val``, multiplied by loop length.
+                    + m1.group(3)  # Closing quotation mark.
                 )
 
             def without_gridspan(m2):
                 return (
-                    '<w:gridSpan w:val="{{ loop.length }}"/>' +
-                    m2.group(1) +  # Everything between ``</w:tcPr>`` and ``<w:t>``.
-                    m2.group(2) +  # Everything before ``{% hm %}``.
-                    m2.group(3) +  # Everything after ``{% hm %}``.
-                    m2.group(4)  # ``</w:t>``.
+                    '<w:gridSpan w:val="{{ loop.length }}"/>'
+                    + m2.group(1)  # Everything between ``</w:tcPr>`` and ``<w:t>``.
+                    + m2.group(2)  # Everything before ``{% hm %}``.
+                    + m2.group(3)  # Everything after ``{% hm %}``.
+                    + m2.group(4)  # ``</w:t>``.
                 )
 
-            if re.search(r'w:gridSpan', xml_to_patch):
+            if re.search(r"w:gridSpan", xml_to_patch):
                 # Simple case, there's already ``gridSpan``, multiply its value.
 
                 xml = re.sub(
@@ -195,15 +257,15 @@
                     flags=re.DOTALL,
                 )
                 xml = re.sub(
-                    r'{%\s*hm\s*%}',
-                    '',
+                    r"{%\s*hm\s*%}",
+                    "",
                     xml,  # Patched xml.
                     flags=re.DOTALL,
                 )
             else:
                 # There're no ``gridSpan``, add one.
                 xml = re.sub(
-                    r'(</w:tcPr[ >].*?<w:t(?:.*?)>)(.*?)(?:{%\s*hm\s*%})(.*?)(</w:t>)',
+                    r"(</w:tcPr[ >].*?<w:t(?:.*?)>)(.*?)(?:{%\s*hm\s*%})(.*?)(</w:t>)",
                     without_gridspan,
                     xml_to_patch,
                     flags=re.DOTALL,
@@ -212,24 +274,31 @@
             # Discard every other cell generated in loop.
             return "{% if loop.first %}" + xml + "{% endif %}"
 
-        src_xml = re.sub(r'<w:tc[ >](?:(?!<w:tc[ >]).)*?{%\s*hm\s*%}.*?</w:tc[ >]',
-                         h_merge_tc, src_xml, flags=re.DOTALL)
+        src_xml = re.sub(
+            r"<w:tc[ >](?:(?!<w:tc[ >]).)*?{%\s*hm\s*%}.*?</w:tc[ >]",
+            h_merge_tc,
+            src_xml,
+            flags=re.DOTALL,
+        )
 
         def clean_tags(m):
-            return (m.group(0)
-                    .replace(r"&#8216;", "'")
-                    .replace('&lt;', '<')
-                    .replace('&gt;', '>')
-                    .replace(u'“', u'"')
-                    .replace(u'”', u'"')
-                    .replace(u"‘", u"'")
-                    .replace(u"’", u"'"))
-        src_xml = re.sub(r'(?<=\{[\{%])(.*?)(?=[\}%]})', clean_tags, src_xml)
+            return (
+                m.group(0)
+                .replace(r"&#8216;", "'")
+                .replace("&lt;", "<")
+                .replace("&gt;", ">")
+                .replace("“", '"')
+                .replace("”", '"')
+                .replace("‘", "'")
+                .replace("’", "'")
+            )
+
+        src_xml = re.sub(r"(?<=\{[\{%])(.*?)(?=[\}%]})", clean_tags, src_xml)
 
         return src_xml
 
     def render_xml_part(self, src_xml, part, context, jinja_env=None):
-        src_xml = re.sub(r'<w:p([ >])', r'\n<w:p\1', src_xml)
+        src_xml = re.sub(r"<w:p([ >])", r"\n<w:p\1", src_xml)
         try:
             self.current_rendering_part = part
             if jinja_env:
@@ -238,34 +307,39 @@
                 template = Template(src_xml)
             dst_xml = template.render(context)
         except TemplateError as exc:
-            if hasattr(exc, 'lineno') and exc.lineno is not None:
+            if hasattr(exc, "lineno") and exc.lineno is not None:
                 line_number = max(exc.lineno - 4, 0)
-                exc.docx_context = map(lambda x: re.sub(r'<[^>]+>', '', x),
-                                       src_xml.splitlines()[line_number:(line_number + 7)])
+                exc.docx_context = map(
+                    lambda x: re.sub(r"<[^>]+>", "", x),
+                    src_xml.splitlines()[line_number : (line_number + 7)],
+                )
             raise exc
-        dst_xml = re.sub(r'\n<w:p([ >])', r'<w:p\1', dst_xml)
-        dst_xml = (dst_xml
-                   .replace('{_{', '{{')
-                   .replace('}_}', '}}')
-                   .replace('{_%', '{%')
-                   .replace('%_}', '%}'))
+        dst_xml = re.sub(r"\n<w:p([ >])", r"<w:p\1", dst_xml)
+        dst_xml = (
+            dst_xml.replace("{_{", "{{")
+            .replace("}_}", "}}")
+            .replace("{_%", "{%")
+            .replace("%_}", "%}")
+        )
         dst_xml = self.resolve_listing(dst_xml)
         return dst_xml
 
-    def render_properties(self, context: Dict[str, Any], jinja_env: Optional[Environment] = None) -> None:
+    def render_properties(
+        self, context: Dict[str, Any], jinja_env: Optional[Environment] = None
+    ) -> None:
         # List of string attributes of docx.opc.coreprops.CoreProperties which are strings.
         # It seems that some attributes cannot be written as strings. Those are commented out.
         properties = [
-            'author',
+            "author",
             # 'category',
-            'comments',
+            "comments",
             # 'content_status',
-            'identifier',
+            "identifier",
             # 'keywords',
-            'language',
+            "language",
             # 'last_modified_by',
-            'subject',
-            'title',
+            "subject",
+            "title",
             # 'version',
         ]
         if jinja_env is None:
@@ -280,32 +354,53 @@
     def resolve_listing(self, xml):
 
         def resolve_text(run_properties, paragraph_properties, m):
-            xml = m.group(0).replace('\t', '</w:t></w:r>'
-                                     '<w:r>%s<w:tab/></w:r>'
-                                     '<w:r>%s<w:t xml:space="preserve">' % (run_properties, run_properties))
-            xml = xml.replace('\a', '</w:t></w:r></w:p>'
-                              '<w:p>%s<w:r>%s<w:t xml:space="preserve">' % (paragraph_properties, run_properties))
-            xml = xml.replace('\n', '</w:t><w:br/><w:t xml:space="preserve">')
-            xml = xml.replace('\f', '</w:t></w:r></w:p>'
-                              '<w:p><w:r><w:br w:type="page"/></w:r></w:p>'
-                              '<w:p>%s<w:r>%s<w:t xml:space="preserve">' % (paragraph_properties, run_properties))
+            xml = m.group(0).replace(
+                "\t",
+                "</w:t></w:r>"
+                "<w:r>%s<w:tab/></w:r>"
+                '<w:r>%s<w:t xml:space="preserve">' % (run_properties, run_properties),
+            )
+            xml = xml.replace(
+                "\a",
+                "</w:t></w:r></w:p>"
+                '<w:p>%s<w:r>%s<w:t xml:space="preserve">'
+                % (paragraph_properties, run_properties),
+            )
+            xml = xml.replace("\n", '</w:t><w:br/><w:t xml:space="preserve">')
+            xml = xml.replace(
+                "\f",
+                "</w:t></w:r></w:p>"
+                '<w:p><w:r><w:br w:type="page"/></w:r></w:p>'
+                '<w:p>%s<w:r>%s<w:t xml:space="preserve">'
+                % (paragraph_properties, run_properties),
+            )
             return xml
 
         def resolve_run(paragraph_properties, m):
-            run_properties = re.search(r'<w:rPr>.*?</w:rPr>', m.group(0))
-            run_properties = run_properties.group(0) if run_properties else ''
-            return re.sub(r'<w:t(?: [^>]*)?>.*?</w:t>',
-                          lambda x: resolve_text(run_properties, paragraph_properties, x), m.group(0),
-                          flags=re.DOTALL)
+            run_properties = re.search(r"<w:rPr>.*?</w:rPr>", m.group(0))
+            run_properties = run_properties.group(0) if run_properties else ""
+            return re.sub(
+                r"<w:t(?: [^>]*)?>.*?</w:t>",
+                lambda x: resolve_text(run_properties, paragraph_properties, x),
+                m.group(0),
+                flags=re.DOTALL,
+            )
 
         def resolve_paragraph(m):
-            paragraph_properties = re.search(r'<w:pPr>.*?</w:pPr>', m.group(0))
-            paragraph_properties = paragraph_properties.group(0) if paragraph_properties else ''
-            return re.sub(r'<w:r(?: [^>]*)?>.*?</w:r>',
-                          lambda x: resolve_run(paragraph_properties, x),
-                          m.group(0), flags=re.DOTALL)
+            paragraph_properties = re.search(r"<w:pPr>.*?</w:pPr>", m.group(0))
+            paragraph_properties = (
+                paragraph_properties.group(0) if paragraph_properties else ""
+            )
+            return re.sub(
+                r"<w:r(?: [^>]*)?>.*?</w:r>",
+                lambda x: resolve_run(paragraph_properties, x),
+                m.group(0),
+                flags=re.DOTALL,
+            )
 
-        xml = re.sub(r'<w:p(?: [^>]*)?>.*?</w:p>', resolve_paragraph, xml, flags=re.DOTALL)
+        xml = re.sub(
+            r"<w:p(?: [^>]*)?>.*?</w:p>", resolve_paragraph, xml, flags=re.DOTALL
+        )
 
         return xml
 
@@ -332,7 +427,7 @@
         m = re.match(r'<\?xml[^\?]+\bencoding="([^"]+)"', xml, re.I)
         if m:
             return m.group(1)
-        return 'utf-8'
+        return "utf-8"
 
     def build_headers_footers_xml(self, context, uri, jinja_env=None):
         for relKey, part in self.get_headers_footers(uri):
@@ -353,7 +448,7 @@
         self,
         context: Dict[str, Any],
         jinja_env: Optional[Environment] = None,
-        autoescape: bool = False
+        autoescape: bool = False,
     ) -> None:
         # init template working attributes
         self.render_init()
@@ -377,14 +472,12 @@
         self.map_tree(tree)
 
         # Headers
-        headers = self.build_headers_footers_xml(context, self.HEADER_URI,
-                                                 jinja_env)
+        headers = self.build_headers_footers_xml(context, self.HEADER_URI, jinja_env)
         for relKey, xml in headers:
             self.map_headers_footers_xml(relKey, xml)
 
         # Footers
-        footers = self.build_headers_footers_xml(context, self.FOOTER_URI,
-                                                 jinja_env)
+        footers = self.build_headers_footers_xml(context, self.FOOTER_URI, jinja_env)
         for relKey, xml in footers:
             self.map_headers_footers_xml(relKey, xml)
 
@@ -399,15 +492,15 @@
         parser = etree.XMLParser(recover=True)
         tree = etree.fromstring(xml, parser=parser)
         # get namespace
-        ns = '{' + tree.nsmap['w'] + '}'
+        ns = "{" + tree.nsmap["w"] + "}"
         # walk trough xml and find table
-        for t in tree.iter(ns+'tbl'):
-            tblGrid = t.find(ns+'tblGrid')
-            columns = tblGrid.findall(ns+'gridCol')
+        for t in tree.iter(ns + "tbl"):
+            tblGrid = t.find(ns + "tblGrid")
+            columns = tblGrid.findall(ns + "gridCol")
             to_add = 0
             # walk trough all rows and try to find if there is higher cell count
-            for r in t.iter(ns+'tr'):
-                cells = r.findall(ns+'tc')
+            for r in t.iter(ns + "tr"):
+                cells = r.findall(ns + "tc")
                 if (len(columns) + to_add) < len(cells):
                     to_add = len(cells) - len(columns)
             # is necessary to add columns?
@@ -417,39 +510,44 @@
                 width = 0.0
                 new_average = None
                 for c in columns:
-                    if not c.get(ns+'w') is None:
-                        width += float(c.get(ns+'w'))
+                    if not c.get(ns + "w") is None:
+                        width += float(c.get(ns + "w"))
                 # try to keep proportion of table
                 if width > 0:
                     old_average = width / len(columns)
                     new_average = width / (len(columns) + to_add)
                     # scale the old columns
                     for c in columns:
-                        c.set(ns+'w', str(int(float(c.get(ns+'w')) *
-                                           new_average/old_average)))
+                        c.set(
+                            ns + "w",
+                            str(
+                                int(float(c.get(ns + "w")) * new_average / old_average)
+                            ),
+                        )
                 # add new columns
                 for i in range(to_add):
-                    etree.SubElement(tblGrid, ns+'gridCol',
-                                     {ns+'w': str(int(new_average))})
+                    etree.SubElement(
+                        tblGrid, ns + "gridCol", {ns + "w": str(int(new_average))}
+                    )
 
             # Refetch columns after columns addition.
-            columns = tblGrid.findall(ns + 'gridCol')
+            columns = tblGrid.findall(ns + "gridCol")
             columns_len = len(columns)
 
             cells_len_max = 0
 
             def get_cell_len(total, cell):
-                tc_pr = cell.find(ns + 'tcPr')
-                grid_span = None if tc_pr is None else tc_pr.find(ns + 'gridSpan')
+                tc_pr = cell.find(ns + "tcPr")
+                grid_span = None if tc_pr is None else tc_pr.find(ns + "gridSpan")
 
                 if grid_span is not None:
-                    return total + int(grid_span.get(ns + 'val'))
+                    return total + int(grid_span.get(ns + "val"))
 
                 return total + 1
 
             # Calculate max of table cells to compare with `gridCol`.
-            for r in t.iter(ns + 'tr'):
-                cells = r.findall(ns + 'tc')
+            for r in t.iter(ns + "tr"):
+                cells = r.findall(ns + "tc")
                 cells_len = functools.reduce(get_cell_len, cells, 0)
                 cells_len_max = max(cells_len_max, cells_len)
 
@@ -463,11 +561,11 @@
                 removed_width = 0.0
 
                 for c in columns[-to_remove:]:
-                    removed_width += float(c.get(ns + 'w'))
+                    removed_width += float(c.get(ns + "w"))
 
                     tblGrid.remove(c)
 
-                columns_left = tblGrid.findall(ns + 'gridCol')
+                columns_left = tblGrid.findall(ns + "gridCol")
 
                 # Distribute `removed_width` across all columns that has
                 # left after extras removal.
@@ -477,15 +575,15 @@
                     extra_space = int(extra_space)
 
                     for c in columns_left:
-                        c.set(ns+'w', str(int(float(c.get(ns+'w')) + extra_space)))
+                        c.set(ns + "w", str(int(float(c.get(ns + "w")) + extra_space)))
 
         return tree
 
     def fix_docpr_ids(self, tree):
         # some Ids may have some collisions : so renumbering all of them :
-        for elt in tree.xpath('//wp:docPr', namespaces=docx.oxml.ns.nsmap):
+        for elt in tree.xpath("//wp:docPr", namespaces=docx.oxml.ns.nsmap):
             self.docx_ids_index += 1
-            elt.attrib['id'] = str(self.docx_ids_index)
+            elt.attrib["id"] = str(self.docx_ids_index)
 
     def new_subdoc(self, docpath=None):
         self.init_docx()
@@ -493,13 +591,13 @@
 
     @staticmethod
     def get_file_crc(file_obj):
-        if hasattr(file_obj, 'read'):
+        if hasattr(file_obj, "read"):
             buf = file_obj.read()
         else:
-            with open(file_obj, 'rb') as fh:
+            with open(file_obj, "rb") as fh:
                 buf = fh.read()
 
-        crc = (binascii.crc32(buf) & 0xFFFFFFFF)
+        crc = binascii.crc32(buf) & 0xFFFFFFFF
         return crc
 
     def replace_media(self, src_file, dst_file):
@@ -522,10 +620,10 @@
         """
 
         crc = self.get_file_crc(src_file)
-        if hasattr(dst_file, 'read'):
+        if hasattr(dst_file, "read"):
             self.crc_to_new_media[crc] = dst_file.read()
         else:
-            with open(dst_file, 'rb') as fh:
+            with open(dst_file, "rb") as fh:
                 self.crc_to_new_media[crc] = fh.read()
 
     def replace_pic(self, embedded_file, dst_file):
@@ -543,11 +641,11 @@
         for replace_embedded and replace_media)
         """
 
-        if hasattr(dst_file, 'read'):
+        if hasattr(dst_file, "read"):
             # NOTE: file extension not checked
             self.pics_to_replace[embedded_file] = dst_file.read()
         else:
-            with open(dst_file, 'rb') as fh:
+            with open(dst_file, "rb") as fh:
                 self.pics_to_replace[embedded_file] = fh.read()
 
     def replace_embedded(self, src_file, dst_file):
@@ -563,7 +661,7 @@
         Note2 : it is important to have the source file as it is required to
         calculate its CRC to find them in the docx
         """
-        with open(dst_file, 'rb') as fh:
+        with open(dst_file, "rb") as fh:
             crc = self.get_file_crc(src_file)
             self.crc_to_new_embedded[crc] = fh.read()
 
@@ -594,7 +692,7 @@
         "word/embeddings/". Note that the file is renamed by MSWord,
         so you have to guess a little bit...
         """
-        with open(dst_file, 'rb') as fh:
+        with open(dst_file, "rb") as fh:
             self.zipname_to_replace[zipname] = fh.read()
 
     def reset_replacements(self):
@@ -619,11 +717,9 @@
         self.pics_to_replace = {}
 
     def post_processing(self, docx_file):
-        if (self.crc_to_new_media or
-                self.crc_to_new_embedded or
-                self.zipname_to_replace):
+        if self.crc_to_new_media or self.crc_to_new_embedded or self.zipname_to_replace:
 
-            if hasattr(docx_file, 'read'):
+            if hasattr(docx_file, "read"):
                 tmp_file = io.BytesIO()
                 DocxTemplate(docx_file).save(tmp_file)
                 tmp_file.seek(0)
@ -632,27 +728,31 @@ class DocxTemplate(object):
|
|||||||
docx_file.seek(0)
|
docx_file.seek(0)
|
||||||
|
|
||||||
else:
|
else:
|
||||||
tmp_file = '%s_docxtpl_before_replace_medias' % docx_file
|
tmp_file = "%s_docxtpl_before_replace_medias" % docx_file
|
||||||
os.rename(docx_file, tmp_file)
|
os.rename(docx_file, tmp_file)
|
||||||
|
|
||||||
with zipfile.ZipFile(tmp_file) as zin:
|
with zipfile.ZipFile(tmp_file) as zin:
|
||||||
with zipfile.ZipFile(docx_file, 'w') as zout:
|
with zipfile.ZipFile(docx_file, "w") as zout:
|
||||||
for item in zin.infolist():
|
for item in zin.infolist():
|
||||||
buf = zin.read(item.filename)
|
buf = zin.read(item.filename)
|
||||||
if item.filename in self.zipname_to_replace:
|
if item.filename in self.zipname_to_replace:
|
||||||
zout.writestr(item, self.zipname_to_replace[item.filename])
|
zout.writestr(item, self.zipname_to_replace[item.filename])
|
||||||
elif (item.filename.startswith('word/media/') and
|
elif (
|
||||||
item.CRC in self.crc_to_new_media):
|
item.filename.startswith("word/media/")
|
||||||
|
and item.CRC in self.crc_to_new_media
|
||||||
|
):
|
||||||
zout.writestr(item, self.crc_to_new_media[item.CRC])
|
zout.writestr(item, self.crc_to_new_media[item.CRC])
|
||||||
elif (item.filename.startswith('word/embeddings/') and
|
elif (
|
||||||
item.CRC in self.crc_to_new_embedded):
|
item.filename.startswith("word/embeddings/")
|
||||||
|
and item.CRC in self.crc_to_new_embedded
|
||||||
|
):
|
||||||
zout.writestr(item, self.crc_to_new_embedded[item.CRC])
|
zout.writestr(item, self.crc_to_new_embedded[item.CRC])
|
||||||
else:
|
else:
|
||||||
zout.writestr(item, buf)
|
zout.writestr(item, buf)
|
||||||
|
|
||||||
if not hasattr(tmp_file, 'read'):
|
if not hasattr(tmp_file, "read"):
|
||||||
os.remove(tmp_file)
|
os.remove(tmp_file)
|
||||||
if hasattr(docx_file, 'read'):
|
if hasattr(docx_file, "read"):
|
||||||
docx_file.seek(0)
|
docx_file.seek(0)
|
||||||
|
|
||||||
def pre_processing(self):

@ -677,9 +777,7 @@ class DocxTemplate(object):
# make sure all template images defined by user were replaced
for img_id, replaced in replaced_pics.items():
if not replaced:
raise ValueError(
"Picture %s not found in the docx template" % img_id
)
raise ValueError("Picture %s not found in the docx template" % img_id)
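A short sketch of how this check surfaces for a user, assuming render() runs the pre-processing step shown above (the picture name is deliberately wrong and hypothetical):

from docxtpl import DocxTemplate

tpl = DocxTemplate("templates/replace_picture_tpl.docx")
tpl.replace_pic("not_in_template.png", "templates/python.png")  # hypothetical picture name
tpl.render({})  # expected to raise ValueError: Picture not_in_template.png not found in the docx template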
def get_pic_map(self):
return self.pic_map
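For completeness, the mapping returned above can be inspected directly. Whether pic_map is already populated before rendering is an assumption here, so treat this as a sketch:

from docxtpl import DocxTemplate

tpl = DocxTemplate("templates/replace_picture_tpl.docx")
tpl.render({})
for name in tpl.get_pic_map():
    print(name)  # picture filenames known to the template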
@ -690,16 +788,17 @@ class DocxTemplate(object):

part_map = {}

gds = et.xpath('//a:graphic/a:graphicData', namespaces=docx.oxml.ns.nsmap)
gds = et.xpath("//a:graphic/a:graphicData", namespaces=docx.oxml.ns.nsmap)
for gd in gds:
rel = None
# Either IMAGE, CHART, SMART_ART, ...
try:
if gd.attrib['uri'] == docx.oxml.ns.nsmap['pic']:
if gd.attrib["uri"] == docx.oxml.ns.nsmap["pic"]:
# Either PICTURE or LINKED_PICTURE image
blip = gd.xpath('pic:pic/pic:blipFill/a:blip',
namespaces=docx.oxml.ns.nsmap)[0]
dest = blip.xpath('@r:embed', namespaces=docx.oxml.ns.nsmap)
blip = gd.xpath(
"pic:pic/pic:blipFill/a:blip", namespaces=docx.oxml.ns.nsmap
)[0]
dest = blip.xpath("@r:embed", namespaces=docx.oxml.ns.nsmap)
if len(dest) > 0:
rel = dest[0]
else:

@ -707,24 +806,29 @@ class DocxTemplate(object):
else:
continue

non_visual_properties = 'pic:pic/pic:nvPicPr/pic:cNvPr/'
non_visual_properties = "pic:pic/pic:nvPicPr/pic:cNvPr/"
filename = gd.xpath('%s@name' % non_visual_properties,
namespaces=docx.oxml.ns.nsmap)[0]
titles = gd.xpath('%s@title' % non_visual_properties,
namespaces=docx.oxml.ns.nsmap)
filename = gd.xpath(
"%s@name" % non_visual_properties, namespaces=docx.oxml.ns.nsmap
)[0]
titles = gd.xpath(
"%s@title" % non_visual_properties, namespaces=docx.oxml.ns.nsmap
)
if titles:
title = titles[0]
else:
title = ""
descriptions = gd.xpath('%s@descr' % non_visual_properties,
namespaces=docx.oxml.ns.nsmap)
descriptions = gd.xpath(
"%s@descr" % non_visual_properties, namespaces=docx.oxml.ns.nsmap
)
if descriptions:
description = descriptions[0]
else:
description = ""

part_map[filename] = (doc_part.rels[rel].target_ref,
doc_part.rels[rel].target_part)
part_map[filename] = (
doc_part.rels[rel].target_ref,
doc_part.rels[rel].target_part,
)

# replace data
for img_id, img_data in six.iteritems(self.pics_to_replace):

@ -741,8 +845,7 @@ class DocxTemplate(object):

def build_url_id(self, url):
self.init_docx()
return self.docx._part.relate_to(url, REL_TYPE.HYPERLINK,
is_external=True)
return self.docx._part.relate_to(url, REL_TYPE.HYPERLINK, is_external=True)

def save(self, filename: Union[IO[bytes], str, PathLike], *args, **kwargs) -> None:
# case where save() is called without doing rendering

@ -754,7 +857,9 @@ class DocxTemplate(object):
self.post_processing(filename)
self.is_saved = True

def get_undeclared_template_variables(self, jinja_env: Optional[Environment] = None) -> Set[str]:
def get_undeclared_template_variables(
self, jinja_env: Optional[Environment] = None
) -> Set[str]:
self.init_docx(reload=False)
xml = self.get_xml()
xml = self.patch_xml(xml)
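get_undeclared_template_variables() is handy for validating a context before rendering; a small sketch using a template path that appears further down in the test suite:

from docxtpl import DocxTemplate

tpl = DocxTemplate("templates/order_tpl.docx")
context = {"customer_name": "Eric"}  # deliberately incomplete
missing = tpl.get_undeclared_template_variables() - set(context)
if missing:
    print("Template variables not provided:", sorted(missing))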
poetry.lock: 120 changes (generated)
@ -14,6 +14,75 @@ files = [
|
|||||||
[package.extras]
|
[package.extras]
|
||||||
dev = ["freezegun (>=1.0,<2.0)", "pytest (>=6.0)", "pytest-cov"]
|
dev = ["freezegun (>=1.0,<2.0)", "pytest (>=6.0)", "pytest-cov"]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "black"
|
||||||
|
version = "24.4.2"
|
||||||
|
description = "The uncompromising code formatter."
|
||||||
|
optional = false
|
||||||
|
python-versions = ">=3.8"
|
||||||
|
files = [
|
||||||
|
{file = "black-24.4.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:dd1b5a14e417189db4c7b64a6540f31730713d173f0b63e55fabd52d61d8fdce"},
|
||||||
|
{file = "black-24.4.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8e537d281831ad0e71007dcdcbe50a71470b978c453fa41ce77186bbe0ed6021"},
|
||||||
|
{file = "black-24.4.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eaea3008c281f1038edb473c1aa8ed8143a5535ff18f978a318f10302b254063"},
|
||||||
|
{file = "black-24.4.2-cp310-cp310-win_amd64.whl", hash = "sha256:7768a0dbf16a39aa5e9a3ded568bb545c8c2727396d063bbaf847df05b08cd96"},
|
||||||
|
{file = "black-24.4.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:257d724c2c9b1660f353b36c802ccece186a30accc7742c176d29c146df6e474"},
|
||||||
|
{file = "black-24.4.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bdde6f877a18f24844e381d45e9947a49e97933573ac9d4345399be37621e26c"},
|
||||||
|
{file = "black-24.4.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e151054aa00bad1f4e1f04919542885f89f5f7d086b8a59e5000e6c616896ffb"},
|
||||||
|
{file = "black-24.4.2-cp311-cp311-win_amd64.whl", hash = "sha256:7e122b1c4fb252fd85df3ca93578732b4749d9be076593076ef4d07a0233c3e1"},
|
||||||
|
{file = "black-24.4.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:accf49e151c8ed2c0cdc528691838afd217c50412534e876a19270fea1e28e2d"},
|
||||||
|
{file = "black-24.4.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:88c57dc656038f1ab9f92b3eb5335ee9b021412feaa46330d5eba4e51fe49b04"},
|
||||||
|
{file = "black-24.4.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:be8bef99eb46d5021bf053114442914baeb3649a89dc5f3a555c88737e5e98fc"},
|
||||||
|
{file = "black-24.4.2-cp312-cp312-win_amd64.whl", hash = "sha256:415e686e87dbbe6f4cd5ef0fbf764af7b89f9057b97c908742b6008cc554b9c0"},
|
||||||
|
{file = "black-24.4.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:bf10f7310db693bb62692609b397e8d67257c55f949abde4c67f9cc574492cc7"},
|
||||||
|
{file = "black-24.4.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:98e123f1d5cfd42f886624d84464f7756f60ff6eab89ae845210631714f6db94"},
|
||||||
|
{file = "black-24.4.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:48a85f2cb5e6799a9ef05347b476cce6c182d6c71ee36925a6c194d074336ef8"},
|
||||||
|
{file = "black-24.4.2-cp38-cp38-win_amd64.whl", hash = "sha256:b1530ae42e9d6d5b670a34db49a94115a64596bc77710b1d05e9801e62ca0a7c"},
|
||||||
|
{file = "black-24.4.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:37aae07b029fa0174d39daf02748b379399b909652a806e5708199bd93899da1"},
|
||||||
|
{file = "black-24.4.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:da33a1a5e49c4122ccdfd56cd021ff1ebc4a1ec4e2d01594fef9b6f267a9e741"},
|
||||||
|
{file = "black-24.4.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ef703f83fc32e131e9bcc0a5094cfe85599e7109f896fe8bc96cc402f3eb4b6e"},
|
||||||
|
{file = "black-24.4.2-cp39-cp39-win_amd64.whl", hash = "sha256:b9176b9832e84308818a99a561e90aa479e73c523b3f77afd07913380ae2eab7"},
|
||||||
|
{file = "black-24.4.2-py3-none-any.whl", hash = "sha256:d36ed1124bb81b32f8614555b34cc4259c3fbc7eec17870e8ff8ded335b58d8c"},
|
||||||
|
{file = "black-24.4.2.tar.gz", hash = "sha256:c872b53057f000085da66a19c55d68f6f8ddcac2642392ad3a355878406fbd4d"},
|
||||||
|
]
|
||||||
|
|
||||||
|
[package.dependencies]
|
||||||
|
click = ">=8.0.0"
|
||||||
|
mypy-extensions = ">=0.4.3"
|
||||||
|
packaging = ">=22.0"
|
||||||
|
pathspec = ">=0.9.0"
|
||||||
|
platformdirs = ">=2"
|
||||||
|
|
||||||
|
[package.extras]
|
||||||
|
colorama = ["colorama (>=0.4.3)"]
|
||||||
|
d = ["aiohttp (>=3.7.4)", "aiohttp (>=3.7.4,!=3.9.0)"]
|
||||||
|
jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"]
|
||||||
|
uvloop = ["uvloop (>=0.15.2)"]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "click"
|
||||||
|
version = "8.1.7"
|
||||||
|
description = "Composable command line interface toolkit"
|
||||||
|
optional = false
|
||||||
|
python-versions = ">=3.7"
|
||||||
|
files = [
|
||||||
|
{file = "click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28"},
|
||||||
|
{file = "click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"},
|
||||||
|
]
|
||||||
|
|
||||||
|
[package.dependencies]
|
||||||
|
colorama = {version = "*", markers = "platform_system == \"Windows\""}
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "colorama"
|
||||||
|
version = "0.4.6"
|
||||||
|
description = "Cross-platform colored terminal text."
|
||||||
|
optional = false
|
||||||
|
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7"
|
||||||
|
files = [
|
||||||
|
{file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"},
|
||||||
|
{file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"},
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "docxcompose"
|
name = "docxcompose"
|
||||||
version = "1.4.0"
|
version = "1.4.0"
|
||||||
@ -306,6 +375,55 @@ files = [
|
|||||||
{file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"},
|
{file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"},
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "mypy-extensions"
|
||||||
|
version = "1.0.0"
|
||||||
|
description = "Type system extensions for programs checked with the mypy type checker."
|
||||||
|
optional = false
|
||||||
|
python-versions = ">=3.5"
|
||||||
|
files = [
|
||||||
|
{file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"},
|
||||||
|
{file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"},
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "packaging"
|
||||||
|
version = "24.1"
|
||||||
|
description = "Core utilities for Python packages"
|
||||||
|
optional = false
|
||||||
|
python-versions = ">=3.8"
|
||||||
|
files = [
|
||||||
|
{file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"},
|
||||||
|
{file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"},
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "pathspec"
|
||||||
|
version = "0.12.1"
|
||||||
|
description = "Utility library for gitignore style pattern matching of file paths."
|
||||||
|
optional = false
|
||||||
|
python-versions = ">=3.8"
|
||||||
|
files = [
|
||||||
|
{file = "pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08"},
|
||||||
|
{file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"},
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "platformdirs"
|
||||||
|
version = "4.2.2"
|
||||||
|
description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`."
|
||||||
|
optional = false
|
||||||
|
python-versions = ">=3.8"
|
||||||
|
files = [
|
||||||
|
{file = "platformdirs-4.2.2-py3-none-any.whl", hash = "sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee"},
|
||||||
|
{file = "platformdirs-4.2.2.tar.gz", hash = "sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3"},
|
||||||
|
]
|
||||||
|
|
||||||
|
[package.extras]
|
||||||
|
docs = ["furo (>=2023.9.10)", "proselint (>=0.13)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"]
|
||||||
|
test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)"]
|
||||||
|
type = ["mypy (>=1.8)"]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "pycodestyle"
|
name = "pycodestyle"
|
||||||
version = "2.12.0"
|
version = "2.12.0"
|
||||||
@ -384,4 +502,4 @@ files = [
[metadata]
lock-version = "2.0"
python-versions = "^3.11"
content-hash = "0a19499992b7770bc844b87288ec61c29b194487b3e99437a9004e66d7965ca8"
content-hash = "43818448bde523eafcedcdaeb6541d8205a5d52eef5cb4d0e1a0563a7134a579"
@ -11,6 +11,7 @@ six = "^1.16.0"
python-docx = "^1.1.2"
docxcompose = "^1.4.0"
jinja2 = "^3.1.4"
black = "^24.4.2"

[tool.poetry.group.dev.dependencies]
@ -1,42 +1,42 @@
|
|||||||
# -*- coding: utf-8 -*-
|
# -*- coding: utf-8 -*-
|
||||||
'''
|
"""
|
||||||
Created : 2015-03-12
|
Created : 2015-03-12
|
||||||
|
|
||||||
@author: Eric Lapouyade
|
@author: Eric Lapouyade
|
||||||
'''
|
"""
|
||||||
|
|
||||||
from docxtpl import DocxTemplate, RichText
|
from docxtpl import DocxTemplate, RichText
|
||||||
|
|
||||||
tpl = DocxTemplate('templates/cellbg_tpl.docx')
|
tpl = DocxTemplate("templates/cellbg_tpl.docx")
|
||||||
|
|
||||||
context = {
|
context = {
|
||||||
'alerts': [
|
"alerts": [
|
||||||
{
|
{
|
||||||
'date': '2015-03-10',
|
"date": "2015-03-10",
|
||||||
'desc': RichText('Very critical alert', color='FF0000', bold=True),
|
"desc": RichText("Very critical alert", color="FF0000", bold=True),
|
||||||
'type': 'CRITICAL',
|
"type": "CRITICAL",
|
||||||
'bg': 'FF0000',
|
"bg": "FF0000",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
'date': '2015-03-11',
|
"date": "2015-03-11",
|
||||||
'desc': RichText('Just a warning'),
|
"desc": RichText("Just a warning"),
|
||||||
'type': 'WARNING',
|
"type": "WARNING",
|
||||||
'bg': 'FFDD00',
|
"bg": "FFDD00",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
'date': '2015-03-12',
|
"date": "2015-03-12",
|
||||||
'desc': RichText('Information'),
|
"desc": RichText("Information"),
|
||||||
'type': 'INFO',
|
"type": "INFO",
|
||||||
'bg': '8888FF',
|
"bg": "8888FF",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
'date': '2015-03-13',
|
"date": "2015-03-13",
|
||||||
'desc': RichText('Debug trace'),
|
"desc": RichText("Debug trace"),
|
||||||
'type': 'DEBUG',
|
"type": "DEBUG",
|
||||||
'bg': 'FF00FF',
|
"bg": "FF00FF",
|
||||||
},
|
},
|
||||||
],
|
],
|
||||||
}
|
}
|
||||||
|
|
||||||
tpl.render(context)
|
tpl.render(context)
|
||||||
tpl.save('output/cellbg.docx')
|
tpl.save("output/cellbg.docx")
|
||||||
|
|||||||
@ -1,6 +1,6 @@
|
|||||||
from docxtpl import DocxTemplate
|
from docxtpl import DocxTemplate
|
||||||
|
|
||||||
tpl = DocxTemplate('templates/comments_tpl.docx')
|
tpl = DocxTemplate("templates/comments_tpl.docx")
|
||||||
|
|
||||||
tpl.render({})
|
tpl.render({})
|
||||||
tpl.save('output/comments.docx')
|
tpl.save("output/comments.docx")
|
||||||
|
|||||||
@ -1,9 +1,9 @@
|
|||||||
# -*- coding: utf-8 -*-
|
# -*- coding: utf-8 -*-
|
||||||
'''
|
"""
|
||||||
Created : 2015-03-12
|
Created : 2015-03-12
|
||||||
|
|
||||||
@author: sandeeprah, Eric Lapouyade
|
@author: sandeeprah, Eric Lapouyade
|
||||||
'''
|
"""
|
||||||
|
|
||||||
from docxtpl import DocxTemplate
|
from docxtpl import DocxTemplate
|
||||||
import jinja2
|
import jinja2
|
||||||
@ -14,7 +14,7 @@ jinja_env = jinja2.Environment()
|
|||||||
# to create new filters, first create functions that accept the value to filter
|
# to create new filters, first create functions that accept the value to filter
|
||||||
# as first argument, and filter parameters as next arguments
|
# as first argument, and filter parameters as next arguments
|
||||||
def my_filterA(value, my_string_arg):
|
def my_filterA(value, my_string_arg):
|
||||||
return_value = value + ' ' + my_string_arg
|
return_value = value + " " + my_string_arg
|
||||||
return return_value
|
return return_value
|
||||||
|
|
||||||
|
|
||||||
@ -24,12 +24,12 @@ def my_filterB(value, my_float_arg):
|
|||||||
|
|
||||||
|
|
||||||
# Then, declare them to jinja like this :
|
# Then, declare them to jinja like this :
|
||||||
jinja_env.filters['my_filterA'] = my_filterA
|
jinja_env.filters["my_filterA"] = my_filterA
|
||||||
jinja_env.filters['my_filterB'] = my_filterB
|
jinja_env.filters["my_filterB"] = my_filterB
|
||||||
|
|
||||||
|
|
||||||
context = {'base_value_string': ' Hello', 'base_value_float': 1.5}
|
context = {"base_value_string": " Hello", "base_value_float": 1.5}
|
||||||
|
|
||||||
tpl = DocxTemplate('templates/custom_jinja_filters_tpl.docx')
|
tpl = DocxTemplate("templates/custom_jinja_filters_tpl.docx")
|
||||||
tpl.render(context, jinja_env)
|
tpl.render(context, jinja_env)
|
||||||
tpl.save('output/custom_jinja_filters.docx')
|
tpl.save("output/custom_jinja_filters.docx")
|
||||||
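On the template side the two filters declared above are applied with standard Jinja2 pipe syntax. The lines below are an illustrative guess at what custom_jinja_filters_tpl.docx might contain, not a copy of it:

{{ base_value_string | my_filterA("world") }}
{{ base_value_float | my_filterB(2.0) }}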
|
|||||||
@ -1,12 +1,10 @@
|
|||||||
from docxtpl import DocxTemplate
|
from docxtpl import DocxTemplate
|
||||||
|
|
||||||
doctemplate = r'templates/doc_properties_tpl.docx'
|
doctemplate = r"templates/doc_properties_tpl.docx"
|
||||||
|
|
||||||
tpl = DocxTemplate(doctemplate)
|
tpl = DocxTemplate(doctemplate)
|
||||||
|
|
||||||
context = {
|
context = {"test": "HelloWorld"}
|
||||||
'test': 'HelloWorld'
|
|
||||||
}
|
|
||||||
|
|
||||||
tpl.render(context)
|
tpl.render(context)
|
||||||
tpl.save("output/doc_properties.docx")
|
tpl.save("output/doc_properties.docx")
|
||||||
|
|||||||
@ -1,15 +1,15 @@
|
|||||||
from docxtpl import DocxTemplate
|
from docxtpl import DocxTemplate
|
||||||
|
|
||||||
tpl = DocxTemplate('templates/dynamic_table_tpl.docx')
|
tpl = DocxTemplate("templates/dynamic_table_tpl.docx")
|
||||||
|
|
||||||
context = {
|
context = {
|
||||||
'col_labels': ['fruit', 'vegetable', 'stone', 'thing'],
|
"col_labels": ["fruit", "vegetable", "stone", "thing"],
|
||||||
'tbl_contents': [
|
"tbl_contents": [
|
||||||
{'label': 'yellow', 'cols': ['banana', 'capsicum', 'pyrite', 'taxi']},
|
{"label": "yellow", "cols": ["banana", "capsicum", "pyrite", "taxi"]},
|
||||||
{'label': 'red', 'cols': ['apple', 'tomato', 'cinnabar', 'doubledecker']},
|
{"label": "red", "cols": ["apple", "tomato", "cinnabar", "doubledecker"]},
|
||||||
{'label': 'green', 'cols': ['guava', 'cucumber', 'aventurine', 'card']},
|
{"label": "green", "cols": ["guava", "cucumber", "aventurine", "card"]},
|
||||||
],
|
],
|
||||||
}
|
}
|
||||||
|
|
||||||
tpl.render(context)
|
tpl.render(context)
|
||||||
tpl.save('output/dynamic_table.docx')
|
tpl.save("output/dynamic_table.docx")
|
||||||
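The matching template grows the table from col_labels and tbl_contents with docxtpl's table-row and table-cell tags. The snippet below is a rough reconstruction from memory of how this feature is documented, not the actual content of dynamic_table_tpl.docx:

{%tr for row in tbl_contents %}
{{ row.label }}   {%tc for col in row.cols %}{{ col }}{%tc endfor %}
{%tr endfor %}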
|
|||||||
@ -1,45 +1,45 @@
|
|||||||
# -*- coding: utf-8 -*-
|
# -*- coding: utf-8 -*-
|
||||||
'''
|
"""
|
||||||
Created : 2017-09-09
|
Created : 2017-09-09
|
||||||
|
|
||||||
@author: Eric Lapouyade
|
@author: Eric Lapouyade
|
||||||
'''
|
"""
|
||||||
|
|
||||||
from docxtpl import DocxTemplate
|
from docxtpl import DocxTemplate
|
||||||
|
|
||||||
# rendering the "dynamic embedded docx":
|
# rendering the "dynamic embedded docx":
|
||||||
embedded_docx_tpl = DocxTemplate('templates/embedded_embedded_docx_tpl.docx')
|
embedded_docx_tpl = DocxTemplate("templates/embedded_embedded_docx_tpl.docx")
|
||||||
context = {
|
context = {
|
||||||
'name': 'John Doe',
|
"name": "John Doe",
|
||||||
}
|
}
|
||||||
embedded_docx_tpl.render(context)
|
embedded_docx_tpl.render(context)
|
||||||
embedded_docx_tpl.save('output/embedded_embedded_docx.docx')
|
embedded_docx_tpl.save("output/embedded_embedded_docx.docx")
|
||||||
|
|
||||||
|
|
||||||
# rendering the main document :
|
# rendering the main document :
|
||||||
tpl = DocxTemplate('templates/embedded_main_tpl.docx')
|
tpl = DocxTemplate("templates/embedded_main_tpl.docx")
|
||||||
|
|
||||||
context = {
|
context = {
|
||||||
'name': 'John Doe',
|
"name": "John Doe",
|
||||||
}
|
}
|
||||||
|
|
||||||
tpl.replace_embedded(
|
tpl.replace_embedded(
|
||||||
'templates/embedded_dummy.docx', 'templates/embedded_static_docx.docx'
|
"templates/embedded_dummy.docx", "templates/embedded_static_docx.docx"
|
||||||
)
|
)
|
||||||
tpl.replace_embedded(
|
tpl.replace_embedded(
|
||||||
'templates/embedded_dummy2.docx', 'output/embedded_embedded_docx.docx'
|
"templates/embedded_dummy2.docx", "output/embedded_embedded_docx.docx"
|
||||||
)
|
)
|
||||||
|
|
||||||
# The zipname is the one you can find when you open docx with WinZip, 7zip (Windows)
|
# The zipname is the one you can find when you open docx with WinZip, 7zip (Windows)
|
||||||
# or unzip -l (Linux). The zipname starts with "word/embeddings/".
|
# or unzip -l (Linux). The zipname starts with "word/embeddings/".
|
||||||
# Note that the file is renamed by MSWord, so you have to guess a little bit...
|
# Note that the file is renamed by MSWord, so you have to guess a little bit...
|
||||||
tpl.replace_zipname(
|
tpl.replace_zipname(
|
||||||
'word/embeddings/Feuille_Microsoft_Office_Excel3.xlsx', 'templates/real_Excel.xlsx'
|
"word/embeddings/Feuille_Microsoft_Office_Excel3.xlsx", "templates/real_Excel.xlsx"
|
||||||
)
|
)
|
||||||
tpl.replace_zipname(
|
tpl.replace_zipname(
|
||||||
'word/embeddings/Pr_sentation_Microsoft_Office_PowerPoint4.pptx',
|
"word/embeddings/Pr_sentation_Microsoft_Office_PowerPoint4.pptx",
|
||||||
'templates/real_PowerPoint.pptx',
|
"templates/real_PowerPoint.pptx",
|
||||||
)
|
)
|
||||||
|
|
||||||
tpl.render(context)
|
tpl.render(context)
|
||||||
tpl.save('output/embedded.docx')
|
tpl.save("output/embedded.docx")
|
||||||
|
|||||||
@ -1,19 +1,19 @@
|
|||||||
from docxtpl import DocxTemplate, R, Listing
|
from docxtpl import DocxTemplate, R, Listing
|
||||||
|
|
||||||
tpl = DocxTemplate('templates/escape_tpl.docx')
|
tpl = DocxTemplate("templates/escape_tpl.docx")
|
||||||
|
|
||||||
context = {
|
context = {
|
||||||
'myvar': R(
|
"myvar": R(
|
||||||
'"less than" must be escaped : <, this can be done with RichText() or R()'
|
'"less than" must be escaped : <, this can be done with RichText() or R()'
|
||||||
),
|
),
|
||||||
'myescvar': 'It can be escaped with a "|e" jinja filter in the template too : < ',
|
"myescvar": 'It can be escaped with a "|e" jinja filter in the template too : < ',
|
||||||
'nlnp': R('Here is a multiple\nlines\nstring\aand some\aother\aparagraphs',
|
"nlnp": R(
|
||||||
color='#ff00ff'),
|
"Here is a multiple\nlines\nstring\aand some\aother\aparagraphs",
|
||||||
'mylisting': Listing(
|
color="#ff00ff",
|
||||||
'the listing\nwith\nsome\nlines\nand special chars : <>& ...'
|
|
||||||
),
|
),
|
||||||
'page_break': R('\f'),
|
"mylisting": Listing("the listing\nwith\nsome\nlines\nand special chars : <>& ..."),
|
||||||
'new_listing': """
|
"page_break": R("\f"),
|
||||||
|
"new_listing": """
|
||||||
This is a new listing
|
This is a new listing
|
||||||
Now, does not require Listing() Object
|
Now, does not require Listing() Object
|
||||||
Here is a \t tab\a
|
Here is a \t tab\a
|
||||||
@ -21,34 +21,34 @@ Here is a new paragraph\a
|
|||||||
Here is a page break : \f
|
Here is a page break : \f
|
||||||
That's it
|
That's it
|
||||||
""",
|
""",
|
||||||
'some_html': (
|
"some_html": (
|
||||||
'HTTP/1.1 200 OK\n'
|
"HTTP/1.1 200 OK\n"
|
||||||
'Server: Apache-Coyote/1.1\n'
|
"Server: Apache-Coyote/1.1\n"
|
||||||
'Cache-Control: no-store\n'
|
"Cache-Control: no-store\n"
|
||||||
'Expires: Thu, 01 Jan 1970 00:00:00 GMT\n'
|
"Expires: Thu, 01 Jan 1970 00:00:00 GMT\n"
|
||||||
'Pragma: no-cache\n'
|
"Pragma: no-cache\n"
|
||||||
'Content-Type: text/html;charset=UTF-8\n'
|
"Content-Type: text/html;charset=UTF-8\n"
|
||||||
'Content-Language: zh-CN\n'
|
"Content-Language: zh-CN\n"
|
||||||
'Date: Thu, 22 Oct 2020 10:59:40 GMT\n'
|
"Date: Thu, 22 Oct 2020 10:59:40 GMT\n"
|
||||||
'Content-Length: 9866\n'
|
"Content-Length: 9866\n"
|
||||||
'\n'
|
"\n"
|
||||||
'<html>\n'
|
"<html>\n"
|
||||||
'<head>\n'
|
"<head>\n"
|
||||||
' <title>Struts Problem Report</title>\n'
|
" <title>Struts Problem Report</title>\n"
|
||||||
' <style>\n'
|
" <style>\n"
|
||||||
' \tpre {\n'
|
" \tpre {\n"
|
||||||
'\t \tmargin: 0;\n'
|
"\t \tmargin: 0;\n"
|
||||||
'\t padding: 0;\n'
|
"\t padding: 0;\n"
|
||||||
'\t } '
|
"\t } "
|
||||||
'\n'
|
"\n"
|
||||||
' </style>\n'
|
" </style>\n"
|
||||||
'</head>\n'
|
"</head>\n"
|
||||||
'<body>\n'
|
"<body>\n"
|
||||||
'...\n'
|
"...\n"
|
||||||
'</body>\n'
|
"</body>\n"
|
||||||
'</html>'
|
"</html>"
|
||||||
),
|
),
|
||||||
}
|
}
|
||||||
|
|
||||||
tpl.render(context)
|
tpl.render(context)
|
||||||
tpl.save('output/escape.docx')
|
tpl.save("output/escape.docx")
|
||||||
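Two escaping routes are exercised here: RichText/R() values are meant to be inserted with the {{r ...}} tag so their prepared XML is taken verbatim, while plain strings can be pushed through Jinja's |e filter. Illustrative template lines (assumed, not read from escape_tpl.docx):

{{r myvar }}
{{ myescvar|e }}
{{r page_break }}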
|
|||||||
@ -12,18 +12,18 @@ from docxtpl import DocxTemplate
|
|||||||
|
|
||||||
XML_RESERVED = """<"&'>"""
|
XML_RESERVED = """<"&'>"""
|
||||||
|
|
||||||
tpl = DocxTemplate('templates/escape_tpl_auto.docx')
|
tpl = DocxTemplate("templates/escape_tpl_auto.docx")
|
||||||
|
|
||||||
context = {
|
context = {
|
||||||
'nested_dict': {name(text_type(c)): c for c in XML_RESERVED},
|
"nested_dict": {name(text_type(c)): c for c in XML_RESERVED},
|
||||||
'autoescape': 'Escaped "str & ing"!',
|
"autoescape": 'Escaped "str & ing"!',
|
||||||
'autoescape_unicode': u'This is an escaped <unicode> example \u4f60 & \u6211',
|
"autoescape_unicode": "This is an escaped <unicode> example \u4f60 & \u6211",
|
||||||
'iteritems': iteritems,
|
"iteritems": iteritems,
|
||||||
}
|
}
|
||||||
|
|
||||||
tpl.render(context, autoescape=True)
|
tpl.render(context, autoescape=True)
|
||||||
|
|
||||||
OUTPUT = 'output'
|
OUTPUT = "output"
|
||||||
if not os.path.exists(OUTPUT):
|
if not os.path.exists(OUTPUT):
|
||||||
os.makedirs(OUTPUT)
|
os.makedirs(OUTPUT)
|
||||||
tpl.save(OUTPUT + '/escape_auto.docx')
|
tpl.save(OUTPUT + "/escape_auto.docx")
|
||||||
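Autoescaping can also be switched on by handing render() a preconfigured Jinja environment, as the inline_image test further down does. A minimal equivalent, reusing the simple one-variable template from the doc_properties test (output name is hypothetical):

import jinja2
from docxtpl import DocxTemplate

tpl = DocxTemplate("templates/doc_properties_tpl.docx")
jinja_env = jinja2.Environment(autoescape=True)
tpl.render({"test": '<Hello & "World">'}, jinja_env)
tpl.save("output/doc_properties_autoescape.docx")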
|
|||||||
@ -1,25 +1,25 @@
|
|||||||
# -*- coding: utf-8 -*-
|
# -*- coding: utf-8 -*-
|
||||||
'''
|
"""
|
||||||
Created : 2015-03-12
|
Created : 2015-03-12
|
||||||
|
|
||||||
@author: Eric Lapouyade
|
@author: Eric Lapouyade
|
||||||
'''
|
"""
|
||||||
|
|
||||||
from docxtpl import DocxTemplate
|
from docxtpl import DocxTemplate
|
||||||
|
|
||||||
tpl = DocxTemplate('templates/header_footer_tpl.docx')
|
tpl = DocxTemplate("templates/header_footer_tpl.docx")
|
||||||
|
|
||||||
sd = tpl.new_subdoc()
|
sd = tpl.new_subdoc()
|
||||||
p = sd.add_paragraph(
|
p = sd.add_paragraph(
|
||||||
'This is a sub-document to check it does not break header and footer'
|
"This is a sub-document to check it does not break header and footer"
|
||||||
)
|
)
|
||||||
|
|
||||||
context = {
|
context = {
|
||||||
'title': 'Header and footer test',
|
"title": "Header and footer test",
|
||||||
'company_name': 'The World Wide company',
|
"company_name": "The World Wide company",
|
||||||
'date': '2016-03-17',
|
"date": "2016-03-17",
|
||||||
'mysubdoc': sd,
|
"mysubdoc": sd,
|
||||||
}
|
}
|
||||||
|
|
||||||
tpl.render(context)
|
tpl.render(context)
|
||||||
tpl.save('output/header_footer.docx')
|
tpl.save("output/header_footer.docx")
|
||||||
|
|||||||
@ -1,17 +1,17 @@
|
|||||||
# -*- coding: utf-8 -*-
|
# -*- coding: utf-8 -*-
|
||||||
'''
|
"""
|
||||||
Created : 2015-03-12
|
Created : 2015-03-12
|
||||||
|
|
||||||
@author: Eric Lapouyade
|
@author: Eric Lapouyade
|
||||||
'''
|
"""
|
||||||
|
|
||||||
from docxtpl import DocxTemplate
|
from docxtpl import DocxTemplate
|
||||||
|
|
||||||
tpl = DocxTemplate('templates/header_footer_entities_tpl.docx')
|
tpl = DocxTemplate("templates/header_footer_entities_tpl.docx")
|
||||||
|
|
||||||
context = {
|
context = {
|
||||||
'title': 'Header and footer test',
|
"title": "Header and footer test",
|
||||||
}
|
}
|
||||||
|
|
||||||
tpl.render(context)
|
tpl.render(context)
|
||||||
tpl.save('output/header_footer_entities.docx')
|
tpl.save("output/header_footer_entities.docx")
|
||||||
|
|||||||
@ -1,19 +1,19 @@
|
|||||||
# -*- coding: utf-8 -*-
|
# -*- coding: utf-8 -*-
|
||||||
'''
|
"""
|
||||||
Created : 2017-09-03
|
Created : 2017-09-03
|
||||||
|
|
||||||
@author: Eric Lapouyade
|
@author: Eric Lapouyade
|
||||||
'''
|
"""
|
||||||
|
|
||||||
from docxtpl import DocxTemplate
|
from docxtpl import DocxTemplate
|
||||||
|
|
||||||
DEST_FILE = 'output/header_footer_image.docx'
|
DEST_FILE = "output/header_footer_image.docx"
|
||||||
|
|
||||||
tpl = DocxTemplate('templates/header_footer_image_tpl.docx')
|
tpl = DocxTemplate("templates/header_footer_image_tpl.docx")
|
||||||
|
|
||||||
context = {
|
context = {
|
||||||
'mycompany': 'The World Wide company',
|
"mycompany": "The World Wide company",
|
||||||
}
|
}
|
||||||
tpl.replace_media('templates/dummy_pic_for_header.png', 'templates/python.png')
|
tpl.replace_media("templates/dummy_pic_for_header.png", "templates/python.png")
|
||||||
tpl.render(context)
|
tpl.render(context)
|
||||||
tpl.save(DEST_FILE)
|
tpl.save(DEST_FILE)
|
||||||
|
|||||||
@ -1,29 +1,29 @@
|
|||||||
# -*- coding: utf-8 -*-
|
# -*- coding: utf-8 -*-
|
||||||
'''
|
"""
|
||||||
Created : 2019-05-22
|
Created : 2019-05-22
|
||||||
|
|
||||||
@author: Eric Dufresne
|
@author: Eric Dufresne
|
||||||
'''
|
"""
|
||||||
|
|
||||||
from docxtpl import DocxTemplate
|
from docxtpl import DocxTemplate
|
||||||
import io
|
import io
|
||||||
|
|
||||||
DEST_FILE = 'output/header_footer_image_file_obj.docx'
|
DEST_FILE = "output/header_footer_image_file_obj.docx"
|
||||||
DEST_FILE2 = 'output/header_footer_image_file_obj2.docx'
|
DEST_FILE2 = "output/header_footer_image_file_obj2.docx"
|
||||||
|
|
||||||
tpl = DocxTemplate('templates/header_footer_image_tpl.docx')
|
tpl = DocxTemplate("templates/header_footer_image_tpl.docx")
|
||||||
|
|
||||||
context = {
|
context = {
|
||||||
'mycompany': 'The World Wide company',
|
"mycompany": "The World Wide company",
|
||||||
}
|
}
|
||||||
|
|
||||||
dummy_pic = io.BytesIO(open('templates/dummy_pic_for_header.png', 'rb').read())
|
dummy_pic = io.BytesIO(open("templates/dummy_pic_for_header.png", "rb").read())
|
||||||
new_image = io.BytesIO(open('templates/python.png', 'rb').read())
|
new_image = io.BytesIO(open("templates/python.png", "rb").read())
|
||||||
tpl.replace_media(dummy_pic, new_image)
|
tpl.replace_media(dummy_pic, new_image)
|
||||||
tpl.render(context)
|
tpl.render(context)
|
||||||
tpl.save(DEST_FILE)
|
tpl.save(DEST_FILE)
|
||||||
|
|
||||||
tpl = DocxTemplate('templates/header_footer_image_tpl.docx')
|
tpl = DocxTemplate("templates/header_footer_image_tpl.docx")
|
||||||
dummy_pic.seek(0)
|
dummy_pic.seek(0)
|
||||||
new_image.seek(0)
|
new_image.seek(0)
|
||||||
tpl.replace_media(dummy_pic, new_image)
|
tpl.replace_media(dummy_pic, new_image)
|
||||||
@ -32,5 +32,5 @@ tpl.render(context)
|
|||||||
file_obj = io.BytesIO()
|
file_obj = io.BytesIO()
|
||||||
tpl.save(file_obj)
|
tpl.save(file_obj)
|
||||||
file_obj.seek(0)
|
file_obj.seek(0)
|
||||||
with open(DEST_FILE2, 'wb') as f:
|
with open(DEST_FILE2, "wb") as f:
|
||||||
f.write(file_obj.read())
|
f.write(file_obj.read())
|
||||||
|
|||||||
@ -1,24 +1,24 @@
|
|||||||
# -*- coding: utf-8 -*-
|
# -*- coding: utf-8 -*-
|
||||||
'''
|
"""
|
||||||
Created : 2021-04-06
|
Created : 2021-04-06
|
||||||
|
|
||||||
@author: Eric Lapouyade
|
@author: Eric Lapouyade
|
||||||
'''
|
"""
|
||||||
|
|
||||||
from docxtpl import DocxTemplate, InlineImage
|
from docxtpl import DocxTemplate, InlineImage
|
||||||
|
|
||||||
# for height and width you have to use millimeters (Mm), inches or points(Pt) class :
|
# for height and width you have to use millimeters (Mm), inches or points(Pt) class :
|
||||||
from docx.shared import Mm
|
from docx.shared import Mm
|
||||||
|
|
||||||
tpl = DocxTemplate('templates/header_footer_inline_image_tpl.docx')
|
tpl = DocxTemplate("templates/header_footer_inline_image_tpl.docx")
|
||||||
|
|
||||||
context = {
|
context = {
|
||||||
'inline_image': InlineImage(tpl, 'templates/django.png', height=Mm(10)),
|
"inline_image": InlineImage(tpl, "templates/django.png", height=Mm(10)),
|
||||||
'images': [
|
"images": [
|
||||||
InlineImage(tpl, 'templates/python.png', height=Mm(10)),
|
InlineImage(tpl, "templates/python.png", height=Mm(10)),
|
||||||
InlineImage(tpl, 'templates/python.png', height=Mm(10)),
|
InlineImage(tpl, "templates/python.png", height=Mm(10)),
|
||||||
InlineImage(tpl, 'templates/python.png', height=Mm(10))
|
InlineImage(tpl, "templates/python.png", height=Mm(10)),
|
||||||
]
|
],
|
||||||
}
|
}
|
||||||
tpl.render(context)
|
tpl.render(context)
|
||||||
tpl.save('output/header_footer_inline_image.docx')
|
tpl.save("output/header_footer_inline_image.docx")
|
||||||
|
|||||||
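The comment in this example points out that sizes must be given with the python-docx length classes; Mm is used above, but Inches and Pt work the same way since they are all docx.shared lengths. A small sketch reusing this test's template and images (output name is hypothetical):

from docx.shared import Inches, Pt
from docxtpl import DocxTemplate, InlineImage

tpl = DocxTemplate("templates/header_footer_inline_image_tpl.docx")
context = {
    "inline_image": InlineImage(tpl, "templates/django.png", width=Inches(0.5)),
    "images": [InlineImage(tpl, "templates/python.png", height=Pt(30))],
}
tpl.render(context)
tpl.save("output/header_footer_inline_image_sizes.docx")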
@ -1,28 +1,28 @@
|
|||||||
# -*- coding: utf-8 -*-
|
# -*- coding: utf-8 -*-
|
||||||
'''
|
"""
|
||||||
Created : 2016-07-19
|
Created : 2016-07-19
|
||||||
|
|
||||||
@author: AhnSeongHyun
|
@author: AhnSeongHyun
|
||||||
|
|
||||||
Edited : 2016-07-19 by Eric Lapouyade
|
Edited : 2016-07-19 by Eric Lapouyade
|
||||||
'''
|
"""
|
||||||
|
|
||||||
from docxtpl import DocxTemplate
|
from docxtpl import DocxTemplate
|
||||||
|
|
||||||
tpl = DocxTemplate('templates/header_footer_tpl_utf8.docx')
|
tpl = DocxTemplate("templates/header_footer_tpl_utf8.docx")
|
||||||
|
|
||||||
sd = tpl.new_subdoc()
|
sd = tpl.new_subdoc()
|
||||||
p = sd.add_paragraph(
|
p = sd.add_paragraph(
|
||||||
u'This is a sub-document to check it does not break header and footer with utf-8 '
|
"This is a sub-document to check it does not break header and footer with utf-8 "
|
||||||
u'characters inside the template .docx'
|
"characters inside the template .docx"
|
||||||
)
|
)
|
||||||
|
|
||||||
context = {
|
context = {
|
||||||
'title': u'헤더와 푸터',
|
"title": "헤더와 푸터",
|
||||||
'company_name': u'세계적 회사',
|
"company_name": "세계적 회사",
|
||||||
'date': u'2016-03-17',
|
"date": "2016-03-17",
|
||||||
'mysubdoc': sd,
|
"mysubdoc": sd,
|
||||||
}
|
}
|
||||||
|
|
||||||
tpl.render(context)
|
tpl.render(context)
|
||||||
tpl.save('output/header_footer_utf8.docx')
|
tpl.save("output/header_footer_utf8.docx")
|
||||||
|
|||||||
@ -2,6 +2,6 @@
|
|||||||
|
|
||||||
from docxtpl import DocxTemplate
|
from docxtpl import DocxTemplate
|
||||||
|
|
||||||
tpl = DocxTemplate('templates/horizontal_merge_tpl.docx')
|
tpl = DocxTemplate("templates/horizontal_merge_tpl.docx")
|
||||||
tpl.render({})
|
tpl.render({})
|
||||||
tpl.save('output/horizontal_merge.docx')
|
tpl.save("output/horizontal_merge.docx")
|
||||||
|
|||||||
@ -1,9 +1,9 @@
|
|||||||
# -*- coding: utf-8 -*-
|
# -*- coding: utf-8 -*-
|
||||||
'''
|
"""
|
||||||
Created : 2017-01-14
|
Created : 2017-01-14
|
||||||
|
|
||||||
@author: Eric Lapouyade
|
@author: Eric Lapouyade
|
||||||
'''
|
"""
|
||||||
|
|
||||||
from docxtpl import DocxTemplate, InlineImage
|
from docxtpl import DocxTemplate, InlineImage
|
||||||
|
|
||||||
@ -11,37 +11,37 @@ from docxtpl import DocxTemplate, InlineImage
|
|||||||
from docx.shared import Mm
|
from docx.shared import Mm
|
||||||
import jinja2
|
import jinja2
|
||||||
|
|
||||||
tpl = DocxTemplate('templates/inline_image_tpl.docx')
|
tpl = DocxTemplate("templates/inline_image_tpl.docx")
|
||||||
|
|
||||||
context = {
|
context = {
|
||||||
'myimage': InlineImage(tpl, 'templates/python_logo.png', width=Mm(20)),
|
"myimage": InlineImage(tpl, "templates/python_logo.png", width=Mm(20)),
|
||||||
'myimageratio': InlineImage(
|
"myimageratio": InlineImage(
|
||||||
tpl, 'templates/python_jpeg.jpg', width=Mm(30), height=Mm(60)
|
tpl, "templates/python_jpeg.jpg", width=Mm(30), height=Mm(60)
|
||||||
),
|
),
|
||||||
'frameworks': [
|
"frameworks": [
|
||||||
{
|
{
|
||||||
'image': InlineImage(tpl, 'templates/django.png', height=Mm(10)),
|
"image": InlineImage(tpl, "templates/django.png", height=Mm(10)),
|
||||||
'desc': 'The web framework for perfectionists with deadlines',
|
"desc": "The web framework for perfectionists with deadlines",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
'image': InlineImage(tpl, 'templates/zope.png', height=Mm(10)),
|
"image": InlineImage(tpl, "templates/zope.png", height=Mm(10)),
|
||||||
'desc': 'Zope is a leading Open Source Application Server and Content Management Framework',
|
"desc": "Zope is a leading Open Source Application Server and Content Management Framework",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
'image': InlineImage(tpl, 'templates/pyramid.png', height=Mm(10)),
|
"image": InlineImage(tpl, "templates/pyramid.png", height=Mm(10)),
|
||||||
'desc': 'Pyramid is a lightweight Python web framework aimed at taking small web apps into big web apps.',
|
"desc": "Pyramid is a lightweight Python web framework aimed at taking small web apps into big web apps.",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
'image': InlineImage(tpl, 'templates/bottle.png', height=Mm(10)),
|
"image": InlineImage(tpl, "templates/bottle.png", height=Mm(10)),
|
||||||
'desc': 'Bottle is a fast, simple and lightweight WSGI micro web-framework for Python',
|
"desc": "Bottle is a fast, simple and lightweight WSGI micro web-framework for Python",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
'image': InlineImage(tpl, 'templates/tornado.png', height=Mm(10)),
|
"image": InlineImage(tpl, "templates/tornado.png", height=Mm(10)),
|
||||||
'desc': 'Tornado is a Python web framework and asynchronous networking library.',
|
"desc": "Tornado is a Python web framework and asynchronous networking library.",
|
||||||
},
|
},
|
||||||
],
|
],
|
||||||
}
|
}
|
||||||
# testing that it works also when autoescape has been forced to True
|
# testing that it works also when autoescape has been forced to True
|
||||||
jinja_env = jinja2.Environment(autoescape=True)
|
jinja_env = jinja2.Environment(autoescape=True)
|
||||||
tpl.render(context, jinja_env)
|
tpl.render(context, jinja_env)
|
||||||
tpl.save('output/inline_image.docx')
|
tpl.save("output/inline_image.docx")
|
||||||
|
|||||||
@ -1,5 +1,5 @@
|
|||||||
from docxtpl import DocxTemplate
|
from docxtpl import DocxTemplate
|
||||||
|
|
||||||
tpl = DocxTemplate('templates/less_cells_after_loop_tpl.docx')
|
tpl = DocxTemplate("templates/less_cells_after_loop_tpl.docx")
|
||||||
tpl.render({})
|
tpl.render({})
|
||||||
tpl.save('output/less_cells_after_loop.docx')
|
tpl.save("output/less_cells_after_loop.docx")
|
||||||
|
|||||||
@ -1,19 +1,19 @@
|
|||||||
# -*- coding: utf-8 -*-
|
# -*- coding: utf-8 -*-
|
||||||
'''
|
"""
|
||||||
Created : 2021-07-30
|
Created : 2021-07-30
|
||||||
|
|
||||||
@author: Eric Lapouyade
|
@author: Eric Lapouyade
|
||||||
'''
|
"""
|
||||||
|
|
||||||
from docxtpl import DocxTemplate
|
from docxtpl import DocxTemplate
|
||||||
|
|
||||||
|
|
||||||
tpl = DocxTemplate('templates/merge_docx_master_tpl.docx')
|
tpl = DocxTemplate("templates/merge_docx_master_tpl.docx")
|
||||||
sd = tpl.new_subdoc('templates/merge_docx_subdoc.docx')
|
sd = tpl.new_subdoc("templates/merge_docx_subdoc.docx")
|
||||||
|
|
||||||
context = {
|
context = {
|
||||||
'mysubdoc': sd,
|
"mysubdoc": sd,
|
||||||
}
|
}
|
||||||
|
|
||||||
tpl.render(context)
|
tpl.render(context)
|
||||||
tpl.save('output/merge_docx.docx')
|
tpl.save("output/merge_docx.docx")
|
||||||
|
|||||||
@ -1,17 +1,17 @@
|
|||||||
# -*- coding: utf-8 -*-
|
# -*- coding: utf-8 -*-
|
||||||
'''
|
"""
|
||||||
Created : 2015-03-12
|
Created : 2015-03-12
|
||||||
|
|
||||||
@author: Eric Lapouyade
|
@author: Eric Lapouyade
|
||||||
'''
|
"""
|
||||||
|
|
||||||
from docxtpl import DocxTemplate
|
from docxtpl import DocxTemplate
|
||||||
|
|
||||||
tpl = DocxTemplate('templates/merge_paragraph_tpl.docx')
|
tpl = DocxTemplate("templates/merge_paragraph_tpl.docx")
|
||||||
|
|
||||||
context = {
|
context = {
|
||||||
'living_in_town': True,
|
"living_in_town": True,
|
||||||
}
|
}
|
||||||
|
|
||||||
tpl.render(context)
|
tpl.render(context)
|
||||||
tpl.save('output/merge_paragraph.docx')
|
tpl.save("output/merge_paragraph.docx")
|
||||||
|
|||||||
@ -1,19 +1,25 @@
|
|||||||
import os
|
import os
|
||||||
|
|
||||||
TEMPLATE_PATH = 'templates/module_execute_tpl.docx'
|
TEMPLATE_PATH = "templates/module_execute_tpl.docx"
|
||||||
JSON_PATH = 'templates/module_execute.json'
|
JSON_PATH = "templates/module_execute.json"
|
||||||
OUTPUT_FILENAME = 'output/module_execute.docx'
|
OUTPUT_FILENAME = "output/module_execute.docx"
|
||||||
OVERWRITE = '-o'
|
OVERWRITE = "-o"
|
||||||
QUIET = '-q'
|
QUIET = "-q"
|
||||||
|
|
||||||
|
|
||||||
if os.path.exists(OUTPUT_FILENAME):
|
if os.path.exists(OUTPUT_FILENAME):
|
||||||
os.unlink(OUTPUT_FILENAME)
|
os.unlink(OUTPUT_FILENAME)
|
||||||
|
|
||||||
os.chdir(os.path.dirname(__file__))
|
os.chdir(os.path.dirname(__file__))
|
||||||
cmd = 'python -m docxtpl %s %s %s %s %s' % (TEMPLATE_PATH, JSON_PATH, OUTPUT_FILENAME, OVERWRITE, QUIET)
|
cmd = "python -m docxtpl %s %s %s %s %s" % (
|
||||||
|
TEMPLATE_PATH,
|
||||||
|
JSON_PATH,
|
||||||
|
OUTPUT_FILENAME,
|
||||||
|
OVERWRITE,
|
||||||
|
QUIET,
|
||||||
|
)
|
||||||
print('Executing "%s" ...' % cmd)
|
print('Executing "%s" ...' % cmd)
|
||||||
os.system(cmd)
|
os.system(cmd)
|
||||||
|
|
||||||
if os.path.exists(OUTPUT_FILENAME):
|
if os.path.exists(OUTPUT_FILENAME):
|
||||||
print(' --> File %s has been generated.' % OUTPUT_FILENAME)
|
print(" --> File %s has been generated." % OUTPUT_FILENAME)
|
||||||
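The same document can be produced without shelling out, by doing directly what the command line run above amounts to: load the JSON context, render, save. A sketch with a hypothetical output name:

import json
from docxtpl import DocxTemplate

tpl = DocxTemplate("templates/module_execute_tpl.docx")
with open("templates/module_execute.json") as fh:
    context = json.load(fh)
tpl.render(context)
tpl.save("output/module_execute_direct.docx")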
|
|||||||
@ -1,40 +1,40 @@
|
|||||||
# -*- coding: utf-8 -*-
|
# -*- coding: utf-8 -*-
|
||||||
'''
|
"""
|
||||||
Created : 2021-12-20
|
Created : 2021-12-20
|
||||||
|
|
||||||
@author: Eric Lapouyade
|
@author: Eric Lapouyade
|
||||||
'''
|
"""
|
||||||
|
|
||||||
from docxtpl import DocxTemplate
|
from docxtpl import DocxTemplate
|
||||||
|
|
||||||
tpl = DocxTemplate('templates/multi_rendering_tpl.docx')
|
tpl = DocxTemplate("templates/multi_rendering_tpl.docx")
|
||||||
|
|
||||||
documents_data = [
|
documents_data = [
|
||||||
{
|
{
|
||||||
'dest_file': 'multi_render1.docx',
|
"dest_file": "multi_render1.docx",
|
||||||
'context': {
|
"context": {
|
||||||
'title': 'Title ONE',
|
"title": "Title ONE",
|
||||||
'body': 'This is the body for first document'
|
"body": "This is the body for first document",
|
||||||
}
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
'dest_file': 'multi_render2.docx',
|
"dest_file": "multi_render2.docx",
|
||||||
'context': {
|
"context": {
|
||||||
'title': 'Title TWO',
|
"title": "Title TWO",
|
||||||
'body': 'This is the body for second document'
|
"body": "This is the body for second document",
|
||||||
}
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
'dest_file': 'multi_render3.docx',
|
"dest_file": "multi_render3.docx",
|
||||||
'context': {
|
"context": {
|
||||||
'title': 'Title THREE',
|
"title": "Title THREE",
|
||||||
'body': 'This is the body for third document'
|
"body": "This is the body for third document",
|
||||||
}
|
},
|
||||||
},
|
},
|
||||||
]
|
]
|
||||||
|
|
||||||
for document_data in documents_data:
|
for document_data in documents_data:
|
||||||
dest_file = document_data['dest_file']
|
dest_file = document_data["dest_file"]
|
||||||
context = document_data['context']
|
context = document_data["context"]
|
||||||
tpl.render(context)
|
tpl.render(context)
|
||||||
tpl.save('output/%s' % dest_file)
|
tpl.save("output/%s" % dest_file)
|
||||||
|
|||||||
@ -1,45 +1,45 @@
|
|||||||
# -*- coding: utf-8 -*-
|
# -*- coding: utf-8 -*-
|
||||||
'''
|
"""
|
||||||
Created : 2016-03-26
|
Created : 2016-03-26
|
||||||
|
|
||||||
@author: Eric Lapouyade
|
@author: Eric Lapouyade
|
||||||
'''
|
"""
|
||||||
|
|
||||||
from docxtpl import DocxTemplate
|
from docxtpl import DocxTemplate
|
||||||
|
|
||||||
tpl = DocxTemplate('templates/nested_for_tpl.docx')
|
tpl = DocxTemplate("templates/nested_for_tpl.docx")
|
||||||
|
|
||||||
context = {
|
context = {
|
||||||
'dishes': [
|
"dishes": [
|
||||||
{'name': 'Pizza', 'ingredients': ['bread', 'tomato', 'ham', 'cheese']},
|
{"name": "Pizza", "ingredients": ["bread", "tomato", "ham", "cheese"]},
|
||||||
{
|
{
|
||||||
'name': 'Hamburger',
|
"name": "Hamburger",
|
||||||
'ingredients': ['bread', 'chopped steak', 'cheese', 'sauce'],
|
"ingredients": ["bread", "chopped steak", "cheese", "sauce"],
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
'name': 'Apple pie',
|
"name": "Apple pie",
|
||||||
'ingredients': ['flour', 'apples', 'suggar', 'quince jelly'],
|
"ingredients": ["flour", "apples", "suggar", "quince jelly"],
|
||||||
},
|
},
|
||||||
],
|
],
|
||||||
'authors': [
|
"authors": [
|
||||||
{
|
{
|
||||||
'name': 'Saint-Exupery',
|
"name": "Saint-Exupery",
|
||||||
'books': [
|
"books": [
|
||||||
{'title': 'Le petit prince'},
|
{"title": "Le petit prince"},
|
||||||
{'title': "L'aviateur"},
|
{"title": "L'aviateur"},
|
||||||
{'title': 'Vol de nuit'},
|
{"title": "Vol de nuit"},
|
||||||
],
|
],
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
'name': 'Barjavel',
|
"name": "Barjavel",
|
||||||
'books': [
|
"books": [
|
||||||
{'title': 'Ravage'},
|
{"title": "Ravage"},
|
||||||
{'title': "La nuit des temps"},
|
{"title": "La nuit des temps"},
|
||||||
{'title': 'Le grand secret'},
|
{"title": "Le grand secret"},
|
||||||
],
|
],
|
||||||
},
|
},
|
||||||
],
|
],
|
||||||
}
|
}
|
||||||
|
|
||||||
tpl.render(context)
|
tpl.render(context)
|
||||||
tpl.save('output/nested_for.docx')
|
tpl.save("output/nested_for.docx")
|
||||||
|
|||||||
@ -1,26 +1,26 @@
|
|||||||
# -*- coding: utf-8 -*-
|
# -*- coding: utf-8 -*-
|
||||||
'''
|
"""
|
||||||
Created : 2015-03-12
|
Created : 2015-03-12
|
||||||
|
|
||||||
@author: Eric Lapouyade
|
@author: Eric Lapouyade
|
||||||
'''
|
"""
|
||||||
|
|
||||||
from docxtpl import DocxTemplate
|
from docxtpl import DocxTemplate
|
||||||
|
|
||||||
tpl = DocxTemplate('templates/order_tpl.docx')
|
tpl = DocxTemplate("templates/order_tpl.docx")
|
||||||
|
|
||||||
context = {
|
context = {
|
||||||
'customer_name': 'Eric',
|
"customer_name": "Eric",
|
||||||
'items': [
|
"items": [
|
||||||
{'desc': 'Python interpreters', 'qty': 2, 'price': 'FREE'},
|
{"desc": "Python interpreters", "qty": 2, "price": "FREE"},
|
||||||
{'desc': 'Django projects', 'qty': 5403, 'price': 'FREE'},
|
{"desc": "Django projects", "qty": 5403, "price": "FREE"},
|
||||||
{'desc': 'Guido', 'qty': 1, 'price': '100,000,000.00'},
|
{"desc": "Guido", "qty": 1, "price": "100,000,000.00"},
|
||||||
],
|
],
|
||||||
'in_europe': True,
|
"in_europe": True,
|
||||||
'is_paid': False,
|
"is_paid": False,
|
||||||
'company_name': 'The World Wide company',
|
"company_name": "The World Wide company",
|
||||||
'total_price': '100,000,000.00',
|
"total_price": "100,000,000.00",
|
||||||
}
|
}
|
||||||
|
|
||||||
tpl.render(context)
|
tpl.render(context)
|
||||||
tpl.save('output/order.docx')
|
tpl.save("output/order.docx")
|
||||||
|
|||||||
@ -3,12 +3,12 @@ from docxtpl import DocxTemplate
|
|||||||
# With old docxtpl version, "... for spicy ..." was replaced by "... forspicy..."
|
# With old docxtpl version, "... for spicy ..." was replaced by "... forspicy..."
|
||||||
# This test is for checking that is some cases the spaces are not lost anymore
|
# This test is for checking that is some cases the spaces are not lost anymore
|
||||||
|
|
||||||
tpl = DocxTemplate('templates/preserve_spaces_tpl.docx')
|
tpl = DocxTemplate("templates/preserve_spaces_tpl.docx")
|
||||||
|
|
||||||
tags = ['tag_1', 'tag_2']
|
tags = ["tag_1", "tag_2"]
|
||||||
replacement = ['looking', 'too']
|
replacement = ["looking", "too"]
|
||||||
|
|
||||||
context = dict(zip(tags, replacement))
|
context = dict(zip(tags, replacement))
|
||||||
|
|
||||||
tpl.render(context)
|
tpl.render(context)
|
||||||
tpl.save('output/preserve_spaces.docx')
|
tpl.save("output/preserve_spaces.docx")
|
||||||
|
|||||||
@ -1,18 +1,18 @@
|
|||||||
# -*- coding: utf-8 -*-
|
# -*- coding: utf-8 -*-
|
||||||
'''
|
"""
|
||||||
Created : 2017-09-03
|
Created : 2017-09-03
|
||||||
|
|
||||||
@author: Eric Lapouyade
|
@author: Eric Lapouyade
|
||||||
'''
|
"""
|
||||||
|
|
||||||
from docxtpl import DocxTemplate
|
from docxtpl import DocxTemplate
|
||||||
|
|
||||||
DEST_FILE = 'output/replace_picture.docx'
|
DEST_FILE = "output/replace_picture.docx"
|
||||||
|
|
||||||
tpl = DocxTemplate('templates/replace_picture_tpl.docx')
|
tpl = DocxTemplate("templates/replace_picture_tpl.docx")
|
||||||
|
|
||||||
context = {}
|
context = {}
|
||||||
|
|
||||||
tpl.replace_pic('python_logo.png', 'templates/python.png')
|
tpl.replace_pic("python_logo.png", "templates/python.png")
|
||||||
tpl.render(context)
|
tpl.render(context)
|
||||||
tpl.save(DEST_FILE)
|
tpl.save(DEST_FILE)
|
||||||
|
|||||||
@ -1,55 +1,64 @@
|
|||||||
# -*- coding: utf-8 -*-
|
# -*- coding: utf-8 -*-
|
||||||
'''
|
"""
|
||||||
Created : 2015-03-26
|
Created : 2015-03-26
|
||||||
|
|
||||||
@author: Eric Lapouyade
|
@author: Eric Lapouyade
|
||||||
'''
|
"""
|
||||||
|
|
||||||
from docxtpl import DocxTemplate, RichText
|
from docxtpl import DocxTemplate, RichText
|
||||||
|
|
||||||
tpl = DocxTemplate('templates/richtext_tpl.docx')
|
tpl = DocxTemplate("templates/richtext_tpl.docx")
|
||||||
|
|
||||||
rt = RichText()
|
rt = RichText()
|
||||||
rt.add('a rich text', style='myrichtextstyle')
|
rt.add("a rich text", style="myrichtextstyle")
|
||||||
rt.add(' with ')
|
rt.add(" with ")
|
||||||
rt.add('some italic', italic=True)
|
rt.add("some italic", italic=True)
|
||||||
rt.add(' and ')
|
rt.add(" and ")
|
||||||
rt.add('some violet', color='#ff00ff')
|
rt.add("some violet", color="#ff00ff")
|
||||||
rt.add(' and ')
|
rt.add(" and ")
|
||||||
rt.add('some striked', strike=True)
|
rt.add("some striked", strike=True)
|
||||||
rt.add(' and ')
|
rt.add(" and ")
|
||||||
rt.add('some Highlighted', highlight='#ffff00')
|
rt.add("some Highlighted", highlight="#ffff00")
|
||||||
rt.add(' and ')
|
rt.add(" and ")
|
||||||
rt.add('some small', size=14)
|
rt.add("some small", size=14)
|
||||||
rt.add(' or ')
|
rt.add(" or ")
|
||||||
rt.add('big', size=60)
|
rt.add("big", size=60)
|
||||||
rt.add(' text.')
|
rt.add(" text.")
|
||||||
rt.add('\nYou can add an hyperlink, here to ')
|
rt.add("\nYou can add an hyperlink, here to ")
|
||||||
rt.add('google', url_id=tpl.build_url_id('http://google.com'))
|
rt.add("google", url_id=tpl.build_url_id("http://google.com"))
|
||||||
rt.add('\nEt voilà ! ')
|
rt.add("\nEt voilà ! ")
|
||||||
rt.add('\n1st line')
|
rt.add("\n1st line")
|
||||||
rt.add('\n2nd line')
|
rt.add("\n2nd line")
|
||||||
rt.add('\n3rd line')
|
rt.add("\n3rd line")
|
||||||
rt.add('\aA new paragraph : <cool>\a')
|
rt.add("\aA new paragraph : <cool>\a")
|
||||||
rt.add('--- A page break here (see next page) ---\f')
|
rt.add("--- A page break here (see next page) ---\f")
|
||||||
|
|
||||||
for ul in ['single', 'double', 'thick', 'dotted', 'dash', 'dotDash', 'dotDotDash', 'wave']:
|
for ul in [
|
||||||
rt.add('\nUnderline : ' + ul + ' \n', underline=ul)
|
"single",
|
||||||
rt.add('\nFonts :\n', underline=True)
|
"double",
|
||||||
rt.add('Arial\n', font='Arial')
|
"thick",
|
||||||
rt.add('Courier New\n', font='Courier New')
|
"dotted",
|
||||||
rt.add('Times New Roman\n', font='Times New Roman')
|
"dash",
|
||||||
rt.add('\n\nHere some')
|
"dotDash",
|
||||||
rt.add('superscript', superscript=True)
|
"dotDotDash",
|
||||||
rt.add(' and some')
|
"wave",
|
||||||
rt.add('subscript', subscript=True)
|
]:
|
||||||
|
rt.add("\nUnderline : " + ul + " \n", underline=ul)
|
||||||
|
rt.add("\nFonts :\n", underline=True)
|
||||||
|
rt.add("Arial\n", font="Arial")
|
||||||
|
rt.add("Courier New\n", font="Courier New")
|
||||||
|
rt.add("Times New Roman\n", font="Times New Roman")
|
||||||
|
rt.add("\n\nHere some")
|
||||||
|
rt.add("superscript", superscript=True)
|
||||||
|
rt.add(" and some")
|
||||||
|
rt.add("subscript", subscript=True)
|
||||||
|
|
||||||
rt_embedded = RichText('an example of ')
|
rt_embedded = RichText("an example of ")
|
||||||
rt_embedded.add(rt)
|
rt_embedded.add(rt)
|
||||||
|
|
||||||
context = {
|
context = {
|
||||||
'example': rt_embedded,
|
"example": rt_embedded,
|
||||||
}
|
}
|
||||||
|
|
||||||
tpl.render(context)
|
tpl.render(context)
|
||||||
tpl.save('output/richtext.docx')
|
tpl.save("output/richtext.docx")
|
||||||
|
|||||||
@ -1,16 +1,16 @@
|
|||||||
# -*- coding: utf-8 -*-
|
# -*- coding: utf-8 -*-
|
||||||
'''
|
"""
|
||||||
Created : 2015-03-26
|
Created : 2015-03-26
|
||||||
|
|
||||||
@author: Eric Lapouyade
|
@author: Eric Lapouyade
|
||||||
'''
|
"""
|
||||||
|
|
||||||
from docxtpl import DocxTemplate, RichText
|
from docxtpl import DocxTemplate, RichText
|
||||||
|
|
||||||
tpl = DocxTemplate('templates/richtext_and_if_tpl.docx')
|
tpl = DocxTemplate("templates/richtext_and_if_tpl.docx")
|
||||||
|
|
||||||
|
|
||||||
context = {'foobar': RichText('Foobar!', color='ff0000')}
|
context = {"foobar": RichText("Foobar!", color="ff0000")}
|
||||||
|
|
||||||
tpl.render(context)
|
tpl.render(context)
|
||||||
tpl.save('output/richtext_and_if.docx')
|
tpl.save("output/richtext_and_if.docx")
|
||||||
|
|||||||
@ -6,15 +6,16 @@ Created : 2022-08-03
|
|||||||
|
|
||||||
|
|
||||||
from docxtpl import DocxTemplate, RichText
|
from docxtpl import DocxTemplate, RichText
|
||||||
tpl = DocxTemplate('templates/richtext_eastAsia_tpl.docx')
|
|
||||||
rt = RichText('测试TEST', font='eastAsia:Microsoft YaHei')
|
tpl = DocxTemplate("templates/richtext_eastAsia_tpl.docx")
|
||||||
ch = RichText('测试TEST', font='eastAsia:微软雅黑')
|
rt = RichText("测试TEST", font="eastAsia:Microsoft YaHei")
|
||||||
sun = RichText('测试TEST', font='eastAsia:SimSun')
|
ch = RichText("测试TEST", font="eastAsia:微软雅黑")
|
||||||
|
sun = RichText("测试TEST", font="eastAsia:SimSun")
|
||||||
context = {
|
context = {
|
||||||
'example': rt,
|
"example": rt,
|
||||||
'Chinese': ch,
|
"Chinese": ch,
|
||||||
'simsun': sun,
|
"simsun": sun,
|
||||||
}
|
}
|
||||||
|
|
||||||
tpl.render(context)
|
tpl.render(context)
|
||||||
tpl.save('output/richtext_eastAsia.docx')
|
tpl.save("output/richtext_eastAsia.docx")
|
||||||
|
|||||||
@ -3,16 +3,16 @@ import glob
|
|||||||
import six
|
import six
|
||||||
import os
|
import os
|
||||||
|
|
||||||
tests = sorted(glob.glob('[A-Za-z]*.py'))
|
tests = sorted(glob.glob("[A-Za-z]*.py"))
|
||||||
excludes = ['runtests.py']
|
excludes = ["runtests.py"]
|
||||||
|
|
||||||
output_dir = os.path.join(os.path.dirname(__file__), 'output')
|
output_dir = os.path.join(os.path.dirname(__file__), "output")
|
||||||
if not os.path.exists(output_dir):
|
if not os.path.exists(output_dir):
|
||||||
os.mkdir(output_dir)
|
os.mkdir(output_dir)
|
||||||
|
|
||||||
for test in tests:
|
for test in tests:
|
||||||
if test not in excludes:
|
if test not in excludes:
|
||||||
six.print_('%s ...' % test)
|
six.print_("%s ..." % test)
|
||||||
subprocess.call(['python', './%s' % test])
|
subprocess.call(["python", "./%s" % test])
|
||||||
|
|
||||||
six.print_('Done.')
|
six.print_("Done.")
|
||||||
|
|||||||
@ -1,36 +1,36 @@
 # -*- coding: utf-8 -*-
-'''
+"""
 Created : 2015-03-12

 @author: Eric Lapouyade
-'''
+"""

 from docxtpl import DocxTemplate
 from docx.shared import Inches

-tpl = DocxTemplate('templates/subdoc_tpl.docx')
+tpl = DocxTemplate("templates/subdoc_tpl.docx")

 sd = tpl.new_subdoc()
-p = sd.add_paragraph('This is a sub-document inserted into a bigger one')
-p = sd.add_paragraph('It has been ')
-p.add_run('dynamically').style = 'dynamic'
-p.add_run(' generated with python by using ')
-p.add_run('python-docx').italic = True
-p.add_run(' library')
+p = sd.add_paragraph("This is a sub-document inserted into a bigger one")
+p = sd.add_paragraph("It has been ")
+p.add_run("dynamically").style = "dynamic"
+p.add_run(" generated with python by using ")
+p.add_run("python-docx").italic = True
+p.add_run(" library")

-sd.add_heading('Heading, level 1', level=1)
-sd.add_paragraph('This is an Intense quote', style='IntenseQuote')
+sd.add_heading("Heading, level 1", level=1)
+sd.add_paragraph("This is an Intense quote", style="IntenseQuote")

-sd.add_paragraph('A picture :')
-sd.add_picture('templates/python_logo.png', width=Inches(1.25))
+sd.add_paragraph("A picture :")
+sd.add_picture("templates/python_logo.png", width=Inches(1.25))

-sd.add_paragraph('A Table :')
+sd.add_paragraph("A Table :")
 table = sd.add_table(rows=1, cols=3)
 hdr_cells = table.rows[0].cells
-hdr_cells[0].text = 'Qty'
-hdr_cells[1].text = 'Id'
-hdr_cells[2].text = 'Desc'
-recordset = ((1, 101, 'Spam'), (2, 42, 'Eggs'), (3, 631, 'Spam,spam, eggs, and ham'))
+hdr_cells[0].text = "Qty"
+hdr_cells[1].text = "Id"
+hdr_cells[2].text = "Desc"
+recordset = ((1, 101, "Spam"), (2, 42, "Eggs"), (3, 631, "Spam,spam, eggs, and ham"))
 for item in recordset:
     row_cells = table.add_row().cells
     row_cells[0].text = str(item[0])
@ -38,8 +38,8 @@ for item in recordset:
     row_cells[2].text = item[2]

 context = {
-    'mysubdoc': sd,
+    "mysubdoc": sd,
 }

 tpl.render(context)
-tpl.save('output/subdoc.docx')
+tpl.save("output/subdoc.docx")
@ -2,19 +2,19 @@ from docxtpl import DocxTemplate
 from jinja2.exceptions import TemplateError
 import six

-six.print_('=' * 80)
+six.print_("=" * 80)
 six.print_("Generating template error for testing (so it is safe to ignore) :")
-six.print_('.' * 80)
+six.print_("." * 80)
 try:
-    tpl = DocxTemplate('templates/template_error_tpl.docx')
-    tpl.render({'test_variable': 'test variable value'})
+    tpl = DocxTemplate("templates/template_error_tpl.docx")
+    tpl.render({"test_variable": "test variable value"})
 except TemplateError as the_error:
     six.print_(six.text_type(the_error))
-    if hasattr(the_error, 'docx_context'):
+    if hasattr(the_error, "docx_context"):
         six.print_("Context:")
         for line in the_error.docx_context:
             six.print_(line)
-tpl.save('output/template_error.docx')
-six.print_('.' * 80)
+tpl.save("output/template_error.docx")
+six.print_("." * 80)
 six.print_(" End of TemplateError Test ")
-six.print_('=' * 80)
+six.print_("=" * 80)
@ -1,23 +1,23 @@
 # -*- coding: utf-8 -*-
-'''
+"""
 Created : 2017-10-15

 @author: Arthaslixin
-'''
+"""

 from docxtpl import DocxTemplate

-tpl = DocxTemplate('templates/vertical_merge_tpl.docx')
+tpl = DocxTemplate("templates/vertical_merge_tpl.docx")

 context = {
-    'items': [
-        {'desc': 'Python interpreters', 'qty': 2, 'price': 'FREE'},
-        {'desc': 'Django projects', 'qty': 5403, 'price': 'FREE'},
-        {'desc': 'Guido', 'qty': 1, 'price': '100,000,000.00'},
+    "items": [
+        {"desc": "Python interpreters", "qty": 2, "price": "FREE"},
+        {"desc": "Django projects", "qty": 5403, "price": "FREE"},
+        {"desc": "Guido", "qty": 1, "price": "100,000,000.00"},
     ],
-    'total_price': '100,000,000.00',
-    'category': 'Book',
+    "total_price": "100,000,000.00",
+    "category": "Book",
 }

 tpl.render(context)
-tpl.save('output/vertical_merge.docx')
+tpl.save("output/vertical_merge.docx")
@ -1,5 +1,5 @@
 from docxtpl import DocxTemplate

-tpl = DocxTemplate('templates/vertical_merge_nested_tpl.docx')
+tpl = DocxTemplate("templates/vertical_merge_nested_tpl.docx")
 tpl.render({})
-tpl.save('output/vertical_merge_nested.docx')
+tpl.save("output/vertical_merge_nested.docx")
@ -1,12 +1,12 @@
 from docxtpl import DocxTemplate, RichText

-tpl = DocxTemplate('templates/word2016_tpl.docx')
+tpl = DocxTemplate("templates/word2016_tpl.docx")
 tpl.render(
     {
-        'test_space': ' ',
-        'test_tabs': 5 * '\t',
-        'test_space_r': RichText(' '),
-        'test_tabs_r': RichText(5 * '\t'),
+        "test_space": " ",
+        "test_tabs": 5 * "\t",
+        "test_space_r": RichText(" "),
+        "test_tabs_r": RichText(5 * "\t"),
     }
 )
-tpl.save('output/word2016.docx')
+tpl.save("output/word2016.docx")