diff --git a/cobalt/site/README.md b/cobalt/site/README.md
new file mode 100644
index 000000000000..4a17edbce192
--- /dev/null
+++ b/cobalt/site/README.md
@@ -0,0 +1,22 @@
+# Regenerating developer site contents
+
+1. Update documents and images. Note that the files in `cobalt/site/docs/gen`
+   and `cobalt/site/docs/reference` are autogenerated, and any changes in
+   those directories will be overwritten by the steps below.
+
+2. From the root of your repository, run the following to build the docsite
+   Docker image.
+
+ ```shell
+ $ docker build -t docsite --build-arg UID=$(id -u) --build-arg GID=$(id -g) cobalt/site/docker
+ ```
+
+3. Run it to regenerate the site.
+
+ ```shell
+ $ docker run -it --mount type=bind,source=$(pwd),target=/code docsite
+ ```
+
+4. (Optional) To preview the changes you must commit the generated files and
+   create a Pull Request, as sketched below. That pull request can be
+   previewed in staging by following the directions for deploying the
+   developer site.
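+
+   A typical flow looks like the following (the branch name and commit
+   message are only examples):
+
+   ```shell
+   $ git checkout -b regenerate-docsite
+   $ git add cobalt/site
+   $ git commit -m "Regenerate developer site"
+   $ git push origin regenerate-docsite
+   ```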
diff --git a/cobalt/site/docker/Dockerfile b/cobalt/site/docker/Dockerfile
new file mode 100644
index 000000000000..b22bc09b5aa9
--- /dev/null
+++ b/cobalt/site/docker/Dockerfile
@@ -0,0 +1,65 @@
+# Copyright 2021 The Cobalt Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+ARG FROM_IMAGE
+FROM ${FROM_IMAGE:-gcr.io/cloud-marketplace-containers/google/debian10}
+
+RUN apt update -qqy \
+ && apt install -qqy --no-install-recommends \
+ curl \
+ doxygen \
+ git \
+ python3 \
+ unzip \
+ && apt-get clean autoclean \
+ && apt-get autoremove -y --purge \
+ && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* \
+ && rm -rf /var/lib/{apt,dpkg,cache,log}
+
+# Tell GN that we are building with Docker.
+ENV IS_DOCKER=1
+
+# Set python path for gn.
+ENV PYTHONPATH=/code
+
+# Mark the source directory safe.
+RUN git config --global --add safe.directory /code
+
+# === Get GN via CIPD
+ARG GN_SHA256SUM="af7b2dcb3905bca56655e12131b365f1cba8e159db80d2022330c4f522fab2ef /tmp/gn.zip"
+ARG GN_HASH=r3styzkFvKVmVeEhMbNl8cuo4VnbgNICIzDE9SL6su8C
+RUN curl --location --silent --output /tmp/gn.zip \
+ "https://chrome-infra-packages.appspot.com/dl/gn/gn/linux-amd64/+/${GN_HASH}" \
+ && echo ${GN_SHA256SUM} | sha256sum --check \
+ && unzip /tmp/gn.zip -d /usr/local/bin \
+ && rm /tmp/gn.zip
+RUN chmod a+x /usr/local/bin/gn
+
+# We create and use a non-root user explicitly so that the generated and
+# modified files maintain the same permissions as the user that launched the
+# Docker container.
+ARG USER
+ARG UID
+ARG GID
+RUN addgroup --group --gid "${GID}" defaultgroup \
+ && adduser --disabled-password --gecos '' --uid "${UID}" --gid "${GID}" defaultuser
+
+# Create an out directory for gn. Its name is hardcoded in the docsite script.
+RUN mkdir /project_out_dir \
+ && chown ${USER:-defaultuser}:defaultgroup /project_out_dir
+
+# Once the directory has been created we can switch to the new user.
+USER ${USER:-defaultuser}
+
+CMD /code/cobalt/site/scripts/generate_site.py
diff --git a/cobalt/site/scripts/cobalt_configuration_public.py b/cobalt/site/scripts/cobalt_configuration_public.py
new file mode 100644
index 000000000000..d7cb8136cf8d
--- /dev/null
+++ b/cobalt/site/scripts/cobalt_configuration_public.py
@@ -0,0 +1,162 @@
+# Copyright 2023 The Cobalt Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Module to generate Starboard Configuration Reference."""
+import environment
+import os
+import re
+import sys
+
+
+def print_doc_header(doc, title):
+ doc.write('Project: /youtube/cobalt/_project.yaml\n')
+ doc.write('Book: /youtube/cobalt/_book.yaml\n\n')
+ doc.write(f'# {title}\n\n')
+
+
+def print_doc_section_header(doc, section, section_headers):
+ doc.write('## ' + section + '\n\n')
+ if section in section_headers:
+ doc.write(section_headers[section] + '\n\n')
+
+
+def print_section_property_table_header(doc):
+ doc.write('| Properties |\n')
+ doc.write('| :--- |\n')
+
+
+def print_property(doc, prop):
+  comment = prop['comment']
+  comment = comment.replace('__', '&#95;&#95;')
+  if prop['value']:
+    line_break = ''
+    value = prop['value'].replace('|', r'\|')
+    if len(value) > 30:
+      line_break = '<br>'
+    if comment[-8:] != '<br><br>':
+      comment += '<br><br>'
+    comment += ('The default value in the Stub implementation is ' +
+                line_break + '`' + value + '`')
+  elif prop['undefined']:
+    if comment[-8:] != '<br><br>':
+      comment += '<br><br>'
+    comment += 'By default, this property is undefined.'
+  if comment[0:8] != '<br><br>':
+    doc.write('| **`' + prop['name'] + '`**<br><br>' + comment + ' |\n')
+  else:
+    doc.write('| **`' + prop['name'] + '`**' + comment + ' |\n')
+
+
+def create_reference_doc(site_path, properties, section_headers):
+ reference_doc_path = os.path.join(site_path, 'docs', 'reference', 'starboard',
+ 'configuration-public.md')
+ environment.make_dirs(os.path.dirname(reference_doc_path))
+ with open(reference_doc_path, 'w', encoding='utf8') as doc:
+ print_doc_header(doc, 'Starboard Configuration Reference Guide')
+
+ for section in sorted(properties):
+ if len(properties[section]) > 0:
+ print_doc_section_header(doc, section, section_headers)
+
+ print_section_property_table_header(doc)
+
+ for p in range(0, len(properties[section])):
+ print_property(doc, properties[section][p])
+
+ doc.write('\n\n')
+
+
+def main(source_dir, output_dir=None):
+ config_dir = environment.get_stub_platform_dir(source_dir)
+ file_path = os.path.join(config_dir, 'configuration_public.h')
+ with open(file_path, 'r', encoding='utf8') as file_contents:
+ file_lines = file_contents.readlines()
+
+  # Parse the stub configuration_public.h header.
+ comment = ''
+ in_line_item = ''
+ section = ''
+ properties = {}
+ section_headers = {}
+ last_thing_was_a_header = False
+ for original_line in file_lines:
+ line = original_line.strip()
+ if line[0:7] == '// --- ':
+ section = line[7:].split(' -')[0]
+ properties[section] = []
+ last_thing_was_a_header = True
+ elif section and (line[0:8] == '#define ' or line[0:7] == '#undef '):
+
+ if in_line_item:
+ if comment:
+          comment += '</' + in_line_item + '>'
+ in_line_item = ''
+ last_thing_was_a_header = False
+ prop_array = line.split(' ')
+ prop = {'comment': '', 'name': '', 'value': '', 'undefined': False}
+ if line[0:7] == '#undef ':
+ prop['undefined'] = True
+ if len(prop_array) > 1:
+ prop['name'] = prop_array[1]
+ if len(prop_array) > 2:
+ prop['value'] = ' '.join(prop_array[2:])
+ if comment:
+ prop['comment'] = comment.strip()
+ if '(' in prop['name'] and ')' not in prop['name']:
+ new_string = ' '.join(prop_array[1:])
+ new_prop_array = new_string.split(')')
+ prop['name'] = new_prop_array[0] + ')'
+ new_value = ')'.join(new_prop_array[1:])
+ prop['value'] = new_value.strip()
+ properties[section].append(prop)
+ comment = ''
+ elif section and line[0:2] == '//':
+ ol_item_regex = re.compile(r'^\d\. ')
+ comment_text = line[2:].strip()
+ is_ol_item = re.search(ol_item_regex, comment_text)
+ if (is_ol_item or comment_text.strip()[0:2] == '- ' or
+ comment_text.strip()[0:2] == '* '):
+        # Replace the list marker at the beginning of the comment with '<li>'.
+        # Strip whitespace before the marker and after it up to the text.
+        if not in_line_item:
+          if is_ol_item:
+            comment_text = '<ol><li>' + comment_text.strip()[2:].strip()
+            in_line_item = 'ol'
+          else:
+            comment_text = '<ul><li>' + comment_text.strip()[1:].strip()
+            in_line_item = 'ul'
+        else:
+          if is_ol_item:
+            comment_text = '</li><li>' + comment_text.strip()[2:].strip()
+          else:
+            comment_text = '</li><li>' + comment_text.strip()[1:].strip()
+ comment += ' ' + comment_text
+ elif comment and line == '':
+ if last_thing_was_a_header:
+ section_headers[section] = comment
+ last_thing_was_a_header = False
+ comment = ''
+      if comment[-8:] != '<br><br>':
+        comment += '<br><br>'
+
+ if output_dir:
+ site_path = environment.get_site_dir(output_dir)
+ else:
+ site_path = environment.get_site_dir(source_dir)
+ create_reference_doc(site_path, properties, section_headers)
+ return 0
+
+
+if __name__ == '__main__':
+ options = environment.parse_arguments(__doc__, sys.argv[1:])
+ sys.exit(main(options.source, options.out))
diff --git a/cobalt/site/scripts/cobalt_documentation.py b/cobalt/site/scripts/cobalt_documentation.py
new file mode 100644
index 000000000000..36c9ca44bbb6
--- /dev/null
+++ b/cobalt/site/scripts/cobalt_documentation.py
@@ -0,0 +1,61 @@
+# Copyright 2023 The Cobalt Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Module to copy Cobalt doc folders to cobalt site directory."""
+import environment
+import os
+import shutil
+import sys
+
+_COBALT_DOC_LOCATIONS = [
+ 'cobalt/doc/',
+ 'starboard/doc/',
+ 'starboard/build/doc/',
+ 'starboard/tools/doc/',
+]
+
+
+def write_header(doc):
+ doc.write('Project: /youtube/cobalt/_project.yaml\n')
+ doc.write('Book: /youtube/cobalt/_book.yaml\n\n')
+
+
+def copy_doc_locations(source_dir, output_dir=None):
+ if output_dir:
+ site_path = environment.get_site_dir(output_dir)
+ else:
+ site_path = environment.get_site_dir(source_dir)
+
+ gen_dir = os.path.join(site_path, 'docs', 'gen')
+ if os.path.exists(gen_dir):
+ shutil.rmtree(gen_dir)
+
+ for location in _COBALT_DOC_LOCATIONS:
+ shutil.copytree(
+ os.path.join(source_dir, location), os.path.join(gen_dir, location))
+
+ for root, _, files in os.walk(gen_dir):
+ for filename in files:
+ if not filename.endswith('.md'):
+ continue
+ filename = os.path.join(root, filename)
+ with open(filename, encoding='utf8') as f:
+ lines = f.readlines()
+ with open(filename, 'w', encoding='utf8') as f:
+ write_header(f)
+ f.writelines(lines)
+
+
+if __name__ == '__main__':
+ out = sys.argv[2] if len(sys.argv) == 3 else None
+ copy_doc_locations(sys.argv[1], out)
diff --git a/cobalt/site/scripts/cobalt_gn_configuration.py b/cobalt/site/scripts/cobalt_gn_configuration.py
new file mode 100644
index 000000000000..901554df99d0
--- /dev/null
+++ b/cobalt/site/scripts/cobalt_gn_configuration.py
@@ -0,0 +1,159 @@
+# Copyright 2023 The Cobalt Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Module to generate GN Configuration Reference."""
+import json
+import environment
+import os
+import re
+import subprocess
+import sys
+
+
+def print_doc_header(doc, title):
+ doc.write('Project: /youtube/cobalt/_project.yaml\n')
+ doc.write('Book: /youtube/cobalt/_book.yaml\n\n')
+ doc.write(f'# {title}\n\n')
+
+
+def print_section_property_table_header(doc, section):
+ section = section.replace('_', ' ').capitalize()
+ doc.write('| ' + section + ' |\n')
+ doc.write('| :--- |\n')
+
+
+def format_comment(raw_comment):
+ """Reformats the comment to be a single line and layout lists correctly."""
+ list_item_regex = re.compile(r'\s+"([^"]+)"\s*-+\s*(.*)')
+ continued_list_item_regex = re.compile(r'\s+')
+
+ comment = ''
+ is_last_line_list_item = False
+ for line in raw_comment.splitlines():
+ list_item_match = re.search(list_item_regex, line)
+ comment_text = line.strip()
+ if comment_text[0:4] == 'TODO':
+ continue
+ elif list_item_match:
+ is_last_line_list_item = True
+      comment += f' {list_item_match[1]}<br><li>{list_item_match[2]}'
+    elif comment_text == '':
+      comment += '<br>'
+ else:
+ if is_last_line_list_item:
+ continued_list_item_match = re.search(continued_list_item_regex, line)
+ if not continued_list_item_match:
+ comment += ' '
+ comment += ' ' + comment_text
+
+  # Wrap the list items in a list element.
+  comment = comment.replace('<li>', '<ul><li>', 1)
+  comment = last_replace(comment, '<br>', '</ul>')
+ return comment
+
+
+def print_item(doc, name, value, comment):
+ comment = format_comment(comment)
+ if value:
+ value = value.replace('|', r'\|')
+    if comment[-8:] != '<br><br>':
+      comment += '<br><br>'
+    comment += ('The default value is `' + value + '`.')
+  if comment[0:8] != '<br><br>':
+    doc.write('| **`' + name + '`**<br><br>' + comment + ' |\n')
+ else:
+ doc.write('| **`' + name + '`**' + comment + ' |\n')
+
+
+def create_reference_doc(site_path, variables):
+ reference_doc_path = os.path.join(site_path, 'docs', 'reference', 'starboard',
+ 'gn-configuration.md')
+ environment.make_dirs(os.path.dirname(reference_doc_path))
+ with open(reference_doc_path, 'w', encoding='utf8') as doc:
+ print_doc_header(doc, 'Starboard: configuration.gni Reference Guide')
+
+ print_section_property_table_header(doc, 'variables')
+ for name, value, comment in sorted(variables, key=lambda x: x[0]):
+ print_item(doc, name, value, comment)
+
+
+def add_args(args, function_data):
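+  """Parses a C-style parameter list into name/type dicts appended to args."""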
+ function_data = function_data.replace(');', '')
+ if function_data == '':
+ return args
+ arg_list = function_data.split(',')
+ for arg_index in range(0, len(arg_list)):
+ arg_item_no_ws = arg_list[arg_index].strip()
+ if arg_item_no_ws != '':
+ arg_details = arg_item_no_ws.split(' ')
+ args.append({'name': arg_details.pop(), 'type': ' '.join(arg_details)})
+ return args
+
+
+def last_replace(value, old, new):
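+  """Replaces the last occurrence of old in value with new."""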
+ reverse_list = value.rsplit(old, 1)
+ return new.join(reverse_list)
+
+
+def main(source_dir, output_dir=None):
+ base_config_path = '//starboard/build/config/base_configuration.gni'
+
+  # `gn args` resolves all variables used in the default values before
+  # printing, so to keep the docs looking reasonable, any variable used in a
+  # default value must be listed here so it can be overridden with a
+  # placeholder.
+ # Note that `root_out_dir` is not a build argument and cannot be overridden.
+ args = ['clang_base_path']
+ args_overrides = ' '.join(f'{x}="<{x}>"' for x in args)
+ try:
+ out_dir = '/project_out_dir'
+ subprocess.check_call(['gn', 'gen', out_dir], cwd=source_dir)
+ output = subprocess.check_output(
+ ['gn', 'args', out_dir, '--list', '--json', '--args=' + args_overrides],
+ cwd=source_dir)
+ except subprocess.CalledProcessError as cpe:
+ raise RuntimeError(f'Failed to run GN: {cpe.output}') from cpe
+
+ # If `gn args` invokes any print statements these will be printed before the
+ # json string starts. Strip all lines until we encounter the start of the json
+ # array.
+ output_lines = output.decode('utf-8').splitlines()
+ gn_vars = []
+ while output_lines:
+ try:
+ gn_vars = json.loads(' '.join(output_lines))
+ except ValueError:
+ # If the attempt to parse the rest of the lines fails we remove the first
+ # line and try again.
+ output_lines = output_lines[1:]
+ else:
+ break
+
+ variables = []
+ for variable in gn_vars:
+ if variable['default'].get('file') == base_config_path:
+ name = variable['name']
+ value = variable['default']['value']
+ comment = variable.get('comment', '').strip()
+ variables.append((name, value, comment))
+
+ if output_dir:
+ site_path = environment.get_site_dir(output_dir)
+ else:
+ site_path = environment.get_site_dir(source_dir)
+ create_reference_doc(site_path, variables)
+ return 0
+
+
+if __name__ == '__main__':
+ options = environment.parse_arguments('', sys.argv[1:])
+ sys.exit(main(options.source, options.out))
diff --git a/cobalt/site/scripts/cobalt_module_reference.py b/cobalt/site/scripts/cobalt_module_reference.py
new file mode 100644
index 000000000000..8cd3798b1df3
--- /dev/null
+++ b/cobalt/site/scripts/cobalt_module_reference.py
@@ -0,0 +1,486 @@
+# Copyright 2017 The Cobalt Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Generates reference markdown documentation for Starboard modules.
+
+The full process:
+1. Preprocess the files to be Doxygen friendly (Doxygenation).
+2. Run Doxygen to produce XML that describes each module.
+3. Parse the XML and generate Markdown for each module.
+"""
+
+import contextlib
+from io import StringIO
+import logging
+import os
+import re
+import sys
+from xml.etree import ElementTree as ET
+
+import doxygen
+import environment
+from markdown_writer import MarkdownWriter
+
+_HEADER_PATTERN = r'.*\.h$'
+_HEADER_RE = re.compile(_HEADER_PATTERN)
+
+_HEADER_XML_PATTERN = r'.*h\.xml$'
+_HEADER_XML_RE = re.compile(_HEADER_XML_PATTERN)
+
+_SCRIPT_FILE = os.path.basename(__file__)
+_SCRIPT_NAME, _ = os.path.splitext(_SCRIPT_FILE)
+
+_OSS_STARBOARD_VERSIONS = [13, 14, 15]
+
+
+def _strip(string_or_none):
+ if string_or_none:
+ return string_or_none.strip()
+ return ''
+
+
+def _get_files(parent_path, regexp):
+ return sorted([
+ os.path.realpath(os.path.join(parent_path, x))
+ for x in os.listdir(parent_path)
+ if regexp.match(x)
+ ])
+
+
+def _find_filename(compounddef_element):
+ return _strip(compounddef_element.findtext('./compoundname'))
+
+
+def _find_name(member_def):
+ return _strip(member_def.findtext('./name'))
+
+
+def _has_innertext(element, query=''):
+ return bool(_find_innertext(element, query))
+
+
+def _has_description(memberdef_element):
+ return (_has_innertext(memberdef_element, './briefdescription') or
+ _has_innertext(memberdef_element, './detaileddescription'))
+
+
+def _find_innertext(element, query=''):
+ if element is None:
+ return ''
+ if query:
+ node = element.find(query)
+ if node is None:
+ return ''
+ else:
+ node = element
+ return _strip(''.join(x.strip() for x in node.itertext()))
+
+
+def _find_memberdefs(compounddef_element, kind):
+ return compounddef_element.findall(f'./sectiondef/memberdef[@kind="{kind}"]')
+
+
+def _find_documented_memberdefs(compounddef_element, kind):
+ memberdef_elements = _find_memberdefs(compounddef_element, kind)
+ if not memberdef_elements:
+ return {}
+
+ all_memberdefs = {_find_name(x): x for x in memberdef_elements}
+
+ def essential(k, v):
+ return _has_description(v) and not k.endswith('Private')
+
+ return {k: v for k, v in all_memberdefs.items() if essential(k, v)}
+
+
+def _find_structs(compounddef_element, xml_path):
+ innerclass_elements = compounddef_element.findall('./innerclass')
+ if not innerclass_elements:
+ return {}
+
+ all_struct_refids = [x.get('refid') for x in innerclass_elements]
+ all_struct_elements = {}
+ for refid in all_struct_refids:
+ struct_xml_path = os.path.join(xml_path, refid + '.xml')
+ struct_xml = ET.parse(struct_xml_path)
+ struct_elements = struct_xml.findall(
+ './/compounddef[@kind="struct"][compoundname]')
+ for struct_element in struct_elements:
+ struct_name = _find_filename(struct_element)
+ all_struct_elements[struct_name] = struct_element
+
+ def essential(k):
+ return not k.endswith('Private')
+
+ return {k: v for k, v in all_struct_elements.items() if essential(k)}
+
+
+def _find_struct_names(compounddef_element, xml_path):
+ return set(_find_structs(compounddef_element, xml_path).keys())
+
+
+def _find_documented_memberdef_names(compounddef_element, *args):
+ found = set([])
+ for kind in args:
+ found.update(_find_documented_memberdefs(compounddef_element, kind).keys())
+ return found
+
+
+def _find_member_definition(memberdef_element):
+ type_element = memberdef_element.find('./type')
+ type_name = _find_innertext(type_element)
+ args_name = _strip(memberdef_element.findtext('./argsstring'))
+ member_name = _strip(memberdef_element.findtext('./name'))
+ # Doxygen does not handle structs of non-typedef'd function pointers
+  # gracefully. The 'type' and 'argsstring' elements are used to temporarily
+ # store the information needed to be able to rebuild the full signature, e.g.:
+ #
+ # void (*glEnable)(SbGlEnum cap)
+ #
+ # type = 'void (*'
+ # name = 'glEnable'
+ # argsstring = ')(SbGlEnum cap)'
+ #
+ # When we identify these members using the end of 'type' and the beginning of
+ # 'argsstring' we return the full member signature instead.
+ if type_name.endswith('(*') and args_name.startswith(')('):
+ return type_name + member_name + args_name
+ return type_name + ' ' + member_name
+
+
+def _node_to_markdown(out, node):
+ text = node.text.replace('|', '`') if node.text else ''
+ tail = node.tail.replace('|', '`') if node.tail else ''
+
+ if node.tag == 'ndash':
+ assert not _strip(text)
+ out.text('\u2013')
+ elif node.tag == 'mdash':
+ assert not _strip(text)
+    out.text('\u2014')
+ elif node.tag == 'para':
+ # Block tags should never be nested inside other blocks.
+ assert not _strip(tail)
+ out.paragraph()
+ elif node.tag == 'bold':
+ assert len(node) == 0
+ out.bold(text)
+ elif node.tag == 'computeroutput':
+ assert len(node) == 0
+ out.code(text)
+ elif node.tag == 'ulink':
+ url = node.get('url')
+ assert url
+ out.link(url)
+ elif node.tag == 'orderedlist':
+ # List tags should never have any text of their own.
+ assert not _strip(text)
+
+ # Block tags should never be nested inside other blocks.
+ assert not _strip(tail)
+ out.ordered_list()
+ elif node.tag == 'itemizedlist':
+ # List tags should never have any text of their own.
+ assert not _strip(text)
+
+ # Block tags should never be nested inside other blocks.
+ assert not _strip(tail)
+ out.unordered_list()
+ elif node.tag == 'listitem':
+ out.item()
+ elif node.tag == 'heading':
+ # Block tags should never be nested inside other blocks.
+ assert not _strip(tail)
+ try:
+ levels = int(node.get('level'))
+ except ValueError:
+ levels = 1
+ out.heading(levels=levels)
+ elif node.tag == 'verbatim':
+ # Verbatim tags can appear inside paragraphs.
+ assert len(node) == 0
+ # Don't replace pipes in verbatim text.
+ text = node.text if node.text else ''
+ out.code_block(text)
+ text = ''
+ else:
+ logging.warning('UNHANDLED: %s: %s', node.tag, text.replace('\n', '\\n'))
+
+ if text:
+ out.text(text)
+
+ for child in node:
+ _node_to_markdown(out, child)
+
+ if node.tag == 'para':
+ out.end_paragraph()
+ elif node.tag == 'ulink':
+ out.end_link()
+ elif node.tag in ['orderedlist', 'itemizedlist']:
+ out.end_list()
+ elif node.tag == 'heading':
+ out.end_heading()
+ out.pop_heading_level()
+ elif node.tag == 'listitem':
+ out.end_item()
+
+ if tail:
+ out.text(tail)
+
+
+def _description_to_markdown(out, description_element):
+ if description_element is None:
+ return
+
+ for child in description_element:
+ _node_to_markdown(out, child)
+
+
+def _emit_doc_header(out_io):
+ out_io.write('Project: /youtube/cobalt/_project.yaml\n')
+ out_io.write('Book: /youtube/cobalt/_book.yaml\n\n')
+
+
+def _emit_description(out, memberdef_element):
+ _description_to_markdown(out, memberdef_element.find('./briefdescription'))
+ _description_to_markdown(out, memberdef_element.find('./detaileddescription'))
+
+
+def _emit_macro(out, memberdef_element):
+ name = _find_name(memberdef_element)
+ assert name or _has_description(memberdef_element)
+
+ params = ''
+ param_defs = memberdef_element.findall('./param/defname')
+ if param_defs:
+ param_names = [_strip(x.text) for x in param_defs]
+ params = f'({", ".join(param_names)})'
+
+ logging.info('Macro: %s%s', name, params)
+ with out.auto_scoped_heading(name + params):
+ _emit_description(out, memberdef_element)
+
+
+def _emit_enum(out, memberdef_element):
+ name = _find_name(memberdef_element)
+ assert name or _has_description(memberdef_element)
+
+ logging.info('Enum: %s', name)
+ with out.auto_scoped_heading(name):
+ _emit_description(out, memberdef_element)
+ with out.auto_scoped_heading('Values'):
+ with out.auto_unordered_list():
+ for enumvalue_element in memberdef_element.findall('./enumvalue'):
+ with out.auto_item():
+ out.code(_find_name(enumvalue_element))
+ _emit_description(out, enumvalue_element)
+
+
+def _emit_typedef(out, memberdef_element):
+ name = _find_name(memberdef_element)
+ assert name or _has_description(memberdef_element)
+
+ with out.auto_scoped_heading(name):
+ _emit_description(out, memberdef_element)
+ definition = _strip(memberdef_element.findtext('./definition'))
+ if definition:
+ with out.auto_scoped_heading('Definition'):
+ out.code_block(definition)
+
+
+def _emit_struct(out, compounddef_element):
+ name = _find_filename(compounddef_element)
+ assert name or _has_description(compounddef_element)
+
+ logging.info('Struct: %s', name)
+ with out.auto_scoped_heading(name):
+ _emit_description(out, compounddef_element)
+ memberdef_elements = _find_memberdefs(compounddef_element, 'variable')
+ if memberdef_elements:
+ with out.auto_scoped_heading('Members'):
+ with out.auto_unordered_list():
+ for memberdef_element in memberdef_elements:
+ with out.auto_item():
+ out.code(_find_member_definition(memberdef_element))
+ _emit_description(out, memberdef_element)
+
+
+def _emit_variable(out, memberdef_element):
+ name = _find_name(memberdef_element)
+ assert name or _has_description(memberdef_element)
+
+ logging.info('Variable: %s', name)
+ with out.auto_scoped_heading(name):
+ _emit_description(out, memberdef_element)
+
+
+def _emit_function(out, memberdef_element):
+ name = _find_name(memberdef_element)
+ assert name or _has_description(memberdef_element)
+
+ logging.info('Function: %s', name)
+ with out.auto_scoped_heading(name):
+ _emit_description(out, memberdef_element)
+ prototype = memberdef_element.findtext('./definition') + \
+ memberdef_element.findtext('./argsstring')
+ if prototype:
+ with out.auto_scoped_heading('Declaration'):
+ out.code_block(prototype)
+
+
+def _emit_macros(out, compounddef_element):
+ member_dict = _find_documented_memberdefs(compounddef_element, 'define')
+ if not member_dict:
+ return False
+
+ with out.auto_scoped_heading('Macros'):
+ for name in sorted(member_dict.keys()):
+ _emit_macro(out, member_dict[name])
+ return True
+
+
+def _emit_enums(out, compounddef_element):
+ member_dict = _find_documented_memberdefs(compounddef_element, 'enum')
+ if not member_dict:
+ return False
+
+ with out.auto_scoped_heading('Enums'):
+ for name in sorted(member_dict.keys()):
+ _emit_enum(out, member_dict[name])
+ return True
+
+
+def _emit_typedefs(out, compounddef_element, xml_path):
+ member_dict = _find_documented_memberdefs(compounddef_element, 'typedef')
+ redundant_set = _find_documented_memberdef_names(compounddef_element,
+ 'define', 'enum', 'function')
+ redundant_set |= _find_struct_names(compounddef_element, xml_path)
+ essential_set = set(member_dict.keys()) - redundant_set
+ if not essential_set:
+ return False
+
+ with out.auto_scoped_heading('Typedefs'):
+ for name in sorted(essential_set):
+ _emit_typedef(out, member_dict[name])
+ return True
+
+
+def _emit_structs(out, compounddef_element, xml_path):
+ struct_dict = _find_structs(compounddef_element, xml_path)
+ if not struct_dict:
+ return False
+
+ with out.auto_scoped_heading('Structs'):
+ for name in sorted(struct_dict.keys()):
+ _emit_struct(out, struct_dict[name])
+ return True
+
+
+def _emit_variables(out, compounddef_element):
+ member_dict = _find_documented_memberdefs(compounddef_element, 'variable')
+ if not member_dict:
+ return False
+
+ with out.auto_scoped_heading('Variables'):
+ for name in sorted(member_dict.keys()):
+ _emit_variable(out, member_dict[name])
+ return True
+
+
+def _emit_functions(out, compounddef_element):
+ member_dict = _find_documented_memberdefs(compounddef_element, 'function')
+ if not member_dict:
+ return False
+
+ with out.auto_scoped_heading('Functions'):
+ for name in sorted(member_dict.keys()):
+ _emit_function(out, member_dict[name])
+ return True
+
+
+def _emit_file(out_io, compounddef_element, xml_path):
+ header_filename = _find_filename(compounddef_element)
+ logging.info('File: %s', header_filename)
+ _emit_doc_header(out_io)
+ mdwriter = MarkdownWriter(out_io)
+ with mdwriter.auto_scoped_heading(
+ f'Starboard Module Reference: `{header_filename}`'):
+ _emit_description(mdwriter, compounddef_element)
+ # When an API is deprecated it will be removed via #ifdef. When this is the
+ # case, we will no longer have macros, enums, typedefs, structs, or
+ # functions and thus the API should not be included in the site.
+ has_content = _emit_macros(mdwriter, compounddef_element)
+ has_content = _emit_enums(mdwriter, compounddef_element) or has_content
+ has_content = _emit_typedefs(mdwriter, compounddef_element,
+ xml_path) or has_content
+ has_content = _emit_structs(mdwriter, compounddef_element,
+ xml_path) or has_content
+ has_content = _emit_variables(mdwriter, compounddef_element) or has_content
+ has_content = _emit_functions(mdwriter, compounddef_element) or has_content
+ return has_content
+
+
+def generate(source_dir, output_dir):
+ if output_dir:
+ site_path = environment.get_site_dir(output_dir)
+ else:
+ site_path = environment.get_site_dir(source_dir)
+ doc_dir_path = os.path.join(site_path, 'docs', 'reference', 'starboard',
+ 'modules')
+ environment.make_clean_dirs(doc_dir_path)
+ starboard_directory_path = environment.get_starboard_dir(source_dir)
+ starboard_files = _get_files(starboard_directory_path, _HEADER_RE)
+ with environment.mkdtemp(suffix='.' + _SCRIPT_NAME) as temp_directory_path:
+ logging.debug('Working directory: %s', temp_directory_path)
+ doxygenated_directory_path = os.path.join(temp_directory_path,
+ 'doxygenated')
+ doxygenated_files = doxygen.doxygenate(starboard_files,
+ doxygenated_directory_path)
+ doxygen_directory_path = os.path.join(temp_directory_path, 'doxygen')
+ for sb_version in _OSS_STARBOARD_VERSIONS:
+ version_path = os.path.join(doxygen_directory_path, str(sb_version))
+ version_doc_dir_path = os.path.join(doc_dir_path, str(sb_version))
+ doxygen.doxygen(sb_version, doxygenated_files, [], version_path)
+ doxygen_xml_path = os.path.join(version_path, 'xml')
+ for header_xml_path in _get_files(doxygen_xml_path, _HEADER_XML_RE):
+ header_xml = ET.parse(header_xml_path)
+ for compounddef_element in header_xml.findall(
+ './/compounddef[@kind="file"][compoundname]'):
+ environment.make_dirs(version_doc_dir_path)
+ header_filename = _find_filename(compounddef_element)
+ doc_filename = (
+ os.path.splitext(os.path.basename(header_filename))[0] + '.md')
+ with contextlib.closing(StringIO()) as doc_file:
+ if not _emit_file(doc_file, compounddef_element, doxygen_xml_path):
+ continue
+ doc_contents = doc_file.getvalue()
+ doc_file_path = os.path.join(version_doc_dir_path, doc_filename)
+ environment.write_file(doc_file_path, doc_contents)
+
+ # Make the latest Starboard documentation version the default version.
+ if sb_version == _OSS_STARBOARD_VERSIONS[-1]:
+ doc_file_path = os.path.join(doc_dir_path, doc_filename)
+ environment.write_file(doc_file_path, doc_contents)
+
+ return 0
+
+
+def main(argv):
+ environment.setup_logging()
+ options = environment.parse_arguments(__doc__, argv)
+ environment.set_log_level(options.log_delta)
+ return generate(options.source, options.out)
+
+
+if __name__ == '__main__':
+ sys.exit(main(sys.argv[1:]))
diff --git a/cobalt/site/scripts/doxyfile.template b/cobalt/site/scripts/doxyfile.template
new file mode 100644
index 000000000000..f6c5b93a3022
--- /dev/null
+++ b/cobalt/site/scripts/doxyfile.template
@@ -0,0 +1,332 @@
+# Doxyfile 1.8.6
+
+DOXYFILE_ENCODING = UTF-8
+PROJECT_NAME = "Starboard API"
+PROJECT_NUMBER = {project_number}
+PROJECT_BRIEF = "The Cobalt Porting API"
+OUTPUT_DIRECTORY = {output_directory}
+CREATE_SUBDIRS = NO
+OUTPUT_LANGUAGE = English
+BRIEF_MEMBER_DESC = NO
+REPEAT_BRIEF = NO
+ABBREVIATE_BRIEF = "The $name class" \
+ "The $name widget" \
+ "The $name file" \
+ is \
+ provides \
+ specifies \
+ contains \
+ represents \
+ a \
+ an \
+ the
+ALWAYS_DETAILED_SEC = NO
+INLINE_INHERITED_MEMB = NO
+FULL_PATH_NAMES = YES
+STRIP_FROM_PATH =
+STRIP_FROM_INC_PATH =
+SHORT_NAMES = NO
+JAVADOC_AUTOBRIEF = NO
+QT_AUTOBRIEF = NO
+MULTILINE_CPP_IS_BRIEF = NO
+INHERIT_DOCS = YES
+SEPARATE_MEMBER_PAGES = NO
+TAB_SIZE = 2
+OPTIMIZE_OUTPUT_FOR_C = NO
+OPTIMIZE_OUTPUT_JAVA = NO
+OPTIMIZE_FOR_FORTRAN = NO
+OPTIMIZE_OUTPUT_VHDL = NO
+EXTENSION_MAPPING =
+MARKDOWN_SUPPORT = YES
+AUTOLINK_SUPPORT = YES
+BUILTIN_STL_SUPPORT = YES
+CPP_CLI_SUPPORT = NO
+SIP_SUPPORT = NO
+IDL_PROPERTY_SUPPORT = YES
+DISTRIBUTE_GROUP_DOC = NO
+SUBGROUPING = NO
+INLINE_GROUPED_CLASSES = NO
+INLINE_SIMPLE_STRUCTS = NO
+TYPEDEF_HIDES_STRUCT = NO
+LOOKUP_CACHE_SIZE = 0
+EXTRACT_ALL = NO
+EXTRACT_PRIVATE = NO
+EXTRACT_PACKAGE = NO
+EXTRACT_STATIC = NO
+EXTRACT_LOCAL_CLASSES = NO
+EXTRACT_LOCAL_METHODS = NO
+EXTRACT_ANON_NSPACES = NO
+HIDE_UNDOC_MEMBERS = YES
+HIDE_UNDOC_CLASSES = YES
+HIDE_FRIEND_COMPOUNDS = YES
+HIDE_IN_BODY_DOCS = NO
+INTERNAL_DOCS = NO
+CASE_SENSE_NAMES = YES
+HIDE_SCOPE_NAMES = NO
+SHOW_INCLUDE_FILES = NO
+SHOW_GROUPED_MEMB_INC = NO
+FORCE_LOCAL_INCLUDES = NO
+INLINE_INFO = YES
+SORT_MEMBER_DOCS = NO
+SORT_BRIEF_DOCS = NO
+SORT_MEMBERS_CTORS_1ST = NO
+SORT_GROUP_NAMES = NO
+SORT_BY_SCOPE_NAME = NO
+STRICT_PROTO_MATCHING = NO
+GENERATE_TODOLIST = NO
+GENERATE_TESTLIST = NO
+GENERATE_BUGLIST = NO
+GENERATE_DEPRECATEDLIST= YES
+ENABLED_SECTIONS =
+MAX_INITIALIZER_LINES = 30
+SHOW_USED_FILES = NO
+SHOW_FILES = NO
+SHOW_NAMESPACES = YES
+FILE_VERSION_FILTER =
+LAYOUT_FILE =
+CITE_BIB_FILES =
+
+QUIET = NO
+WARNINGS = YES
+WARN_IF_UNDOCUMENTED = YES
+WARN_IF_DOC_ERROR = YES
+WARN_NO_PARAMDOC = NO
+WARN_FORMAT = "$file:$line: $text"
+WARN_LOGFILE =
+INPUT = {input_files}
+INPUT_ENCODING = UTF-8
+FILE_PATTERNS = *.c \
+ *.cc \
+ *.cxx \
+ *.cpp \
+ *.c++ \
+ *.java \
+ *.ii \
+ *.ixx \
+ *.ipp \
+ *.i++ \
+ *.inl \
+ *.idl \
+ *.ddl \
+ *.odl \
+ *.h \
+ *.hh \
+ *.hxx \
+ *.hpp \
+ *.h++ \
+ *.cs \
+ *.d \
+ *.php \
+ *.php4 \
+ *.php5 \
+ *.phtml \
+ *.inc \
+ *.m \
+ *.markdown \
+ *.md \
+ *.mm \
+ *.dox \
+ *.py \
+ *.f90 \
+ *.f \
+ *.for \
+ *.tcl \
+ *.vhd \
+ *.vhdl \
+ *.ucf \
+ *.qsf \
+ *.as \
+ *.js
+RECURSIVE = NO
+EXCLUDE =
+EXCLUDE_SYMLINKS = NO
+EXCLUDE_PATTERNS =
+EXCLUDE_SYMBOLS =
+EXAMPLE_PATH =
+EXAMPLE_PATTERNS = *
+EXAMPLE_RECURSIVE = NO
+IMAGE_PATH =
+INPUT_FILTER =
+FILTER_PATTERNS =
+FILTER_SOURCE_FILES = NO
+FILTER_SOURCE_PATTERNS =
+USE_MDFILE_AS_MAINPAGE =
+
+SOURCE_BROWSER = NO
+INLINE_SOURCES = NO
+STRIP_CODE_COMMENTS = YES
+REFERENCED_BY_RELATION = NO
+REFERENCES_RELATION = NO
+REFERENCES_LINK_SOURCE = YES
+SOURCE_TOOLTIPS = YES
+USE_HTAGS = NO
+VERBATIM_HEADERS = NO
+
+ALPHABETICAL_INDEX = YES
+COLS_IN_ALPHA_INDEX = 5
+IGNORE_PREFIX =
+
+GENERATE_HTML = NO
+HTML_OUTPUT = html
+HTML_FILE_EXTENSION = .html
+HTML_HEADER =
+HTML_FOOTER =
+HTML_STYLESHEET =
+HTML_EXTRA_STYLESHEET =
+HTML_EXTRA_FILES =
+HTML_COLORSTYLE_HUE = 220
+HTML_COLORSTYLE_SAT = 100
+HTML_COLORSTYLE_GAMMA = 80
+HTML_TIMESTAMP = YES
+HTML_DYNAMIC_SECTIONS = NO
+HTML_INDEX_NUM_ENTRIES = 100
+
+GENERATE_DOCSET = NO
+DOCSET_FEEDNAME = "Doxygen generated docs"
+DOCSET_BUNDLE_ID = org.doxygen.Project
+DOCSET_PUBLISHER_ID = org.doxygen.Publisher
+DOCSET_PUBLISHER_NAME = Publisher
+
+GENERATE_HTMLHELP = NO
+CHM_FILE =
+HHC_LOCATION =
+GENERATE_CHI = NO
+CHM_INDEX_ENCODING =
+BINARY_TOC = NO
+TOC_EXPAND = NO
+
+GENERATE_QHP = NO
+QCH_FILE =
+QHP_NAMESPACE = org.doxygen.Project
+QHP_VIRTUAL_FOLDER = doc
+QHP_CUST_FILTER_NAME =
+QHP_CUST_FILTER_ATTRS =
+QHP_SECT_FILTER_ATTRS =
+QHG_LOCATION =
+
+GENERATE_ECLIPSEHELP = NO
+ECLIPSE_DOC_ID = org.doxygen.Project
+DISABLE_INDEX = NO
+GENERATE_TREEVIEW = NO
+ENUM_VALUES_PER_LINE = 4
+TREEVIEW_WIDTH = 250
+EXT_LINKS_IN_WINDOW = NO
+FORMULA_FONTSIZE = 10
+FORMULA_TRANSPARENT = YES
+USE_MATHJAX = NO
+MATHJAX_FORMAT = HTML-CSS
+MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest
+MATHJAX_EXTENSIONS =
+MATHJAX_CODEFILE =
+SEARCHENGINE = YES
+SERVER_BASED_SEARCH = NO
+EXTERNAL_SEARCH = NO
+SEARCHENGINE_URL =
+SEARCHDATA_FILE = searchdata.xml
+EXTERNAL_SEARCH_ID =
+EXTRA_SEARCH_MAPPINGS =
+
+GENERATE_LATEX = NO
+LATEX_OUTPUT = latex
+LATEX_CMD_NAME = latex
+MAKEINDEX_CMD_NAME = makeindex
+COMPACT_LATEX = NO
+PAPER_TYPE = a4
+EXTRA_PACKAGES =
+LATEX_HEADER =
+LATEX_FOOTER =
+LATEX_EXTRA_FILES =
+PDF_HYPERLINKS = YES
+USE_PDFLATEX = YES
+LATEX_BATCHMODE = NO
+LATEX_HIDE_INDICES = NO
+LATEX_SOURCE_CODE = NO
+LATEX_BIB_STYLE = plain
+
+GENERATE_RTF = NO
+RTF_OUTPUT = rtf
+COMPACT_RTF = NO
+RTF_HYPERLINKS = NO
+RTF_STYLESHEET_FILE =
+RTF_EXTENSIONS_FILE =
+
+GENERATE_MAN = NO
+MAN_OUTPUT = man
+MAN_EXTENSION = .3
+MAN_LINKS = NO
+
+GENERATE_XML = YES
+XML_OUTPUT = xml
+XML_SCHEMA =
+XML_DTD =
+XML_PROGRAMLISTING = NO
+
+GENERATE_DOCBOOK = NO
+DOCBOOK_OUTPUT = docbook
+
+GENERATE_AUTOGEN_DEF = NO
+
+GENERATE_PERLMOD = NO
+PERLMOD_LATEX = NO
+PERLMOD_PRETTY = YES
+PERLMOD_MAKEVAR_PREFIX =
+
+ENABLE_PREPROCESSING = YES
+MACRO_EXPANSION = YES
+EXPAND_ONLY_PREDEF = YES
+SEARCH_INCLUDES = YES
+INCLUDE_PATH =
+INCLUDE_FILE_PATTERNS = *.h
+PREDEFINED = COMPONENT_BUILD=1 \
+ SB_CAN(x)=1 \
+ SB_C_FORCE_INLINE=inline \
+ SB_C_INLINE=inline \
+ SB_DEPRECATED_EXTERNAL(x)=x \
+ SB_EXPORT= \
+ SB_HAS(x)=1 \
+ SB_IS(x)=1 \
+ STARBOARD_IMPLEMENTATION \
+ {predefined_macros}
+EXPAND_AS_DEFINED =
+SKIP_FUNCTION_MACROS = NO
+
+TAGFILES =
+GENERATE_TAGFILE =
+ALLEXTERNALS = NO
+EXTERNAL_GROUPS = NO
+EXTERNAL_PAGES = NO
+PERL_PATH = /usr/bin/perl
+
+CLASS_DIAGRAMS = NO
+MSCGEN_PATH =
+DIA_PATH =
+HIDE_UNDOC_RELATIONS = YES
+HAVE_DOT = NO
+DOT_NUM_THREADS = 0
+DOT_FONTNAME = Helvetica
+DOT_FONTSIZE = 10
+DOT_FONTPATH =
+CLASS_GRAPH = NO
+COLLABORATION_GRAPH = NO
+GROUP_GRAPHS = NO
+UML_LOOK = NO
+UML_LIMIT_NUM_FIELDS = 10
+TEMPLATE_RELATIONS = NO
+INCLUDE_GRAPH = NO
+INCLUDED_BY_GRAPH = NO
+CALL_GRAPH = NO
+CALLER_GRAPH = NO
+GRAPHICAL_HIERARCHY = NO
+DIRECTORY_GRAPH = NO
+DOT_IMAGE_FORMAT = png
+INTERACTIVE_SVG = NO
+DOT_PATH =
+DOTFILE_DIRS =
+MSCFILE_DIRS =
+DIAFILE_DIRS =
+DOT_GRAPH_MAX_NODES = 50
+MAX_DOT_GRAPH_DEPTH = 0
+DOT_TRANSPARENT = NO
+DOT_MULTI_TARGETS = NO
+GENERATE_LEGEND = YES
+DOT_CLEANUP = YES
diff --git a/cobalt/site/scripts/doxygen.py b/cobalt/site/scripts/doxygen.py
new file mode 100644
index 000000000000..499e25817160
--- /dev/null
+++ b/cobalt/site/scripts/doxygen.py
@@ -0,0 +1,172 @@
+# Copyright 2017 The Cobalt Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Python wrapper for Doxygen."""
+
+import errno
+import logging
+import os
+import re
+import subprocess
+
+import environment
+
+_MODULE_OVERVIEW_PATTERN = r'Module Overview: '
+_MODULE_OVERVIEW_RE = re.compile(_MODULE_OVERVIEW_PATTERN)
+
+
+def _subprocess(command, working_directory):
+ """Executes command in working_directory."""
+ if not os.path.isdir(working_directory):
+ raise RuntimeError(
+ f'Running {command}: directory {working_directory} not found')
+
+ try:
+    # shell=True so that Windows is able to find the executable in the PATH.
+ subprocess.check_output(
+ ' '.join(command),
+ shell=True,
+ cwd=working_directory,
+ stderr=subprocess.STDOUT)
+ return True
+ except subprocess.CalledProcessError as e:
+ logging.warning('%s: \"%s\" failed. Return Code: %d', working_directory,
+ e.cmd, e.returncode)
+ logging.debug('>>>\n%s\n<<<', e.output)
+ return False
+
+
+def _mkdirs(directory_path):
+ """Makes the given path and all ancestors necessary."""
+ try:
+ os.makedirs(directory_path)
+ except OSError as e:
+ if e.errno == errno.EEXIST and os.path.isdir(directory_path):
+ pass
+ else:
+ raise
+
+
+def _join_config(lines):
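+  """Joins config values into a Doxyfile list with line continuations."""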
+ return ' \\\n '.join(lines)
+
+
+def doxygen(project_number, input_files, predefined_macros,
+ output_directory_path):
+ """Runs doxygen with the given semantic parameters."""
+ doxyfile_template_path = os.path.join(environment.SCRIPTS_DIR,
+ 'doxyfile.template')
+ doxyfile_template = environment.read_file(doxyfile_template_path)
+ os.makedirs(output_directory_path)
+ predefined_macros = predefined_macros[:]
+ predefined_macros.append(f'SB_API_VERSION={project_number}')
+ doxyfile_contents = doxyfile_template.format(
+ **{
+ 'project_number': 1,
+ 'output_directory': output_directory_path,
+ 'input_files': _join_config(input_files),
+ 'predefined_macros': _join_config(predefined_macros),
+ })
+
+ doxyfile_path = os.path.join(output_directory_path, 'Doxyfile')
+ environment.write_file(doxyfile_path, doxyfile_contents)
+ _subprocess(['doxygen', doxyfile_path], output_directory_path)
+
+
+def _split(line):
+ """Splits a line into a (indentation, lstripped content) tuple."""
+ stripped = line.lstrip()
+ return line[:(len(line) - len(stripped))], stripped
+
+
+def _doxygenate_line(line):
+ """Adds an extra slash to a comment line."""
+ indent, stripped = _split(line)
+ return indent + '/' + stripped
+
+
+def _doxygenate_lines(lines):
+ """Makes a list of comment lines visible to Doxygen."""
+ if not lines:
+ return []
+ indent, _ = _split(lines[0])
+ return [_doxygenate_line(x) for x in lines] + [indent + '///']
+
+
+def _is_comment(line):
+ """Whether the given line is a comment."""
+ stripped = line.lstrip()
+ return stripped[0] == '/'
+
+
+def _find_end_of_block(line_iterator):
+ """Consumes the next comment block, returning (comment_list, terminator)."""
+ lines = []
+ last_line = None
+
+ any_lines = False
+ for line in line_iterator:
+ any_lines = True
+ if not line or not _is_comment(line):
+ last_line = line
+ break
+ lines.append(line)
+ if not any_lines:
+ raise StopIteration
+
+ return lines, last_line
+
+
+def doxygenate(input_file_paths, output_directory):
+ """Converts a list of source files into more doxygen-friendly files."""
+ common_prefix_path = os.path.commonprefix(input_file_paths)
+ output_file_paths = []
+ os.makedirs(output_directory)
+ for input_file_path in input_file_paths:
+ output_file_path = os.path.join(output_directory,
+ input_file_path[len(common_prefix_path):])
+ _mkdirs(os.path.dirname(output_file_path))
+ input_contents = environment.read_lines(input_file_path)
+ output_contents = []
+ line_iterator = iter(x.rstrip() for x in input_contents)
+ try:
+ # Remove copyright header.
+ _, _ = _find_end_of_block(line_iterator)
+
+ # Doxygenate module overview.
+ lines, last_line = _find_end_of_block(line_iterator)
+ if not lines:
+ continue
+ output_contents.append(f'/// \\file {os.path.basename(input_file_path)}')
+ if _MODULE_OVERVIEW_RE.search(lines[0]):
+ del lines[0]
+ output_contents.extend(_doxygenate_lines(lines))
+ output_contents.append(last_line)
+
+      # Consume and discard the next block.
+ lines, last_line = _find_end_of_block(line_iterator)
+
+ # Doxygenate the rest of the file, block by block.
+ while True:
+ lines, last_line = _find_end_of_block(line_iterator)
+ if not last_line:
+ continue
+ if lines:
+ output_contents.extend(_doxygenate_lines(lines))
+ output_contents.append(last_line)
+ except StopIteration:
+ pass
+
+ environment.write_file(output_file_path, '\n'.join(output_contents))
+ output_file_paths.append(output_file_path)
+ return output_file_paths
diff --git a/cobalt/site/scripts/environment.py b/cobalt/site/scripts/environment.py
new file mode 100644
index 000000000000..8680b5602bda
--- /dev/null
+++ b/cobalt/site/scripts/environment.py
@@ -0,0 +1,165 @@
+# Copyright 2017 The Cobalt Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Common variables and functions for site generation."""
+
+import argparse
+from contextlib import contextmanager
+import errno
+import logging
+import os
+import shutil
+import tempfile
+import textwrap
+
+SCRIPTS_DIR = os.path.abspath(os.path.dirname(__file__))
+COBALT_SOURCE_DIR = os.path.abspath(
+ os.path.join(*([SCRIPTS_DIR] + 3 * [os.pardir])))
+
+
+@contextmanager
+def mkdtemp(suffix='', prefix='tmp.', base_dir=None):
+ temp_directory_path = tempfile.mkdtemp(suffix, prefix, base_dir)
+ try:
+ yield temp_directory_path
+ finally:
+ if temp_directory_path:
+ try:
+ shutil.rmtree(temp_directory_path)
+ except IOError:
+ logging.warning('Failed to delete temp directory: %s',
+ temp_directory_path)
+ else:
+ logging.info('Deleted temp directory: %s', temp_directory_path)
+
+
+def read_lines(path):
+ with open(path, 'r', encoding='utf8') as contents:
+ return contents.readlines()
+
+
+def read_file(path):
+ with open(path, 'r', encoding='utf8') as contents:
+ return contents.read()
+
+
+def write_file(path, contents):
+ with open(path, 'w', encoding='utf8') as output:
+ output.write(contents)
+
+
+def setup_logging(default_level=logging.INFO):
+ logging_level = default_level
+ logging_format = '%(asctime)s.%(msecs)03d [%(levelname)-8s] %(message)s'
+ datetime_format = '%H:%M:%S'
+ logging.basicConfig(
+ level=logging_level, format=logging_format, datefmt=datetime_format)
+
+
+_LOG_LEVELS = [
+ logging.CRITICAL,
+ logging.ERROR,
+ logging.WARNING,
+ logging.INFO,
+ logging.DEBUG,
+]
+
+
+def set_log_level(log_delta):
+ """Sets the log level based on the log delta from default."""
+ logger = logging.getLogger()
+ try:
+ level_index = _LOG_LEVELS.index(logger.getEffectiveLevel())
+ except ValueError:
+ level_index = _LOG_LEVELS.index(logging.INFO)
+
+ level_index = min(len(_LOG_LEVELS) - 1, max(0, level_index + log_delta))
+ logging.getLogger().setLevel(_LOG_LEVELS[level_index])
+
+
+def parse_arguments(doc_string, argv):
+ parser = argparse.ArgumentParser(
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+ description=textwrap.dedent(doc_string))
+ parser.add_argument(
+ '-s',
+ '--source',
+ type=str,
+ default=COBALT_SOURCE_DIR,
+ help='The repository root that contains the source code to parse.')
+ parser.add_argument(
+ '-o',
+ '--out',
+ type=str,
+ default=None,
+ help='The root of the directory to output to.')
+ parser.add_argument(
+ '-v',
+ '--verbose',
+ dest='verbose_count',
+ default=0,
+ action='count',
+ help='Verbose level (multiple times for more).')
+ parser.add_argument(
+ '-q',
+ '--quiet',
+ dest='quiet_count',
+ default=0,
+ action='count',
+ help='Quietness level (multiple times for more).')
+ options = parser.parse_args(argv)
+ options.log_delta = options.verbose_count - options.quiet_count
+ return options
+
+
+def get_cobalt_dir(source_dir):
+ return os.path.join(source_dir, 'cobalt')
+
+
+def get_cobalt_build_dir(source_dir):
+ return os.path.join(get_cobalt_dir(source_dir), 'build')
+
+
+def get_site_dir(source_dir):
+ return os.path.join(get_cobalt_dir(source_dir), 'site')
+
+
+def get_starboard_dir(source_dir):
+ return os.path.join(source_dir, 'starboard')
+
+
+def get_starboard_build_dir(source_dir):
+ return os.path.join(get_starboard_dir(source_dir), 'build')
+
+
+def get_stub_platform_dir(source_dir):
+ return os.path.join(get_starboard_dir(source_dir), 'stub')
+
+
+def make_dirs(path):
+ """Make the specified directory and any parents in the path."""
+ if path and not os.path.isdir(path):
+ make_dirs(os.path.dirname(path))
+ try:
+ os.mkdir(path)
+ except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise
+ pass
+
+
+def make_clean_dirs(path):
+ """Make the specified directory and any parents in the path."""
+ make_dirs(os.path.dirname(path))
+ shutil.rmtree(path, ignore_errors=True)
+ make_dirs(path)
diff --git a/cobalt/site/scripts/generate_site.py b/cobalt/site/scripts/generate_site.py
new file mode 100755
index 000000000000..2d30d2464744
--- /dev/null
+++ b/cobalt/site/scripts/generate_site.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python3
+# Copyright 2023 The Cobalt Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Generate the Cobalt Developer site markdown.
+
+This script generates all documentation derived from source, placing
+the resulting markdown files in the specified output directory.
+"""
+
+import sys
+
+import cobalt_configuration_public
+import cobalt_documentation
+import cobalt_gn_configuration
+import cobalt_module_reference
+import environment
+
+
+def main(argv):
+ environment.setup_logging()
+ arguments = environment.parse_arguments(__doc__, argv)
+ environment.set_log_level(arguments.log_delta)
+
+ result = cobalt_configuration_public.main(arguments.source, arguments.out)
+ if result:
+ return result
+
+ result = cobalt_gn_configuration.main(arguments.source, arguments.out)
+ if result:
+ return result
+
+ result = cobalt_module_reference.generate(arguments.source, arguments.out)
+ if result:
+ return result
+
+ cobalt_documentation.copy_doc_locations(arguments.source, arguments.out)
+
+
+if __name__ == '__main__':
+ sys.exit(main(sys.argv[1:]))
diff --git a/cobalt/site/scripts/markdown_writer.py b/cobalt/site/scripts/markdown_writer.py
new file mode 100644
index 000000000000..a954e9e35305
--- /dev/null
+++ b/cobalt/site/scripts/markdown_writer.py
@@ -0,0 +1,359 @@
+# Copyright 2017 The Cobalt Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Provides an interface to produce valid, readable Markdown documents."""
+
+import contextlib
+import logging
+import textwrap
+
+_CONTEXT_NONE = 'none'
+_CONTEXT_LINK = 'link'
+_CONTEXT_LIST = 'list'
+_CONTEXT_ITEM = 'item'
+_CONTEXT_HEADING = 'heading'
+
+_LIST_TYPE_ORDERED = 'ordered'
+_LIST_TYPE_UNORDERED = 'unordered'
+
+# Ready to start a new paragraph.
+_STATE_BASE = 'base'
+
+# On an empty line with a paragraph on the previous line.
+_STATE_EMPTY = 'empty'
+
+# Inside a partial text block.
+_STATE_INLINE = 'inline'
+
+
+def _strip(string_or_none):
+ if string_or_none:
+ return string_or_none.strip()
+ return ''
+
+
+def _stripwrap(body, width):
+ return textwrap.fill(
+ _strip(body), width, break_long_words=False, replace_whitespace=False)
+
+
+def _get_list_prefix(list_type, depth):
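+  """Returns the indented list marker prefix for the given depth."""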
+ list_marker = '1.' if list_type == _LIST_TYPE_ORDERED else '*'
+ return f'{" " * depth}{list_marker:4.4}'
+
+
+class _Context(object):
+ """Stores info about current context in the document."""
+
+ def __init__(self, context_type, url=None, list_type=None, pending=False):
+ self.context_type = context_type
+ self.url = url
+ self.list_type = list_type
+ self.pending = pending
+
+ def test_and_clear_pending_list(self):
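+    """Returns True once per pending list item, clearing the flag."""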
+ if self.context_type == _CONTEXT_ITEM and self.pending:
+ self.pending = False
+ return True
+ return False
+
+
+class MarkdownWriter(object):
+ """Stateful wrapper for a file-like object to generate Markdown."""
+
+ def __init__(self, out, width=80, heading_level=0):
+ assert heading_level >= 0
+ self.out = out
+ self.width = width
+ self.base_heading_level = heading_level
+ self.heading_stack = []
+ self.context_stack = [_Context(_CONTEXT_NONE)]
+ self.state = _STATE_BASE
+ self.current_line = ''
+
+ def heading(self, levels=1):
+ logging.debug('MdW: heading(%d)', levels)
+ assert levels >= 1
+ self.context_stack.append(_Context(_CONTEXT_HEADING))
+ self.heading_stack.append(levels)
+ self._return_to_base()
+ self.text(('#' * self._get_heading_level()) + ' ', wrap=False)
+ assert self._get_heading_level() >= 1
+
+ def end_heading(self):
+ logging.debug('MdW: end_heading()')
+ assert self._get_context_type() == _CONTEXT_HEADING
+ self.context_stack.pop()
+ self._return_to_empty()
+
+ def pop_heading_level(self):
+ logging.debug('MdW: pop_heading_level()')
+ self.heading_stack.pop()
+ assert self._get_heading_level() >= 0
+
+ def code_block(self, contents, language=None):
+ logging.debug('MdW: code_block(%s, %s)', contents.replace('\n', '\\n'),
+ language)
+ self._return_to_base()
+ if not language:
+ language = ''
+ self.text(f'```{language}\n{contents}\n```\n\n', wrap=False)
+ self._set_state(_STATE_BASE, 'code block')
+
+ def ordered_list(self):
+ logging.debug('MdW: ordered_list()')
+ self._return_to_base()
+ self.context_stack.append(
+ _Context(_CONTEXT_LIST, list_type=_LIST_TYPE_ORDERED))
+
+ def unordered_list(self):
+ logging.debug('MdW: unordered_list()')
+ self._return_to_base()
+ self.context_stack.append(
+ _Context(_CONTEXT_LIST, list_type=_LIST_TYPE_UNORDERED))
+
+ def end_list(self):
+ logging.debug('MdW: end_list()')
+ assert self._get_context_type() == _CONTEXT_LIST
+ self.context_stack.pop()
+ self._return_to_base()
+
+ def item(self):
+ logging.debug('MdW: item()')
+ assert self._get_context_type() == _CONTEXT_LIST
+ self.context_stack.append(_Context(_CONTEXT_ITEM, pending=True))
+
+ def end_item(self):
+ logging.debug('MdW: end_item()')
+ assert self._get_context_type() == _CONTEXT_ITEM
+ self._return_to_empty()
+ self.context_stack.pop()
+
+ def link(self, url):
+ logging.debug('MdW: link(%s)', url)
+ assert url
+ self.context_stack.append(_Context(_CONTEXT_LINK, url=url))
+ self.text('[', wrap=True)
+
+ def end_link(self):
+ logging.debug('MdW: end_link()')
+ context = self._get_context()
+ assert context.context_type == _CONTEXT_LINK
+ self.text(f']({context.url})', wrap=False)
+ self.context_stack.pop()
+
+ def paragraph(self):
+ logging.debug('MdW: paragraph()')
+ self._return_to_base()
+
+ def end_paragraph(self):
+ logging.debug('MdW: end_paragraph()')
+ self._return_to_empty()
+
+ def code(self, contents, wrap=None):
+ self._style_text(contents.replace('`', '``'), '`', wrap)
+
+ def bold(self, contents, wrap=None):
+ self._style_text(contents, '**', wrap)
+
+ def highlight(self, contents, wrap=None):
+ self._style_text(contents, '==', wrap)
+
+ def italics(self, contents, wrap=None):
+ self._style_text(contents, '*', wrap)
+
+ def strikeout(self, contents, wrap=None):
+ self._style_text(contents, '~~', wrap)
+
+ def text(self, contents, wrap=None):
+ logging.debug('MdW: text(%s, %s)', contents.replace('\n', '\\n'), str(wrap))
+ if not contents or not contents.strip():
+ return
+
+ wrap = self._default_wrap(wrap)
+ if wrap:
+ contents = contents.strip()
+ if self.current_line and self.current_line[-1] != ' ':
+ contents = ' ' + contents
+ if not contents:
+ return
+ current_line_length = len(self.current_line)
+ contents = textwrap.fill(
+ self.current_line + contents,
+ self._get_wrap_width(),
+ break_long_words=False)
+ contents = contents[current_line_length:]
+
+ lines = contents.split('\n')
+ if not lines:
+ return
+
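+    # First physical line of the new text: a pending list item gets its list
+    # prefix written exactly once; otherwise, when a fresh line is started,
+    # indentation matching the current list depth is written first.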
+ if self.current_line or self.state in [_STATE_BASE, _STATE_EMPTY]:
+ if self._get_context().test_and_clear_pending_list():
+ self._return_to_empty()
+ self.out.write(self._get_list_prefix())
+ self._set_state(_STATE_INLINE, 'list prefix')
+ elif self.state in [_STATE_BASE, _STATE_EMPTY]:
+ indent = self._get_indent()
+ if indent:
+ self.out.write(indent)
+ self._set_state(_STATE_INLINE, 'indentation')
+ self.out.write(lines[0])
+ self.current_line += lines[0]
+ self._set_state(_STATE_INLINE, 'first line')
+ del lines[0]
+ if lines:
+ self.out.write('\n')
+ self.current_line = ''
+ self._set_state(_STATE_EMPTY, 'more lines')
+
+ if lines:
+ indent = self._get_indent()
+ conjunction = '\n' + indent
+ self.out.write(indent + conjunction.join(lines))
+ self.current_line = lines[-1]
+
+ if not self.current_line:
+ self._set_state(_STATE_EMPTY, 'no current line')
+ else:
+ self._set_state(_STATE_INLINE, 'current line')
+
+ @contextlib.contextmanager
+ def auto_heading(self, levels=1):
+ self.heading(levels=levels)
+ yield
+ self.end_heading()
+
+ @contextlib.contextmanager
+ def auto_scoped_heading(self, text, levels=1):
+ with self.auto_heading(levels):
+ self.text(text)
+ yield
+ self.pop_heading_level()
+
+ @contextlib.contextmanager
+ def auto_paragraph(self):
+ self.paragraph()
+ yield
+ self.end_paragraph()
+
+ @contextlib.contextmanager
+ def auto_link(self, url):
+ self.link(url)
+ yield
+ self.end_link()
+
+ @contextlib.contextmanager
+ def auto_unordered_list(self):
+ self.unordered_list()
+ yield
+ self.end_list()
+
+ @contextlib.contextmanager
+ def auto_ordered_list(self):
+ self.ordered_list()
+ yield
+ self.end_list()
+
+ @contextlib.contextmanager
+ def auto_item(self):
+ self.item()
+ yield
+ self.end_item()
+
+ def _get_list_context(self):
+ for context in reversed(self.context_stack):
+ if context.context_type == _CONTEXT_LIST:
+ return context
+ return None
+
+ def _get_list_type(self):
+ context = self._get_list_context()
+ if not context:
+ return None
+ return context.list_type
+
+ def _get_list_depth(self):
+ depth = 0
+ for context in self.context_stack:
+ if context.context_type == _CONTEXT_LIST:
+ depth += 1
+    return depth
+
+ def _get_context_type(self):
+ context = self._get_context()
+ if not context:
+ return None
+ return context.context_type
+
+ def _get_context(self):
+ if not self.context_stack:
+ return None
+ return self.context_stack[-1]
+
+ def _style_text(self, contents, style, wrap):
+ if not contents or not contents.strip():
+ return
+ self.text(f'{style}{contents}{style}', wrap)
+
+ def _return_to_empty(self):
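+    # Finish the current output line, if any, leaving the writer at the
+    # start of an empty line.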
+ if self.state in [_STATE_BASE, _STATE_EMPTY]:
+ return
+
+ if self.state == _STATE_INLINE:
+ self.out.write('\n')
+ self.current_line = ''
+ self._set_state(_STATE_EMPTY)
+ return
+
+    assert False, f'unexpected state: {self.state}'
+
+ def _return_to_base(self):
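+    # Finish the current line and emit the blank line that separates
+    # Markdown blocks.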
+ self._return_to_empty()
+
+ if self.state == _STATE_BASE:
+ return
+
+ if self.state == _STATE_EMPTY:
+ self.out.write('\n')
+ self._set_state(_STATE_BASE)
+ return
+
+    assert False, f'unexpected state: {self.state}'
+
+ def _set_state(self, state, label=''):
+ if self.state != state:
+ logging.debug('MdW: STATE%s: %s -> %s',
+ '(' + label + ')' if label else '', self.state, state)
+ self.state = state
+
+ def _get_indent(self):
+ return (' ' * 4) * self._get_list_depth()
+
+ def _get_heading_level(self):
+ return self.base_heading_level + sum(self.heading_stack)
+
+ def _get_list_prefix(self):
+ return _get_list_prefix(self._get_list_type(), self._get_list_depth() - 1)
+
+ def _get_wrap_width(self):
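+    # Each list level consumes four columns of indentation.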
+ return self.width - self._get_list_depth() * 4
+
+ def _default_wrap(self, wrap):
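+    # Unless the caller says otherwise, text inside links and headings is not
+    # wrapped, since a line break there can break the Markdown syntax.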
+ if wrap is not None:
+ return wrap
+ if self._get_context_type() in [_CONTEXT_LINK, _CONTEXT_HEADING]:
+ return False
+ return True
diff --git a/cobalt/site/scripts/markdown_writer_test.py b/cobalt/site/scripts/markdown_writer_test.py
new file mode 100755
index 000000000000..4de6bc98d48d
--- /dev/null
+++ b/cobalt/site/scripts/markdown_writer_test.py
@@ -0,0 +1,161 @@
+#!/usr/bin/python3
+# Copyright 2017 The Cobalt Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Tests the markdown_writer module."""
+
+import contextlib
+from io import StringIO
+import textwrap
+import unittest
+
+from cobalt.site.scripts import markdown_writer
+
+_LOREM_IPSUM_1 = ('Lorem ipsum dolor sit amet, consectetur adipiscing elit, '
+ 'sed do eiusmod tempor incididunt ut labore et dolore magna '
+ 'aliqua.')
+_LOREM_IPSUM_2 = ('Ut enim ad minim veniam, quis nostrud exercitation '
+ 'ullamco laboris nisi ut aliquip ex ea commodo consequat. '
+ 'Duis aute irure dolor in reprehenderit in voluptate velit '
+ 'esse cillum dolore eu fugiat nulla pariatur.')
+_LOREM_IPSUM_3 = ('Excepteur sint occaecat cupidatat non proident, sunt in '
+ 'culpa qui officia deserunt mollit anim id est laborum.')
+
+_LOREM_IPSUM = f'{_LOREM_IPSUM_1} {_LOREM_IPSUM_2} {_LOREM_IPSUM_3}'
+
+
+def _indent(text, size):
+ return _indent_behind(text, ' ' * size)
+
+
+def _indent_behind(text, prefix):
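+  # Places `prefix` before the first line and aligns continuation lines under
+  # it with spaces.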
+ if not text:
+ return text
+
+ indentation = '\n' + (' ' * len(prefix))
+ result = prefix + indentation.join(text.split('\n'))
+ return result
+
+
+def _wrap_behind(text, prefix, width):
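+  # Wraps `text` so that, once placed behind `prefix`, every line fits within
+  # `width` columns.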
+ if not text:
+ return text
+ return _indent_behind(textwrap.fill(text, width - len(prefix)), prefix)
+
+
+class MarkdownWriterTest(unittest.TestCase):
+ """Tests the markdown_writer module."""
+
+ def testBasic(self):
+ with contextlib.closing(StringIO()) as test_io:
+ out = markdown_writer.MarkdownWriter(test_io)
+ out.text(_LOREM_IPSUM_1)
+ out.text(_LOREM_IPSUM_2)
+ out.text(_LOREM_IPSUM_3)
+ out.text(_LOREM_IPSUM_1)
+ out.text(_LOREM_IPSUM_2)
+ out.text(_LOREM_IPSUM_3)
+ actual = test_io.getvalue()
+
+ expected = textwrap.fill(_LOREM_IPSUM + ' ' + _LOREM_IPSUM, 80)
+ self.assertEqual(expected, actual)
+
+ def testParagraph(self):
+ with contextlib.closing(StringIO()) as test_io:
+ out = markdown_writer.MarkdownWriter(test_io)
+ out.text(_LOREM_IPSUM_1)
+ out.text(_LOREM_IPSUM_2)
+ out.paragraph()
+ out.text(_LOREM_IPSUM_3)
+ out.text(_LOREM_IPSUM_1)
+ out.paragraph()
+ out.text(_LOREM_IPSUM_2)
+ out.text(_LOREM_IPSUM_3)
+ actual = test_io.getvalue()
+
+ expected = (
+ textwrap.fill(_LOREM_IPSUM_1 + ' ' + _LOREM_IPSUM_2, 80) + '\n\n' +
+ textwrap.fill(_LOREM_IPSUM_3 + ' ' + _LOREM_IPSUM_1, 80) + '\n\n' +
+ textwrap.fill(_LOREM_IPSUM_2 + ' ' + _LOREM_IPSUM_3, 80))
+ self.assertEqual(expected, actual)
+
+ def testListDepth1(self):
+ with contextlib.closing(StringIO()) as test_io:
+ out = markdown_writer.MarkdownWriter(test_io)
+ out.paragraph()
+ out.paragraph()
+ out.paragraph()
+ with out.auto_ordered_list():
+ with out.auto_item():
+ out.text(_LOREM_IPSUM_1)
+ out.text(_LOREM_IPSUM_2)
+ out.paragraph()
+ out.paragraph()
+ with out.auto_item():
+ out.paragraph()
+ out.text(_LOREM_IPSUM_3)
+ out.text(_LOREM_IPSUM_1)
+ with out.auto_item():
+ out.text(_LOREM_IPSUM_2)
+ out.paragraph()
+ out.text(_LOREM_IPSUM_3)
+ out.paragraph()
+ out.text(_LOREM_IPSUM_1)
+ actual = test_io.getvalue()
+ list_prefix = '1. '
+ expected = (
+ _wrap_behind(_LOREM_IPSUM_1 + ' ' + _LOREM_IPSUM_2, list_prefix, 80) +
+ '\n\n' +
+ _wrap_behind(_LOREM_IPSUM_3 + ' ' + _LOREM_IPSUM_1, list_prefix, 80) +
+ '\n' + _wrap_behind(_LOREM_IPSUM_2, list_prefix, 80) + '\n\n' +
+ _wrap_behind(_LOREM_IPSUM_3, ' ' * len(list_prefix), 80) + '\n\n' +
+ textwrap.fill(_LOREM_IPSUM_1, 80))
+ self.assertEqual(expected, actual)
+
+ def testListDepth2(self):
+ with contextlib.closing(StringIO()) as test_io:
+ out = markdown_writer.MarkdownWriter(test_io)
+ with out.auto_ordered_list():
+ with out.auto_item():
+ out.text(_LOREM_IPSUM_1)
+ with out.auto_unordered_list():
+ with out.auto_item():
+ out.text(_LOREM_IPSUM_2)
+ with out.auto_item():
+ out.text(_LOREM_IPSUM_3)
+ out.text(_LOREM_IPSUM_1)
+ with out.auto_item():
+ out.text(_LOREM_IPSUM_2)
+ with out.auto_unordered_list():
+ with out.auto_item():
+ out.text(_LOREM_IPSUM_3)
+ out.text(_LOREM_IPSUM_1)
+ with out.auto_item():
+ out.text(_LOREM_IPSUM_2)
+ out.text(_LOREM_IPSUM_3)
+ actual = test_io.getvalue()
+ list_prefix_1 = '1. '
+ list_prefix_2 = ' * '
+ expected = (
+ _wrap_behind(_LOREM_IPSUM_1, list_prefix_1, 80) + '\n\n' +
+ _wrap_behind(_LOREM_IPSUM_2, list_prefix_2, 80) + '\n' +
+ _wrap_behind(_LOREM_IPSUM_3 + ' ' + _LOREM_IPSUM_1, list_prefix_2, 80) +
+ '\n\n' + _wrap_behind(_LOREM_IPSUM_2, list_prefix_1, 80) + '\n\n' +
+ _wrap_behind(_LOREM_IPSUM_3 + ' ' + _LOREM_IPSUM_1, list_prefix_2, 80) +
+ '\n' + _wrap_behind(_LOREM_IPSUM_2, list_prefix_2, 80) + '\n\n' +
+ textwrap.fill(_LOREM_IPSUM_3, 80))
+ self.assertEqual(expected, actual)
+
+
+if __name__ == '__main__':
+ unittest.main()