diff --git a/.vscode/launch.json b/.vscode/launch.json index 18837656..02ecd063 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -8,12 +8,6 @@ "host": "${config:pygls.server.debugHost}", "port": "${config:pygls.server.debugPort}" }, - "pathMappings": [ - { - "localRoot": "${workspaceFolder}", - "remoteRoot": "." - } - ], "justMyCode": false }, { diff --git a/docs/assets/semantic-tokens-example.png b/docs/assets/semantic-tokens-example.png new file mode 100644 index 00000000..be9d3a36 Binary files /dev/null and b/docs/assets/semantic-tokens-example.png differ diff --git a/docs/generate_token_visualisation.py b/docs/generate_token_visualisation.py new file mode 100644 index 00000000..a11c245e --- /dev/null +++ b/docs/generate_token_visualisation.py @@ -0,0 +1,664 @@ +"""Script for generating the interactive semantic token display for the docs.""" + +from __future__ import annotations + +import argparse +import enum +import operator +import pathlib +import string +import textwrap +import typing +from dataclasses import dataclass, field +from functools import reduce + +if typing.TYPE_CHECKING: + from typing import List, Optional, Tuple, Type + + +class TokenModifier(enum.IntFlag): + """Represents a token modifier""" + + deprecated = enum.auto() + readonly = enum.auto() + defaultLibrary = enum.auto() + definition = enum.auto() + + +@dataclass +class Token: + line: int + offset: int + text: str + + tok_type: str + tok_modifiers: List[TokenModifier] = field(default_factory=list) + + @property + def tok_id(self): + return f"tok-{self.text}-{id(self)}" + + def __iter__(self): + return iter(self.text) + + def __len__(self): + return len(self.text) + + +POSITIONS_LAYOUT = string.Template( + """\ +
+
+ +
+
+ + +${table} + +
+
+${tokenCalcs} +
+
+
+ +""" +) + +TYPES_LAYOUT = string.Template( + """\ +
+
+ +
+
+ +
+
+ +""" +) + +MODIFIER_LAYOUT = string.Template( + """\ +
+
+ +
+
+ + + +${modifierTable} + +
+
+${tokenModifierCalcs} +
+
+
+ +""" +) + + +def main(dest: pathlib.Path): + token_types = ["variable", "number", "operator", "function"] + tokens = [ + Token(0, 0, "c", "variable", [TokenModifier.definition]), + Token(0, 2, "=", "operator"), + Token( + 0, + 2, + "sqrt", + "function", + [TokenModifier.deprecated, TokenModifier.defaultLibrary], + ), + Token(0, 4, "(", "operator"), + Token(1, 2, "a", "variable"), + Token(0, 1, "^", "operator"), + Token(0, 1, "2", "number"), + Token(0, 2, "+", "operator"), + Token(0, 2, "b", "variable", [TokenModifier.readonly]), + Token(0, 1, "^", "operator"), + Token(0, 1, "2", "number"), + Token(1, 0, ")", "operator"), + ] + + calcs, styles = render_token_position_calcs_and_styles(tokens) + + positions = dest / "positions.html" + positions.write_text( + POSITIONS_LAYOUT.substitute( + { + "table": textwrap.indent(render_table(tokens), indent(4)), + "tokenCalcs": textwrap.indent(calcs, indent(3)), + "tokenList": textwrap.indent(render_token_list(tokens), indent(3)), + "tokenStyles": textwrap.indent(styles, indent(1)), + } + ) + ) + + types = dest / "types.html" + types.write_text( + TYPES_LAYOUT.substitute( + { + "tokenTypes": textwrap.indent( + render_token_types(token_types), indent(3) + ), + "tokenList": textwrap.indent( + render_token_list(tokens, token_types), indent(3) + ), + "tokenStyles": textwrap.indent( + render_token_type_styles(tokens, token_types), indent(3) + ), + } + ) + ) + + modifier_list, modifier_table = render_token_modifiers(TokenModifier) + modifier_calcs, modifier_styles = render_token_modifier_calcs_and_styles( + tokens, TokenModifier + ) + + modifiers = dest / "modifiers.html" + modifiers.write_text( + MODIFIER_LAYOUT.substitute( + { + "tokenModifiers": textwrap.indent(modifier_list, indent(3)), + "modifierTable": textwrap.indent(modifier_table, indent(3)), + "tokenList": textwrap.indent( + render_token_list(tokens, token_types, include_modifiers=True), + indent(3), + ), + "tokenStyles": textwrap.indent(modifier_styles, indent(3)), + "tokenModifierCalcs": textwrap.indent(modifier_calcs, indent(3)), + } + ) + ) + + +cli = argparse.ArgumentParser() +cli.add_argument( + "-o", "--output", type=pathlib.Path, default="-", help="the directory to write to" +) + + +def indent(level: int) -> str: + return level * 2 * " " + + +def tokens_to_lines(tokens: list[Token]) -> list[list[str | Token]]: + """Convert a list of tokens into the corresponding text""" + text: list[list[str | Token]] = [] + current_line: list[str | Token] = [] + + for token in tokens: + if token.line > 0: + text.append(current_line) + current_line = [] + + prev_token = current_line[-1] if len(current_line) > 0 else "" + if (padding := token.offset - len(prev_token)) > 0: + current_line.append(padding * " ") + + current_line.append(token) + + # Commit the final line + text.append(current_line) + + return text + + +def render_header_row(width: int) -> str: + """Render the width of the table, render the character indices.""" + result = [""] + + # Padding for the line number + result.append(f'{indent(1)}') + + # Render the column numbers + for idx in range(width - 1): + result.append(f'{indent(1)}{idx}') + + result.append("") + + return "\n".join(result) + + +def render_text_row(line: int, text: list[str | Token], width: int) -> str: + """Render the given line of text as a row of table cells.""" + result = [""] + + # Render the line number + result.append(f'{indent(1)}{line}') + + # Render the text + for token in text: + for char in token: + token_id = f' data-tok="{token.tok_id}"' if isinstance(token, Token) 
else "" + result.append(f'{indent(1)}{char.strip()}') + + # Don't forget to pad the line to the max line length. + remaining_cols = width - (sum(map(len, text)) + 1) + for _ in range(remaining_cols): + result.append(f'{indent(1)}') + + result.append("") + + return "\n".join(result) + + +def render_table(tokens: list[Token]) -> str: + """Given a list of tokens, render the corresponding text as a grid of characters.""" + text = tokens_to_lines(tokens) + width = max(sum(map(len, line)) for line in text) + 1 + + rows = [render_header_row(width)] + for linum, line in enumerate(text): + rows.append(render_text_row(linum + 1, line, width)) + + return "\n".join(rows) + + +def render_token_list( + tokens: list[Token], + token_types: Optional[List[str]] = None, + include_modifiers: bool = False, +) -> str: + """Render the list of tokens. + + Parameters + ---------- + tokens + The list of tokens to render + + token_types + If set, also render a column depicting the token's type + + include_modifiers + If ``True``, also render a column depicting the token's modifiers + """ + lines = [] + + for token in tokens: + lines.append(f'
  • ') + lines.append(f"{indent(1)}{token.text}") + lines.append(f"{indent(1)}{token.line}") + lines.append(f"{indent(1)}{token.offset}") + lines.append(f"{indent(1)}{len(token)}") + + if token_types is not None: + tok_type = token_types.index(token.tok_type) + lines.append(f'{indent(1)}{tok_type}') + + if include_modifiers: + value = 0 + if len(token.tok_modifiers) > 0: + value = reduce(operator.or_, token.tok_modifiers) + + lines.append(f"{indent(1)}{value}") + + lines.append("
  • ") + + return "\n".join(lines) + + +def render_token_types(token_types: List[str]) -> str: + """Render the list of token types.""" + lines = [] + + for idx, tok_type in enumerate(token_types): + lines.append(f'
  • ') + lines.append(f"{indent(1)}{idx}") + lines.append(f'{indent(1)}{tok_type}') + lines.append("
  • ") + + return "\n".join(lines) + + +def render_token_modifiers(modifiers: Type[enum.IntFlag]) -> Tuple[str, str]: + """Render the list of token modifiers.""" + list_lines = [] + + for idx, modifier in enumerate(modifiers): + list_lines.append(f'
  • ') + list_lines.append(f"{indent(1)}{idx}") + list_lines.append( + f'{indent(1)}{modifier.name}' + ) + list_lines.append("
  • ") + + idx_cells: List[str] = [] + value_cells: List[str] = [] + + for idx, modifier in enumerate(modifiers): + idx_cells.insert(0, f'{idx}') + value_cells.insert( + 0, f'{modifier.value}' + ) + + idx_cells.insert(0, "Index") + value_cells.insert(0, "2Index") + + table_lines = [ + "", + textwrap.indent("\n".join(idx_cells), indent(1)), + "", + "", + textwrap.indent("\n".join(value_cells), indent(1)), + "", + ] + + return "\n".join(list_lines), "\n".join(table_lines) + + +def render_token_type_styles(tokens: List[Token], token_types: List[str]) -> str: + """Render the CSS styles for the list of token types.""" + lines = [] + + for token in tokens: + tok_id = token.tok_id + tok_type = token_types.index(token.tok_type) + lines.extend( + [ + f'#tok-types li[data-tok="{tok_id}"]:hover,', + f'#tok-types:has([data-tok="{tok_id}"]:hover) li[data-type="{tok_type}"] {{', + f"{indent(1)}border: solid 1px;", + f"{indent(1)}background: var(--tok-highlight-color);", + "}", + "", + ] + ) + return "\n".join(lines) + + +def render_position_calculation( + name: str, token_id: str, previous: int, delta: int +) -> str: + """Render the given calculation.""" + current = previous + delta + lines = [ + f'

    ', + f"{indent(1)}{name}", + f"{indent(1)}=", + f"{indent(1)}", + f'{indent(2)}{current}', + f"{indent(2)}-", + f'{indent(2)}{previous}', + f"{indent(1)}", + f"{indent(1)}=", + f"{indent(1)}{current - previous}", + "

    ", + ] + + return "\n".join(lines) + + +def render_token_position_calcs_and_styles(tokens: list[Token]) -> Tuple[str, str]: + """Render the set of line and offset calculations.""" + calc_lines = [] + style_lines = [] + + prev_line = 1 + prev_offset = 0 + + for token in tokens: + tok_id = token.tok_id + + style_lines.extend( + [ + f'#tok-positions:has([data-tok="{tok_id}"]:hover) li[data-tok="{tok_id}"],', + f'#tok-positions:has([data-tok="{tok_id}"]:hover) td[data-tok="{tok_id}"] {{', + f"{indent(1)}border: solid 1px;", + f"{indent(1)}background: var(--tok-highlight-color);", + "}", + "", + f'#tok-positions:has([data-tok="{tok_id}"]:hover) td[data-line="{prev_line}"] {{', + f"{indent(1)}background: var(--tok-before-color);", + f"{indent(1)}color: white;", + "}", + "", + f'#tok-positions:has([data-tok="{tok_id}"]:hover) td[data-line="{prev_line + token.line}"] {{', + f"{indent(1)}background: var(--tok-after-color);", + f"{indent(1)}color: white;", + "}", + "", + f'#tok-positions:has([data-tok="{tok_id}"]:hover) p[data-tok="{tok_id}"] {{', + f"{indent(1)}display: grid;", + "}", + "", + ] + ) + + calc_lines.append( + render_position_calculation("Line", tok_id, prev_line, token.line) + ) + + if token.line > 0: + prev_line += token.line + prev_offset = 0 + + style_lines.extend( + [ + f'#tok-positions:has([data-tok="{tok_id}"]:hover) td[data-offset="{prev_offset}"] {{', + f"{indent(1)}background: var(--tok-before-color);", + f"{indent(1)}color: white;", + "}", + "", + f'#tok-positions:has([data-tok="{tok_id}"]:hover) td[data-offset="{prev_offset + token.offset}"] {{', + f"{indent(1)}background: var(--tok-after-color);", + f"{indent(1)}color: white;", + "}", + "", + ] + ) + + calc_lines.append( + render_position_calculation("Offset", tok_id, prev_offset, token.offset) + ) + prev_offset += token.offset + + return "\n".join(calc_lines), "\n".join(style_lines) + + +def render_token_modifier_calcs_and_styles( + tokens: list[Token], modifiers: Type[enum.IntFlag] +) -> Tuple[str, str]: + """Render the set of modifier calculations.""" + calc_lines = [] + style_lines = [] + mod_index = {mod.value: i for i, mod in enumerate(modifiers)} + + for token in tokens: + tok_id = token.tok_id + + style_lines.extend( + [ + f'#tok-modifiers:has([data-tok="{tok_id}"]:hover) li[data-tok="{tok_id}"] {{', + f"{indent(1)}border: solid 1px;", + f"{indent(1)}background: var(--tok-highlight-color);", + "}", + "", + ] + ) + + if len(token.tok_modifiers) > 0: + total = reduce(operator.or_, token.tok_modifiers) + sum_ = " + ".join(str(m.value) for m in token.tok_modifiers) + calc_lines.append(f'

    {total} = {sum_}

    ')
+
+            style_lines.extend(
+                [
+                    f'#tok-modifiers:has([data-tok="{tok_id}"]:hover) #tok-modifier-calcs p[data-tok="{tok_id}"] {{',
+                    f"{indent(1)}display: block;",
+                    "}",
+                    "",
+                ]
+            )
+
+            for modifier in token.tok_modifiers:
+                mod_id = mod_index[modifier.value]
+                style_lines.extend(
+                    [
+                        f'#tok-modifiers:has([data-tok="{tok_id}"]:hover) li[data-mod="{mod_id}"],',
+                        f'#tok-modifiers:has([data-tok="{tok_id}"]:hover) td[data-mod="{mod_id}"] {{',
+                        f"{indent(1)}border: solid 1px;",
+                        f"{indent(1)}background: var(--tok-highlight-color);",
+                        "}",
+                        "",
+                    ]
+                )
+
+    return "\n".join(calc_lines), "\n".join(style_lines)
+
+
+if __name__ == "__main__":
+    args = cli.parse_args()
+
+    main(args.output)
diff --git a/docs/source/examples/semantic-tokens.rst b/docs/source/examples/semantic-tokens.rst
new file mode 100644
index 00000000..22d31dba
--- /dev/null
+++ b/docs/source/examples/semantic-tokens.rst
@@ -0,0 +1,7 @@
+.. _example-semantic-tokens:
+
+Semantic Tokens
+===============
+
+.. example-server:: semantic_tokens.py
+   :start-at: import enum
diff --git a/docs/source/getting-started.rst b/docs/source/getting-started.rst
index d421314a..e7ad2351 100644
--- a/docs/source/getting-started.rst
+++ b/docs/source/getting-started.rst
@@ -98,6 +98,13 @@ Each of the following example servers are focused on implementing a particular s
 
       :octicon:`pencil`
 
+   .. grid-item-card:: Semantic Tokens
+      :link: /examples/semantic-tokens
+      :link-type: doc
+      :text-align: center
+
+      :octicon:`file-binary`
+
    .. grid-item-card:: Symbols
       :link: /examples/symbols
       :link-type: doc
diff --git a/docs/source/howto.rst b/docs/source/howto.rst
index dd539571..b2fb5f43 100644
--- a/docs/source/howto.rst
+++ b/docs/source/howto.rst
@@ -5,5 +5,6 @@ How To Guides
    :maxdepth: 1
 
    Handle Invalid Data
+   Implement Semantic Tokens
    Migrate to v1
    Use the pygls-playground
diff --git a/docs/source/howto/implement-semantic-tokens.rst b/docs/source/howto/implement-semantic-tokens.rst
new file mode 100644
index 00000000..12ab467c
--- /dev/null
+++ b/docs/source/howto/implement-semantic-tokens.rst
@@ -0,0 +1,112 @@
+.. _howto-semantic-tokens:
+
+How To Implement Semantic Tokens
+================================
+
+Semantic Tokens can be thought of as "Syntax Highlighting++".
+
+Traditional syntax highlighting is usually implemented as a large collection of :mod:`regular expressions <re>` and can use the language's grammar rules to tell the difference between, say, a string, variable or function.
+
+However, regular expressions are not powerful enough to tell if
+
+- a variable is read-only
+- a given function is deprecated
+- a class is part of the language's standard library
+
+This is where the *Semantic* part of Semantic Tokens comes in.
+
+How are tokens represented?
+---------------------------
+
+Unlike most parts of the Language Server Protocol, semantic tokens are not represented by a structured object with nicely named fields.
+Instead, each token is represented by a sequence of 5 integers::
+
+    [0, 2, 1, 0, 3, 0, 4, 2, 1, 0, ...]
+     ^-----------^  ^-----------^
+       1st token      2nd token    etc.
+
+In order to explain their meaning, it's probably best to work with an example.
+Let's consider the following code snippet::
+
+    c = sqrt(
+      a^2 + b^2
+    )
+
+Token Position
+--------------
+
+The first three numbers are dedicated to encoding a token's position in the document.
+
+The first 2 integers encode the line and character offsets of the token, while the third encodes its length.
+The trick, however, is that these offsets are **relative to the position of the start of the previous token**.
+
+*Hover over each of the tokens below to see how their offsets are computed*
+
+.. raw:: html
+   :file: tokens/positions.html
+
+Some additional notes:
+
+- For the ``c`` token, there was no previous token, so its position is calculated relative to ``(0, 0)``
+- For the tokens ``a`` and ``)``, moving to a new line resets the column offset, so it's calculated relative to ``0``
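+
+Putting this together, here is a minimal sketch of how these relative positions
+can be computed (an illustration only; the token tuples are hypothetical and
+not part of pygls)::
+
+    # (line, column, text) triples for the snippet above, using absolute,
+    # zero-based positions
+    tokens = [
+        (0, 0, "c"), (0, 2, "="), (0, 4, "sqrt"), (0, 8, "("),
+        (1, 2, "a"), (1, 3, "^"), (1, 4, "2"), (1, 6, "+"),
+        (1, 8, "b"), (1, 9, "^"), (1, 10, "2"),
+        (2, 0, ")"),
+    ]
+
+    data = []
+    prev_line = prev_col = 0
+
+    for line, col, text in tokens:
+        delta_line = line - prev_line
+
+        # the column offset is only relative while we stay on the same line
+        delta_col = col - prev_col if delta_line == 0 else col
+
+        data.extend([delta_line, delta_col, len(text)])
+        prev_line, prev_col = line, col
+
+    # data[:15] == [0, 0, 1,  0, 2, 1,  0, 2, 4,  0, 4, 1,  1, 2, 1]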
+
+Token Types
+-----------
+
+The 4th number represents the token's type.
+A type indicates if a given token represents a string, variable, function, etc.
+
+When a server declares it supports semantic tokens (as part of the :lsp:`initialize` request) it must send the client a :class:`~lsprotocol.types.SemanticTokensLegend` which includes a list of token types that the server will use.
+
+.. tip::
+
+   See :lsp:`semanticTokenTypes` in the specification for a list of all predefined types.
+
+To encode a token's type, the 4th number should be set to the index of the corresponding type in the :attr:`SemanticTokensLegend.token_types <lsprotocol.types.SemanticTokensLegend.token_types>` list sent to the client.
+
+*Hover over each of the tokens below to see their corresponding type*
+
+.. raw:: html
+   :file: ./tokens/types.html
+
+Token Modifiers
+---------------
+
+So far, we have only managed to re-create traditional syntax highlighting.
+It's only with the 5th and final number that we get to the semantic part of semantic tokens.
+
+Tokens can have zero or more modifiers applied to them that provide additional context, such as marking a token as deprecated or read-only.
+As with the token types above, a server must include a list of the modifiers it is going to use as part of its :class:`~lsprotocol.types.SemanticTokensLegend`.
+
+.. tip::
+
+   See :lsp:`semanticTokenModifiers` in the specification for a list of all predefined modifiers.
+
+However, since we can provide more than one modifier and we only have one number to do it with, the encoding cannot be as simple as the list index of the modifier(s) we wish to apply.
+
+To quote the specification:
+
+.. pull-quote::
+
+   Since a token type can have n modifiers, multiple token modifiers can be set by using bit flags, so a tokenModifier value of 3 is first viewed as binary ``0b00000011``, which means ``[tokenModifiers[0], tokenModifiers[1]]`` because bits ``0`` and ``1`` are set.
+
+*Hover over each of the tokens below to see how their modifiers are computed*
+
+.. raw:: html
+   :file: ./tokens/modifiers.html
+
+Finally! We have managed to construct the values we need to apply semantic tokens to the snippet of code we considered at the start.
+
+.. figure:: ../../assets/semantic-tokens-example.png
+   :align: center
+
+   Our semantic tokens example implemented in VSCode
+
+.. seealso::
+
+   :ref:`Example Server <example-semantic-tokens>`
+      An example implementation of semantic tokens
+
+   :lsp:`textDocument/semanticTokens`
+      Semantic tokens in the LSP Specification
diff --git a/docs/source/howto/tokens/modifiers.html b/docs/source/howto/tokens/modifiers.html
new file mode 100644
index 00000000..ed4587e6
--- /dev/null
+++ b/docs/source/howto/tokens/modifiers.html
@@ -0,0 +1,280 @@
+<!-- Interactive display for the "Token Modifiers" section of the how-to guide:
+     the tokens c, =, sqrt, (, a, ^, 2, +, b, ^, 2, ) with their
+     Line/Offset/Length/Type/Modifier values (c: 8, sqrt: 5, b: 2, others: 0),
+     the modifier legend (0 deprecated, 1 readonly, 2 defaultLibrary,
+     3 definition), the Index / 2^Index table (bit values 8, 4, 2, 1) and the
+     hover calculations 8 = 8, 5 = 1 + 4 and 2 = 2. -->
    + diff --git a/docs/source/howto/tokens/positions.html b/docs/source/howto/tokens/positions.html new file mode 100644 index 00000000..cee66744 --- /dev/null +++ b/docs/source/howto/tokens/positions.html @@ -0,0 +1,827 @@ +
+<!-- Interactive display for the "Token Position" section of the how-to guide:
+     the tokens c, =, sqrt, (, a, ^, 2, +, b, ^, 2, ) with their
+     Line/Offset/Length values, a character grid of the snippet
+     "c = sqrt( / a^2 + b^2 / )" and, for each token, the hover calculations
+     showing how its Line and Offset deltas are derived from the previous
+     token's position. -->
    + diff --git a/docs/source/howto/tokens/types.html b/docs/source/howto/tokens/types.html new file mode 100644 index 00000000..2a3b6542 --- /dev/null +++ b/docs/source/howto/tokens/types.html @@ -0,0 +1,216 @@ +
+<!-- Interactive display for the "Token Types" section of the how-to guide:
+     the tokens c, =, sqrt, (, a, ^, 2, +, b, ^, 2, ) with their
+     Line/Offset/Length/Type values and the type legend (0 variable, 1 number,
+     2 operator, 3 function). -->
diff --git a/examples/servers/json_server.py b/examples/servers/json_server.py
index c3827285..5e6949e6 100644
--- a/examples/servers/json_server.py
+++ b/examples/servers/json_server.py
@@ -33,7 +33,6 @@
 import argparse
 import asyncio
 import json
-import re
 import time
 import uuid
 from json import JSONDecodeError
@@ -204,37 +203,6 @@ async def did_open(ls, params: lsp.DidOpenTextDocumentParams):
     _validate(ls, params)
 
 
-@json_server.feature(
-    lsp.TEXT_DOCUMENT_SEMANTIC_TOKENS_FULL,
-    lsp.SemanticTokensLegend(token_types=["operator"], token_modifiers=[]),
-)
-def semantic_tokens(ls: JsonLanguageServer, params: lsp.SemanticTokensParams):
-    """See https://microsoft.github.io/language-server-protocol/specification#textDocument_semanticTokens
-    for details on how semantic tokens are encoded."""
-
-    TOKENS = re.compile('".*"(?=:)')
-
-    uri = params.text_document.uri
-    doc = ls.workspace.get_document(uri)
-
-    last_line = 0
-    last_start = 0
-
-    data = []
-
-    for lineno, line in enumerate(doc.lines):
-        last_start = 0
-
-        for match in TOKENS.finditer(line):
-            start, end = match.span()
-            data += [(lineno - last_line), (start - last_start), (end - start), 0, 0]
-
-        last_line = lineno
-        last_start = start
-
-    return lsp.SemanticTokens(data=data)
-
-
 @json_server.feature(lsp.TEXT_DOCUMENT_INLINE_VALUE)
 def inline_value(params: lsp.InlineValueParams):
     """Returns inline value."""
diff --git a/examples/servers/semantic_tokens.py b/examples/servers/semantic_tokens.py
new file mode 100644
index 00000000..54e2c3ba
--- /dev/null
+++ b/examples/servers/semantic_tokens.py
@@ -0,0 +1,303 @@
+############################################################################
+# Copyright(c) Open Law Library. All rights reserved.                      #
+# See ThirdPartyNotices.txt in the project root for additional notices.    #
+#                                                                          #
+# Licensed under the Apache License, Version 2.0 (the "License")           #
+# you may not use this file except in compliance with the License.         #
+# You may obtain a copy of the License at                                  #
+#                                                                          #
+#     http: // www.apache.org/licenses/LICENSE-2.0                         #
+#                                                                          #
+# Unless required by applicable law or agreed to in writing, software      #
+# distributed under the License is distributed on an "AS IS" BASIS,        #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and      #
+# limitations under the License.                                           #
+############################################################################
+"""This example implements the various semantic token requests from the specification.
+
+Tokens are sent to the client as a long list of numbers; each group of 5 numbers
+describes a single token.
+
+- The first 3 numbers describe the token's line number, character index and length,
+  **relative to the start of the previous token**
+- The 4th number describes a token's type
+- The 5th number specifies zero or more modifiers to apply to a token
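+
+For example, given the legend this server sends to the client (``keyword`` is
+type ``0``, ``function`` is type ``2`` and ``definition`` is the modifier with
+bit value ``8``), the text ``fn area`` is encoded as::
+
+    [0, 0, 2, 0, 0,  0, 3, 4, 2, 8]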
+
+.. seealso::
+
+   :ref:`howto-semantic-tokens`
+      For a detailed guide on how tokens are represented
+"""
+
+import enum
+import logging
+import operator
+import re
+from functools import reduce
+from typing import Dict
+from typing import List
+from typing import Optional
+
+import attrs
+from lsprotocol import types
+
+from pygls.server import LanguageServer
+from pygls.workspace import TextDocument
+
+
+class TokenModifier(enum.IntFlag):
+    deprecated = enum.auto()
+    readonly = enum.auto()
+    defaultLibrary = enum.auto()
+    definition = enum.auto()
+
+
+@attrs.define
+class Token:
+    line: int
+    offset: int
+    text: str
+
+    tok_type: str = ""
+    tok_modifiers: List[TokenModifier] = attrs.field(factory=list)
+
+
+TokenTypes = ["keyword", "variable", "function", "operator", "parameter", "type"]
+
+SYMBOL = re.compile(r"\w+")
+OP = re.compile(r"->|[\{\}\(\)\.,+:*-=]")
+SPACE = re.compile(r"\s+")
+
+KEYWORDS = {"type", "fn"}
+
+
+def is_type(token: Optional[Token]) -> bool:
+    if token is None:
+        return False
+
+    return token.text == "type" and token.tok_type == "keyword"
+
+
+def is_fn(token: Optional[Token]) -> bool:
+    if token is None:
+        return False
+
+    return token.text == "fn" and token.tok_type == "keyword"
+
+
+def is_lparen(token: Optional[Token]) -> bool:
+    if token is None:
+        return False
+
+    return token.text == "(" and token.tok_type == "operator"
+
+
+def is_rparen(token: Optional[Token]) -> bool:
+    if token is None:
+        return False
+
+    return token.text == ")" and token.tok_type == "operator"
+
+
+def is_lbrace(token: Optional[Token]) -> bool:
+    if token is None:
+        return False
+
+    return token.text == "{" and token.tok_type == "operator"
+
+
+def is_rbrace(token: Optional[Token]) -> bool:
+    if token is None:
+        return False
+
+    return token.text == "}" and token.tok_type == "operator"
+
+
+def is_colon(token: Optional[Token]) -> bool:
+    if token is None:
+        return False
+
+    return token.text == ":" and token.tok_type == "operator"
+
+
+class SemanticTokensServer(LanguageServer):
+    """Language server demonstrating the semantic token methods from the LSP
+    specification."""
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.tokens: Dict[str, List[Token]] = {}
+
+    def parse(self, doc: TextDocument):
+        """Convert the given document into a list of tokens"""
+        tokens = self.lex(doc)
+        self.classify_tokens(tokens)
+
+        # logging.info("%s", tokens)
+        self.tokens[doc.uri] = tokens
+
+    def classify_tokens(self, tokens: List[Token]):
+        """Given a list of tokens, determine their type and modifiers."""
+
+        def prev(idx):
+            """Get the previous token, if possible"""
+            if idx <= 0:
+                # The first token in the stream has no previous token
+                return None
+
+            return tokens[idx - 1]
+
+        def next(idx):
+            """Get the next token, if possible"""
+            if idx >= len(tokens) - 1:
+                return None
+
+            return tokens[idx + 1]
+
+        in_brace = False
+        in_paren = False
+
+        for idx, token in enumerate(tokens):
+            if token.tok_type == "operator":
+                if is_lparen(token):
+                    in_paren = True
+
+                elif is_rparen(token):
+                    in_paren = False
+
+                elif is_lbrace(token):
+                    in_brace = True
+
+                elif is_rbrace(token):
+                    in_brace = False
+
+                continue
+
+            if token.text in KEYWORDS:
+                token.tok_type = "keyword"
+
+            elif token.text[0].isupper():
+                token.tok_type = "type"
+
+                if is_type(prev(idx)):
+                    token.tok_modifiers.append(TokenModifier.definition)
+
+            elif is_fn(prev(idx)) or is_lparen(next(idx)):
+                token.tok_type = "function"
+                token.tok_modifiers.append(TokenModifier.definition)
+
+            elif is_colon(next(idx)) and in_brace:
+                token.tok_type = "parameter"
+
+            elif is_colon(prev(idx)) and in_paren:
+                token.tok_type = "type"
+                token.tok_modifiers.append(TokenModifier.defaultLibrary)
+
+            else:
+                token.tok_type = "variable"
+
+    def lex(self, doc: TextDocument) -> List[Token]:
+        """Convert the given document into a list of tokens"""
+        tokens = []
+
+        prev_line = 0
+        prev_offset = 0
+
+        for current_line, line in enumerate(doc.lines):
+            prev_offset = current_offset = 0
+            chars_left = len(line)
+
+            while line:
+                if (match := SPACE.match(line)) is not None:
+                    # Skip whitespace
+                    current_offset += len(match.group(0))
+                    line = line[match.end() :]
+
+                elif (match := SYMBOL.match(line)) is not None:
+                    tokens.append(
+                        Token(
+                            line=current_line - prev_line,
+                            offset=current_offset - prev_offset,
+                            text=match.group(0),
+                        )
+                    )
+
+                    line = line[match.end() :]
+                    prev_offset = current_offset
+                    prev_line = current_line
+                    current_offset += len(match.group(0))
+
+                elif (match := OP.match(line)) is not None:
+                    tokens.append(
+                        Token(
+                            line=current_line - prev_line,
+                            offset=current_offset - prev_offset,
+                            text=match.group(0),
+                            tok_type="operator",
+                        )
+                    )
+
+                    line = line[match.end() :]
+                    prev_offset = current_offset
+                    prev_line = current_line
+                    current_offset += len(match.group(0))
+
+                else:
+                    raise RuntimeError(f"No match: {line!r}")
+
+                # Make sure we don't hit an infinite loop
+                if (n := len(line)) == chars_left:
+                    raise RuntimeError("Infinite loop detected")
+                else:
+                    chars_left = n
+
+        return tokens
+
+
+server = SemanticTokensServer("semantic-tokens-server", "v1")
+
+
+@server.feature(types.TEXT_DOCUMENT_DID_OPEN)
+def did_open(ls: SemanticTokensServer, params: types.DidOpenTextDocumentParams):
+    """Parse each document when it is opened"""
+    doc = ls.workspace.get_text_document(params.text_document.uri)
+    ls.parse(doc)
+
+
+@server.feature(types.TEXT_DOCUMENT_DID_CHANGE)
+def did_change(ls: SemanticTokensServer, params: types.DidChangeTextDocumentParams):
+    """Parse each document when it is changed"""
+    doc = ls.workspace.get_text_document(params.text_document.uri)
+    ls.parse(doc)
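+
+# A worked illustration (not part of the protocol itself): a token carrying
+# both the ``deprecated`` (bit value 1) and ``defaultLibrary`` (bit value 4)
+# modifiers is encoded by the reduction below as 1 | 4 == 5, matching the
+# ``5 = 1 + 4`` calculation shown in the how-to guide:
+#
+#   reduce(operator.or_, [TokenModifier.deprecated, TokenModifier.defaultLibrary], 0) == 5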
+
+@server.feature(
+    types.TEXT_DOCUMENT_SEMANTIC_TOKENS_FULL,
+    types.SemanticTokensLegend(
+        token_types=TokenTypes,
+        token_modifiers=[m.name for m in TokenModifier],
+    ),
+)
+def semantic_tokens_full(ls: SemanticTokensServer, params: types.SemanticTokensParams):
+    """Return the semantic tokens for the entire document"""
+    data = []
+    tokens = ls.tokens.get(params.text_document.uri, [])
+
+    for token in tokens:
+        data.extend(
+            [
+                token.line,
+                token.offset,
+                len(token.text),
+                TokenTypes.index(token.tok_type),
+                reduce(operator.or_, token.tok_modifiers, 0),
+            ]
+        )
+
+    return types.SemanticTokens(data=data)
+
+
+if __name__ == "__main__":
+    logging.basicConfig(level=logging.INFO, format="%(message)s")
+
+    server.start_io()
diff --git a/tests/e2e/test_semantic_tokens.py b/tests/e2e/test_semantic_tokens.py
new file mode 100644
index 00000000..2058d39a
--- /dev/null
+++ b/tests/e2e/test_semantic_tokens.py
@@ -0,0 +1,121 @@
+############################################################################
+# Copyright(c) Open Law Library. All rights reserved.                      #
+# See ThirdPartyNotices.txt in the project root for additional notices.    #
+#                                                                          #
+# Licensed under the Apache License, Version 2.0 (the "License")           #
+# you may not use this file except in compliance with the License.
# +# You may obtain a copy of the License at # +# # +# http: // www.apache.org/licenses/LICENSE-2.0 # +# # +# Unless required by applicable law or agreed to in writing, software # +# distributed under the License is distributed on an "AS IS" BASIS, # +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # +# See the License for the specific language governing permissions and # +# limitations under the License. # +############################################################################ +from __future__ import annotations + +import typing + +import pytest +import pytest_asyncio +from lsprotocol import types + +if typing.TYPE_CHECKING: + from typing import List, Tuple + + from pygls.lsp.client import BaseLanguageClient + + +@pytest_asyncio.fixture(scope="module") +async def semantic_tokens(get_client_for): + async for result in get_client_for("semantic_tokens.py"): + yield result + + +@pytest.mark.parametrize( + "text,expected", + [ + # Just a handful of cases to check we've got the basics right + ("fn", [0, 0, 2, 0, 0]), + ("type", [0, 0, 4, 0, 0]), + ("Rectangle", [0, 0, 9, 5, 0]), + ( + "type Rectangle", + [ + # fmt: off + 0, 0, 4, 0, 0, + 0, 5, 9, 5, 8, + # fmt: on + ], + ), + ( + "fn area", + [ + # fmt: off + 0, 0, 2, 0, 0, + 0, 3, 4, 2, 8, + # fmt: on + ], + ), + ( + "fn\n area", + [ + # fmt: off + 0, 0, 2, 0, 0, + 1, 1, 4, 2, 8, + # fmt: on + ], + ), + ], +) +@pytest.mark.asyncio(scope="module") +async def test_semantic_tokens_full( + semantic_tokens: Tuple[BaseLanguageClient, types.InitializeResult], + uri_for, + path_for, + text: str, + expected: List[int], +): + """Ensure that the example semantic tokens server is working as expected.""" + client, initialize_result = semantic_tokens + + semantic_tokens_options = initialize_result.capabilities.semantic_tokens_provider + assert semantic_tokens_options.full is True + + legend = semantic_tokens_options.legend + assert legend.token_types == [ + "keyword", + "variable", + "function", + "operator", + "parameter", + "type", + ] + assert legend.token_modifiers == [ + "deprecated", + "readonly", + "defaultLibrary", + "definition", + ] + + test_uri = uri_for("code.txt") + + client.text_document_did_open( + types.DidOpenTextDocumentParams( + types.TextDocumentItem( + uri=test_uri, + language_id="plaintext", + version=0, + text=text, + ) + ) + ) + + response = await client.text_document_semantic_tokens_full_async( + types.SemanticTokensParams( + text_document=types.TextDocumentIdentifier(uri=test_uri), + ) + ) + assert response.data == expected diff --git a/tests/lsp/semantic_tokens/__init__.py b/tests/lsp/semantic_tokens/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/lsp/semantic_tokens/test_delta_missing_legend.py b/tests/lsp/semantic_tokens/test_delta_missing_legend.py deleted file mode 100644 index a3069da5..00000000 --- a/tests/lsp/semantic_tokens/test_delta_missing_legend.py +++ /dev/null @@ -1,92 +0,0 @@ -############################################################################ -# Copyright(c) Open Law Library. All rights reserved. # -# See ThirdPartyNotices.txt in the project root for additional notices. # -# # -# Licensed under the Apache License, Version 2.0 (the "License") # -# you may not use this file except in compliance with the License. 
# -# You may obtain a copy of the License at # -# # -# http: // www.apache.org/licenses/LICENSE-2.0 # -# # -# Unless required by applicable law or agreed to in writing, software # -# distributed under the License is distributed on an "AS IS" BASIS, # -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # -# See the License for the specific language governing permissions and # -# limitations under the License. # -############################################################################ -from typing import Optional, Union - -from lsprotocol.types import ( - TEXT_DOCUMENT_SEMANTIC_TOKENS_FULL_DELTA, -) -from lsprotocol.types import ( - SemanticTokens, - SemanticTokensDeltaParams, - SemanticTokensLegend, - SemanticTokensPartialResult, - SemanticTokensOptionsFullType1, - TextDocumentIdentifier, -) - -from ...conftest import ClientServer - - -class ConfiguredLS(ClientServer): - def __init__(self): - super().__init__() - - @self.server.feature( - TEXT_DOCUMENT_SEMANTIC_TOKENS_FULL_DELTA, - SemanticTokensLegend( - token_types=["keyword", "operator"], token_modifiers=["readonly"] - ), - ) - def f( - params: SemanticTokensDeltaParams, - ) -> Union[SemanticTokensPartialResult, Optional[SemanticTokens]]: - if params.text_document.uri == "file://return.tokens": - return SemanticTokens(data=[0, 0, 3, 0, 0]) - - -@ConfiguredLS.decorate() -def test_capabilities(client_server): - _, server = client_server - capabilities = server.server_capabilities - - provider = capabilities.semantic_tokens_provider - assert provider.full == SemanticTokensOptionsFullType1(delta=True) - assert provider.legend.token_types == [ - "keyword", - "operator", - ] - assert provider.legend.token_modifiers == ["readonly"] - - -@ConfiguredLS.decorate() -def test_semantic_tokens_full_delta_return_tokens(client_server): - client, _ = client_server - response = client.lsp.send_request( - TEXT_DOCUMENT_SEMANTIC_TOKENS_FULL_DELTA, - SemanticTokensDeltaParams( - text_document=TextDocumentIdentifier(uri="file://return.tokens"), - previous_result_id="id", - ), - ).result() - - assert response - - assert response.data == [0, 0, 3, 0, 0] - - -@ConfiguredLS.decorate() -def test_semantic_tokens_full_delta_return_none(client_server): - client, _ = client_server - response = client.lsp.send_request( - TEXT_DOCUMENT_SEMANTIC_TOKENS_FULL_DELTA, - SemanticTokensDeltaParams( - text_document=TextDocumentIdentifier(uri="file://return.none"), - previous_result_id="id", - ), - ).result() - - assert response is None diff --git a/tests/lsp/semantic_tokens/test_delta_missing_legend_none.py b/tests/lsp/semantic_tokens/test_delta_missing_legend_none.py deleted file mode 100644 index 6f4fa17d..00000000 --- a/tests/lsp/semantic_tokens/test_delta_missing_legend_none.py +++ /dev/null @@ -1,48 +0,0 @@ -############################################################################ -# Copyright(c) Open Law Library. All rights reserved. # -# See ThirdPartyNotices.txt in the project root for additional notices. # -# # -# Licensed under the Apache License, Version 2.0 (the "License") # -# you may not use this file except in compliance with the License. # -# You may obtain a copy of the License at # -# # -# http: // www.apache.org/licenses/LICENSE-2.0 # -# # -# Unless required by applicable law or agreed to in writing, software # -# distributed under the License is distributed on an "AS IS" BASIS, # -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# -# See the License for the specific language governing permissions and # -# limitations under the License. # -############################################################################ -from typing import Optional, Union - -from lsprotocol.types import ( - SemanticTokens, - SemanticTokensDeltaParams, - SemanticTokensPartialResult, -) -from lsprotocol.types import ( - TEXT_DOCUMENT_SEMANTIC_TOKENS_FULL_DELTA, -) - -from ...conftest import ClientServer - - -class ConfiguredLS(ClientServer): - def __init__(self): - super().__init__() - - @self.server.feature(TEXT_DOCUMENT_SEMANTIC_TOKENS_FULL_DELTA) - def f( - params: SemanticTokensDeltaParams, - ) -> Union[SemanticTokensPartialResult, Optional[SemanticTokens]]: - return SemanticTokens(data=[0, 0, 3, 0, 0]) - - -@ConfiguredLS.decorate() -def test_capabilities(client_server): - _, server = client_server - capabilities = server.server_capabilities - - assert capabilities.semantic_tokens_provider is None - assert capabilities.semantic_tokens_provider is None diff --git a/tests/lsp/semantic_tokens/test_full_missing_legend.py b/tests/lsp/semantic_tokens/test_full_missing_legend.py deleted file mode 100644 index e18dbde3..00000000 --- a/tests/lsp/semantic_tokens/test_full_missing_legend.py +++ /dev/null @@ -1,46 +0,0 @@ -############################################################################ -# Copyright(c) Open Law Library. All rights reserved. # -# See ThirdPartyNotices.txt in the project root for additional notices. # -# # -# Licensed under the Apache License, Version 2.0 (the "License") # -# you may not use this file except in compliance with the License. # -# You may obtain a copy of the License at # -# # -# http: // www.apache.org/licenses/LICENSE-2.0 # -# # -# Unless required by applicable law or agreed to in writing, software # -# distributed under the License is distributed on an "AS IS" BASIS, # -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # -# See the License for the specific language governing permissions and # -# limitations under the License. # -############################################################################ -from typing import Optional, Union - -from lsprotocol.types import ( - TEXT_DOCUMENT_SEMANTIC_TOKENS_FULL, -) -from lsprotocol.types import ( - SemanticTokens, - SemanticTokensPartialResult, - SemanticTokensParams, -) - -from ...conftest import ClientServer - - -class ConfiguredLS(ClientServer): - def __init__(self): - super().__init__() - - @self.server.feature(TEXT_DOCUMENT_SEMANTIC_TOKENS_FULL) - def f( - params: SemanticTokensParams, - ) -> Union[SemanticTokensPartialResult, Optional[SemanticTokens]]: - return SemanticTokens(data=[0, 0, 3, 0, 0]) - - -@ConfiguredLS.decorate() -def test_capabilities(client_server): - _, server = client_server - capabilities = server.server_capabilities - assert capabilities.semantic_tokens_provider is None diff --git a/tests/lsp/semantic_tokens/test_range.py b/tests/lsp/semantic_tokens/test_range.py deleted file mode 100644 index a65504b6..00000000 --- a/tests/lsp/semantic_tokens/test_range.py +++ /dev/null @@ -1,103 +0,0 @@ -############################################################################ -# Copyright(c) Open Law Library. All rights reserved. # -# See ThirdPartyNotices.txt in the project root for additional notices. # -# # -# Licensed under the Apache License, Version 2.0 (the "License") # -# you may not use this file except in compliance with the License. 
# -# You may obtain a copy of the License at # -# # -# http: // www.apache.org/licenses/LICENSE-2.0 # -# # -# Unless required by applicable law or agreed to in writing, software # -# distributed under the License is distributed on an "AS IS" BASIS, # -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # -# See the License for the specific language governing permissions and # -# limitations under the License. # -############################################################################ -from typing import Optional, Union - -from lsprotocol.types import ( - TEXT_DOCUMENT_SEMANTIC_TOKENS_RANGE, -) -from lsprotocol.types import ( - Position, - Range, - SemanticTokens, - SemanticTokensLegend, - SemanticTokensPartialResult, - SemanticTokensRangeParams, - TextDocumentIdentifier, -) - -from ...conftest import ClientServer - -SemanticTokenReturnType = Optional[ - Union[SemanticTokensPartialResult, Optional[SemanticTokens]] -] - - -class ConfiguredLS(ClientServer): - def __init__(self): - super().__init__() - - @self.server.feature( - TEXT_DOCUMENT_SEMANTIC_TOKENS_RANGE, - SemanticTokensLegend( - token_types=["keyword", "operator"], token_modifiers=["readonly"] - ), - ) - def f( - params: SemanticTokensRangeParams, - ) -> SemanticTokenReturnType: - if params.text_document.uri == "file://return.tokens": - return SemanticTokens(data=[0, 0, 3, 0, 0]) - - -@ConfiguredLS.decorate() -def test_capabilities(client_server): - _, server = client_server - capabilities = server.server_capabilities - - provider = capabilities.semantic_tokens_provider - assert provider.range - assert provider.legend.token_types == [ - "keyword", - "operator", - ] - assert provider.legend.token_modifiers == ["readonly"] - - -@ConfiguredLS.decorate() -def test_semantic_tokens_range_return_tokens(client_server): - client, _ = client_server - response = client.lsp.send_request( - TEXT_DOCUMENT_SEMANTIC_TOKENS_RANGE, - SemanticTokensRangeParams( - text_document=TextDocumentIdentifier(uri="file://return.tokens"), - range=Range( - start=Position(line=0, character=0), - end=Position(line=10, character=80), - ), - ), - ).result() - - assert response - - assert response.data == [0, 0, 3, 0, 0] - - -@ConfiguredLS.decorate() -def test_semantic_tokens_range_return_none(client_server): - client, _ = client_server - response = client.lsp.send_request( - TEXT_DOCUMENT_SEMANTIC_TOKENS_RANGE, - SemanticTokensRangeParams( - text_document=TextDocumentIdentifier(uri="file://return.none"), - range=Range( - start=Position(line=0, character=0), - end=Position(line=10, character=80), - ), - ), - ).result() - - assert response is None diff --git a/tests/lsp/semantic_tokens/test_range_missing_legends.py b/tests/lsp/semantic_tokens/test_range_missing_legends.py deleted file mode 100644 index 69780efa..00000000 --- a/tests/lsp/semantic_tokens/test_range_missing_legends.py +++ /dev/null @@ -1,47 +0,0 @@ -############################################################################ -# Copyright(c) Open Law Library. All rights reserved. # -# See ThirdPartyNotices.txt in the project root for additional notices. # -# # -# Licensed under the Apache License, Version 2.0 (the "License") # -# you may not use this file except in compliance with the License. 
# -# You may obtain a copy of the License at # -# # -# http: // www.apache.org/licenses/LICENSE-2.0 # -# # -# Unless required by applicable law or agreed to in writing, software # -# distributed under the License is distributed on an "AS IS" BASIS, # -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # -# See the License for the specific language governing permissions and # -# limitations under the License. # -############################################################################ -from typing import Optional, Union - -from lsprotocol.types import ( - TEXT_DOCUMENT_SEMANTIC_TOKENS_RANGE, -) -from lsprotocol.types import ( - SemanticTokens, - SemanticTokensParams, - SemanticTokensPartialResult, -) - -from ...conftest import ClientServer - - -class ConfiguredLS(ClientServer): - def __init__(self): - super().__init__() - - @self.server.feature(TEXT_DOCUMENT_SEMANTIC_TOKENS_RANGE) - def f( - params: SemanticTokensParams, - ) -> Union[SemanticTokensPartialResult, Optional[SemanticTokens]]: - return SemanticTokens(data=[0, 0, 3, 0, 0]) - - -@ConfiguredLS.decorate() -def test_capabilities(client_server): - _, server = client_server - capabilities = server.server_capabilities - - assert capabilities.semantic_tokens_provider is None diff --git a/tests/lsp/semantic_tokens/test_semantic_tokens_full.py b/tests/lsp/semantic_tokens/test_semantic_tokens_full.py deleted file mode 100644 index dba9fa68..00000000 --- a/tests/lsp/semantic_tokens/test_semantic_tokens_full.py +++ /dev/null @@ -1,93 +0,0 @@ -############################################################################ -# Copyright(c) Open Law Library. All rights reserved. # -# See ThirdPartyNotices.txt in the project root for additional notices. # -# # -# Licensed under the Apache License, Version 2.0 (the "License") # -# you may not use this file except in compliance with the License. # -# You may obtain a copy of the License at # -# # -# http: // www.apache.org/licenses/LICENSE-2.0 # -# # -# Unless required by applicable law or agreed to in writing, software # -# distributed under the License is distributed on an "AS IS" BASIS, # -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # -# See the License for the specific language governing permissions and # -# limitations under the License. 
# -############################################################################ -from typing import Optional, Union - -from lsprotocol.types import ( - TEXT_DOCUMENT_SEMANTIC_TOKENS_FULL, -) -from lsprotocol.types import ( - SemanticTokens, - SemanticTokensLegend, - SemanticTokensParams, - SemanticTokensPartialResult, - TextDocumentIdentifier, -) - -from ...conftest import ClientServer - -SemanticTokenReturnType = Optional[ - Union[SemanticTokensPartialResult, Optional[SemanticTokens]] -] - - -class ConfiguredLS(ClientServer): - def __init__(self): - super().__init__() - - @self.server.feature( - TEXT_DOCUMENT_SEMANTIC_TOKENS_FULL, - SemanticTokensLegend( - token_types=["keyword", "operator"], token_modifiers=["readonly"] - ), - ) - def f( - params: SemanticTokensParams, - ) -> SemanticTokenReturnType: - if params.text_document.uri == "file://return.tokens": - return SemanticTokens(data=[0, 0, 3, 0, 0]) - - -@ConfiguredLS.decorate() -def test_capabilities(client_server): - _, server = client_server - capabilities = server.server_capabilities - - provider = capabilities.semantic_tokens_provider - assert provider.full - assert provider.legend.token_types == [ - "keyword", - "operator", - ] - assert provider.legend.token_modifiers == ["readonly"] - - -@ConfiguredLS.decorate() -def test_semantic_tokens_full_return_tokens(client_server): - client, _ = client_server - response = client.lsp.send_request( - TEXT_DOCUMENT_SEMANTIC_TOKENS_FULL, - SemanticTokensParams( - text_document=TextDocumentIdentifier(uri="file://return.tokens") - ), - ).result() - - assert response - - assert response.data == [0, 0, 3, 0, 0] - - -@ConfiguredLS.decorate() -def test_semantic_tokens_full_return_none(client_server): - client, _ = client_server - response = client.lsp.send_request( - TEXT_DOCUMENT_SEMANTIC_TOKENS_FULL, - SemanticTokensParams( - text_document=TextDocumentIdentifier(uri="file://return.none") - ), - ).result() - - assert response is None