
Commit

features/#373 Allow any section name on toplevel (#373)
Allocate all top-level sections into one token, add support for unknown top-level sections, and add tests
Kirill Golovin authored and committed Aug 6, 2021
1 parent 698a405 commit d97f6d9
Showing 24 changed files with 369 additions and 216 deletions.
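
For orientation, a hypothetical example (not taken from this commit) of the kind of Snakefile input the change targets: a top-level section name the plugin does not recognize, followed by a colon and an argument, should now be lexed and parsed as a workflow top-level section instead of falling back to plain Python parsing.

    // Hypothetical test fixture; "some_new_section" is deliberately not a known keyword.
    val snakefileWithUnknownSection = """
        configfile: "config.yaml"
        some_new_section: "any value"

        rule all:
            input: "result.txt"
    """.trimIndent()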
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -8,6 +8,7 @@ Released on ...

### Changed
- Show argument preview for int indexes completion in `output[i]` (see [#378](https://github.com/JetBrains-Research/snakecharm/issues/378))
- Allocation of all top-level sections into one token and support for unknown top-level sections (see [#373](https://github.com/JetBrains-Research/snakecharm/issues/373))
- TODO (see [#NNN](https://github.com/JetBrains-Research/snakecharm/issues/NNN))

### Fixed
@@ -17,6 +17,7 @@ import com.jetbrains.python.psi.*
import com.jetbrains.snakecharm.codeInsight.SnakemakeAPI
import com.jetbrains.snakecharm.codeInsight.SnakemakeAPI.RULE_OR_CHECKPOINT_ARGS_SECTION_KEYWORDS
import com.jetbrains.snakecharm.lang.parser.SnakemakeLexer.Companion.KEYWORDS
import com.jetbrains.snakecharm.lang.parser.SnakemakeLexer.Companion.TOPLEVEL_KEYWORDS
import com.jetbrains.snakecharm.lang.psi.*

open class SmkStatementMover : PyStatementMover() {
@@ -161,7 +162,8 @@ open class SmkStatementMover : PyStatementMover() {
}

if ((((elementToMove is SmkRuleOrCheckpointArgsSection &&
elementToMove.sectionKeyword !in KEYWORDS) ||
elementToMove.sectionKeyword !in KEYWORDS &&
elementToMove.sectionKeyword!! !in TOPLEVEL_KEYWORDS) ||
(elementToMove is SmkRunSection)) &&
((!down && statements.first() == elementToMove)
|| (down && statements.last() == elementToMove))) ||
@@ -9,7 +9,6 @@ import com.intellij.patterns.PlatformPatterns
import com.intellij.patterns.PlatformPatterns.psiElement
import com.intellij.patterns.PsiElementPattern
import com.intellij.patterns.StandardPatterns
import com.intellij.profile.codeInspection.InspectionProfileManager
import com.intellij.psi.PsiComment
import com.intellij.psi.PsiElement
import com.intellij.psi.util.PsiTreeUtil
@@ -20,7 +19,6 @@ import com.jetbrains.python.codeInsight.completion.PythonLookupElement
import com.jetbrains.python.psi.*
import com.jetbrains.snakecharm.codeInsight.SnakemakeAPI.RULE_OR_CHECKPOINT_SECTION_KEYWORDS
import com.jetbrains.snakecharm.codeInsight.SnakemakeAPI.SUBWORKFLOW_SECTIONS_KEYWORDS
import com.jetbrains.snakecharm.inspections.SmkUnrecognizedSectionInspection
import com.jetbrains.snakecharm.lang.SnakemakeLanguageDialect
import com.jetbrains.snakecharm.lang.parser.SmkTokenTypes.RULE_LIKE
import com.jetbrains.snakecharm.lang.parser.SmkTokenTypes.WORKFLOW_TOPLEVEL_DECORATORS_WO_RULE_LIKE
@@ -92,17 +90,18 @@ object WorkflowTopLevelKeywordsProvider : CompletionProvider<CompletionParameter
return
}

val tokenType2Name = SnakemakeLexer.KEYWORDS
.map { (k, v) -> v to k }
.toMap()

val tokenType2Name = SnakemakeLexer.KEYWORDS_2_TEXT
val colonAndWhiteSpaceTailKeys = WORKFLOW_TOPLEVEL_DECORATORS_WO_RULE_LIKE.types.map { tt ->
tokenType2Name[tt]!!
} + SnakemakeLexer.TOPLEVEL_KEYWORDS
val spaceTailKeys = RULE_LIKE.types.map { tt ->
tokenType2Name[tt]!!
}
listOf(
WORKFLOW_TOPLEVEL_DECORATORS_WO_RULE_LIKE to ColonAndWhiteSpaceTail,
RULE_LIKE to TailType.SPACE
colonAndWhiteSpaceTailKeys to ColonAndWhiteSpaceTail,
spaceTailKeys to TailType.SPACE
).forEach { (tokenSet, tail) ->
tokenSet.types.forEach { tt ->
val s = tokenType2Name[tt]!!

tokenSet.forEach { s ->
result.addElement(
SmkCompletionUtil.createPrioritizedLookupElement(
TailTypeDecorator.withTail(
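
Roughly, the new lookup wiring above can be thought of as pairing every keyword string with the tail that completion appends after it (a simplified sketch with assumed keyword lists and tail strings, not the plugin's actual API):

    // Illustrative only: rule-like keywords are followed by a name, so they get a space tail;
    // every other top-level section keyword gets a colon-and-whitespace tail.
    val ruleLikeKeywords = listOf("rule", "checkpoint", "subworkflow")        // assumed contents
    val otherToplevelKeywords = listOf("configfile", "include", "localrules") // assumed contents
    val keywordToTail: Map<String, String> =
        otherToplevelKeywords.associateWith { ": " } + ruleLikeKeywords.associateWith { " " }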
@@ -7,6 +7,7 @@ import com.jetbrains.python.PyTokenTypes
import com.jetbrains.python.findUsages.PythonFindUsagesProvider
import com.jetbrains.snakecharm.codeInsight.resolve.SmkFakePsiElement
import com.jetbrains.snakecharm.lang.parser.SmkTokenTypes.WORKFLOW_TOPLEVEL_DECORATORS
import com.jetbrains.snakecharm.lang.parser.SmkTokenTypes.WORKFLOW_TOPLEVEL_DECORATOR_KEYWORD
import com.jetbrains.snakecharm.lang.parser.SnakemakeLexer
import com.jetbrains.snakecharm.lang.psi.SmkCheckPoint
import com.jetbrains.snakecharm.lang.psi.SmkRule
@@ -17,7 +18,7 @@ import com.jetbrains.snakecharm.stringLanguage.lang.psi.SmkSLReferenceExpression
/**
* Provides correct usages types for Snakemake specific elements, should be executed before Python impl.
*/
class SmkAndSmkSLFindUsagesProvider: PythonFindUsagesProvider() {
class SmkAndSmkSLFindUsagesProvider : PythonFindUsagesProvider() {
override fun getWordsScanner() = SmkWordsScanner()

override fun getNodeText(element: PsiElement, useFullName: Boolean) = getDescriptiveName(element)
@@ -41,13 +42,17 @@ class SmkAndSmkSLFindUsagesProvider: PythonFindUsagesProvider() {
override fun getHelpId(psiElement: PsiElement) = null

override fun canFindUsagesFor(element: PsiElement): Boolean {
return element is SmkSection || element is SmkFakePsiElement || element is SmkSLReferenceExpressionImpl
return element is SmkSection || element is SmkFakePsiElement || element is SmkSLReferenceExpressionImpl
}
}

class SmkWordsScanner : DefaultWordsScanner(
SnakemakeLexer(),
TokenSet.orSet(WORKFLOW_TOPLEVEL_DECORATORS, TokenSet.create(PyTokenTypes.IDENTIFIER)),
TokenSet.create(PyTokenTypes.END_OF_LINE_COMMENT),
PyTokenTypes.STRING_NODES
SnakemakeLexer(),
TokenSet.orSet(
WORKFLOW_TOPLEVEL_DECORATORS,
TokenSet.create(WORKFLOW_TOPLEVEL_DECORATOR_KEYWORD),
TokenSet.create(PyTokenTypes.IDENTIFIER)
),
TokenSet.create(PyTokenTypes.END_OF_LINE_COMMENT),
PyTokenTypes.STRING_NODES
)
@@ -3,6 +3,7 @@ package com.jetbrains.snakecharm.lang
import com.intellij.psi.tree.TokenSet
import com.jetbrains.python.PythonDialectsTokenSetContributorBase
import com.jetbrains.snakecharm.lang.parser.SmkTokenTypes.WORKFLOW_TOPLEVEL_DECORATORS
import com.jetbrains.snakecharm.lang.parser.SmkTokenTypes.WORKFLOW_TOPLEVEL_DECORATOR_KEYWORD
import com.jetbrains.snakecharm.lang.psi.elementTypes.SmkElementTypes
import com.jetbrains.snakecharm.lang.psi.elementTypes.SmkElementTypes.SMK_PY_REFERENCE_EXPRESSION
import com.jetbrains.snakecharm.lang.psi.elementTypes.SmkStubElementTypes
@@ -13,21 +14,21 @@ import com.jetbrains.snakecharm.lang.psi.elementTypes.SmkStubElementTypes
*/
class SmkTokenSetContributor : PythonDialectsTokenSetContributorBase() {
override fun getStatementTokens() = TokenSet.create(
SmkElementTypes.WORKFLOW_ARGS_SECTION_STATEMENT,
SmkElementTypes.WORKFLOW_LOCALRULES_SECTION_STATEMENT,
SmkElementTypes.WORKFLOW_RULEORDER_SECTION_STATEMENT,
SmkElementTypes.WORKFLOW_PY_BLOCK_SECTION_STATEMENT,
SmkElementTypes.WORKFLOW_ARGS_SECTION_STATEMENT,
SmkElementTypes.WORKFLOW_LOCALRULES_SECTION_STATEMENT,
SmkElementTypes.WORKFLOW_RULEORDER_SECTION_STATEMENT,
SmkElementTypes.WORKFLOW_PY_BLOCK_SECTION_STATEMENT,

SmkStubElementTypes.RULE_DECLARATION_STATEMENT,
SmkStubElementTypes.CHECKPOINT_DECLARATION_STATEMENT,
SmkElementTypes.RULE_OR_CHECKPOINT_ARGS_SECTION_STATEMENT,
SmkStubElementTypes.RULE_DECLARATION_STATEMENT,
SmkStubElementTypes.CHECKPOINT_DECLARATION_STATEMENT,
SmkElementTypes.RULE_OR_CHECKPOINT_ARGS_SECTION_STATEMENT,

SmkStubElementTypes.SUBWORKFLOW_DECLARATION_STATEMENT,
SmkElementTypes.SUBWORKFLOW_ARGS_SECTION_STATEMENT
SmkStubElementTypes.SUBWORKFLOW_DECLARATION_STATEMENT,
SmkElementTypes.SUBWORKFLOW_ARGS_SECTION_STATEMENT
)

override fun getExpressionTokens() = TokenSet.create(
SmkElementTypes.REFERENCE_EXPRESSION, SMK_PY_REFERENCE_EXPRESSION
SmkElementTypes.REFERENCE_EXPRESSION, SMK_PY_REFERENCE_EXPRESSION
)

/**
@@ -41,9 +42,12 @@ class SmkTokenSetContributor : PythonDialectsTokenSetContributorBase() {

override fun getReferenceExpressionTokens() = TokenSet.create(SMK_PY_REFERENCE_EXPRESSION)

override fun getFunctionDeclarationTokens()= TokenSet.EMPTY!!
override fun getFunctionDeclarationTokens() = TokenSet.EMPTY!!

override fun getUnbalancedBracesRecoveryTokens(): TokenSet {
return WORKFLOW_TOPLEVEL_DECORATORS
return TokenSet.orSet(
WORKFLOW_TOPLEVEL_DECORATORS,
TokenSet.create(WORKFLOW_TOPLEVEL_DECORATOR_KEYWORD)
)
}
}
@@ -2,12 +2,18 @@ package com.jetbrains.snakecharm.lang.highlighter

import com.jetbrains.python.lexer.PythonHighlightingLexer
import com.jetbrains.python.psi.LanguageLevel
import com.jetbrains.snakecharm.lang.parser.SmkTokenTypes.WORKFLOW_TOPLEVEL_DECORATOR_KEYWORD
import com.jetbrains.snakecharm.lang.parser.SnakemakeLexer

/**
* @author Roman.Chernyatchik
* @date 2018-12-31
*/
class SnakemakeHighlightingLexer(level: LanguageLevel): PythonHighlightingLexer(level) {
override fun getTokenType() = SnakemakeLexer.KEYWORDS[tokenText] ?: super.getTokenType()
class SnakemakeHighlightingLexer(level: LanguageLevel) : PythonHighlightingLexer(level) {
override fun getTokenType() =
if (tokenText in SnakemakeLexer.TOPLEVEL_KEYWORDS) {
WORKFLOW_TOPLEVEL_DECORATOR_KEYWORD
} else {
SnakemakeLexer.KEYWORDS[tokenText] ?: super.getTokenType()
}
}
@@ -10,47 +10,49 @@ import com.jetbrains.python.psi.PyElementType
import com.jetbrains.snakecharm.SnakemakeBundle
import com.jetbrains.snakecharm.lang.SnakemakeNames
import com.jetbrains.snakecharm.lang.parser.SmkTokenTypes.RULE_OR_CHECKPOINT
import com.jetbrains.snakecharm.lang.parser.SnakemakeLexer.Companion.TOPLEVEL_KEYWORDS
import com.jetbrains.snakecharm.lang.psi.elementTypes.SmkElementTypes
import com.jetbrains.snakecharm.lang.psi.elementTypes.SmkStubElementTypes.*
import java.util.*


/**
* @author Roman.Chernyatchik
* @date 2018-12-31
*/
class SmkStatementParsing(
context: SmkParserContext
context: SmkParserContext
) : StatementParsing(context) {

private val ruleSectionParsingData = SectionParsingData(
declaration = RULE_DECLARATION_STATEMENT,
name = "rule",
parameterListStatement = SmkElementTypes.RULE_OR_CHECKPOINT_ARGS_SECTION_STATEMENT,
sectionKeyword= SmkTokenTypes.RULE_KEYWORD
declaration = RULE_DECLARATION_STATEMENT,
name = "rule",
parameterListStatement = SmkElementTypes.RULE_OR_CHECKPOINT_ARGS_SECTION_STATEMENT,
sectionKeyword = SmkTokenTypes.RULE_KEYWORD
)

private val checkpointSectionParsingData = SectionParsingData(
declaration = CHECKPOINT_DECLARATION_STATEMENT,
name = "checkpoint",
parameterListStatement = SmkElementTypes.RULE_OR_CHECKPOINT_ARGS_SECTION_STATEMENT,
sectionKeyword= SmkTokenTypes.CHECKPOINT_KEYWORD
declaration = CHECKPOINT_DECLARATION_STATEMENT,
name = "checkpoint",
parameterListStatement = SmkElementTypes.RULE_OR_CHECKPOINT_ARGS_SECTION_STATEMENT,
sectionKeyword = SmkTokenTypes.CHECKPOINT_KEYWORD
)

private val subworkflowSectionParsingData = SectionParsingData(
declaration = SUBWORKFLOW_DECLARATION_STATEMENT,
name = "subworkflow",
parameterListStatement = SmkElementTypes.SUBWORKFLOW_ARGS_SECTION_STATEMENT,
sectionKeyword= SmkTokenTypes.SUBWORKFLOW_KEYWORD
declaration = SUBWORKFLOW_DECLARATION_STATEMENT,
name = "subworkflow",
parameterListStatement = SmkElementTypes.SUBWORKFLOW_ARGS_SECTION_STATEMENT,
sectionKeyword = SmkTokenTypes.SUBWORKFLOW_KEYWORD
)

override fun getReferenceType() = SmkElementTypes.SMK_PY_REFERENCE_EXPRESSION

private fun getSectionParsingData(tokenType: IElementType) =
when {
tokenType === SmkTokenTypes.SUBWORKFLOW_KEYWORD -> subworkflowSectionParsingData
tokenType === SmkTokenTypes.CHECKPOINT_KEYWORD -> checkpointSectionParsingData
else -> ruleSectionParsingData
}
when {
tokenType === SmkTokenTypes.SUBWORKFLOW_KEYWORD -> subworkflowSectionParsingData
tokenType === SmkTokenTypes.CHECKPOINT_KEYWORD -> checkpointSectionParsingData
else -> ruleSectionParsingData
}

override fun getParsingContext() = myContext as SmkParserContext

@@ -63,19 +65,24 @@ class SmkStatementParsing(
val actualToken = SnakemakeLexer.KEYWORDS[myBuilder.tokenText!!]
if (actualToken != null) {
myBuilder.remapCurrentToken(actualToken)
} else if (isToplevelDecoratorKeyword()) {
myBuilder.remapCurrentToken(SmkTokenTypes.WORKFLOW_TOPLEVEL_DECORATOR_KEYWORD)
}
}

val tt = myBuilder.tokenType

if (tt !in SmkTokenTypes.WORKFLOW_TOPLEVEL_DECORATORS) {
if (tt !in SmkTokenTypes.WORKFLOW_TOPLEVEL_DECORATORS &&
tt != SmkTokenTypes.WORKFLOW_TOPLEVEL_DECORATOR_KEYWORD
) {
// XXX: maybe also allow: `some_new_section: ` case, i.e. identifier with following ':' in order
// to support any section here
super.parseStatement()
return
}
when {
tt in SmkTokenTypes.RULE_LIKE -> parseRuleLikeDeclaration(getSectionParsingData(tt!!))
tt in SmkTokenTypes.WORKFLOW_TOPLEVEL_PARAMLISTS_DECORATOR_KEYWORDS -> {
tt === SmkTokenTypes.WORKFLOW_TOPLEVEL_DECORATOR_KEYWORD -> {
val workflowParam = myBuilder.mark()
nextToken()
val result = parsingContext.expressionParser.parseRuleLikeSectionArgumentList()
Expand All @@ -89,9 +96,9 @@ class SmkStatementParsing(
nextToken()

val res = parsingContext.expressionParser.parseArgumentList(
",", PyTokenTypes.COMMA,
SnakemakeBundle.message("PARSE.expected.identifier"),
this::parseIdentifier
",", PyTokenTypes.COMMA,
SnakemakeBundle.message("PARSE.expected.identifier"),
this::parseIdentifier
)

if (!res) {
Expand All @@ -103,14 +110,14 @@ class SmkStatementParsing(
nextToken()
}
}
tt === SmkTokenTypes.WORKFLOW_RULEORDER_KEYWORD -> {
tt === SmkTokenTypes.WORKFLOW_RULEORDER_KEYWORD -> {
val workflowParam = myBuilder.mark()
nextToken()

val res = parsingContext.expressionParser.parseArgumentList(
">", PyTokenTypes.GT,
SnakemakeBundle.message("PARSE.expected.identifier"),
this::parseIdentifier
">", PyTokenTypes.GT,
SnakemakeBundle.message("PARSE.expected.identifier"),
this::parseIdentifier
)

if (!res) {
@@ -154,7 +161,13 @@

// XXX at the moment we continue parsing rule even if colon missed, probably better
// XXX to drop rule and scroll up to next STATEMENT_BREAK/RULE/CHECKPOINT/other toplevel keyword or eof()
checkMatches(PyTokenTypes.COLON, "${section.name.capitalize()} name identifier or ':' expected") // bundle
checkMatches(PyTokenTypes.COLON,
"${
section.name.replaceFirstChar {
if (it.isLowerCase()) it.titlecase(Locale.getDefault()) else it.toString()
}
} name identifier or ':' expected"
) // bundle

val ruleStatements = myBuilder.mark()

@@ -241,7 +254,11 @@
val ruleParam = myBuilder.mark()

if (myBuilder.tokenType != PyTokenTypes.IDENTIFIER) {
myBuilder.error("${section.name.capitalize()} parameter identifier is expected") // bundle
myBuilder.error("${
section.name.replaceFirstChar {
if (it.isLowerCase()) it.titlecase(Locale.getDefault()) else it.toString()
}
} parameter identifier is expected") // bundle
nextToken()
ruleParam.drop()
return false
@@ -273,7 +290,7 @@
return result
}

// TODO: cleanup
// TODO: cleanup
// override fun getFunctionParser(): FunctionParsing {
// return super.getFunctionParser()
// }
@@ -289,13 +306,35 @@ class SmkStatementParsing(
referenceMarker.drop()
return false
}

private fun isToplevelDecoratorKeyword(): Boolean {
return if (myBuilder.tokenText!! in TOPLEVEL_KEYWORDS) {
true
} else {
val workflowParam = myBuilder.mark()
var result = checkNextToken(PyTokenTypes.COLON)
nextToken()
result = result &&
(myBuilder.tokenType.isPythonString() || myBuilder.tokenType == PyTokenTypes.STATEMENT_BREAK)
workflowParam.rollbackTo()
result
}
}

private fun checkNextToken(tt: PyElementType): Boolean {
nextToken()
if (myBuilder.tokenType == tt) {
return true
}
return false
}
}

fun IElementType?.isPythonString() = this in PyTokenTypes.STRING_NODES || this == PyTokenTypes.FSTRING_START

private data class SectionParsingData(
val declaration: IElementType,
val name: String,
val parameterListStatement: PyElementType,
val sectionKeyword: PyElementType
val declaration: IElementType,
val name: String,
val parameterListStatement: PyElementType,
val sectionKeyword: PyElementType
)
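
The look-ahead introduced in isToplevelDecoratorKeyword boils down to a small rule (a self-contained sketch of the assumed logic, with token inspection reduced to booleans, not code from the plugin): an identifier opening a top-level statement is treated as a section keyword either when it is one of the known top-level keywords, or when it is immediately followed by a colon and then a string literal or a statement break.

    // Simplified model of the assumed decision made by isToplevelDecoratorKeyword().
    fun treatAsToplevelSection(
        tokenText: String,
        knownToplevelKeywords: Set<String>,
        followedByColon: Boolean,
        thenStringOrStatementBreak: Boolean
    ): Boolean =
        tokenText in knownToplevelKeywords || (followedByColon && thenStringOrStatementBreak)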