diff --git a/README.md b/README.md
index afd7172..7008013 100644
--- a/README.md
+++ b/README.md
@@ -23,13 +23,13 @@
 After `xontrib load output_search` you can select tokens from the latest output.
 For example, to get the tokens that contain `xon`:
 ```shell script
-$ echo "Fish out from any output with https://github.com/anki-code/xontrib-output-search"
-Fish out from any output with https://github.com/anki-code/xontrib-output-search
+$ echo "Try https://github.com/anki-code/xontrib-output-search"
+Try https://github.com/anki-code/xontrib-output-search
 $ git clone xon
 $ git clone https://github.com/anki-code/xontrib-output-search
 ```
-Another example:
+JSON example:
 ```shell script
 $ echo '{"Try": "xontrib-output-search"}' # JSON data
 {"Try": "xontrib-output-search"}
@@ -37,7 +37,23 @@
 $ echo I should try x
 $ echo I should try xontrib-output-search
 ```
+ENV example:
+```shell script
+$ env | grep ^PATH=
+PATH=/one/two:/three/four
+$ ls /t
+$ ls /three/four
+```
+
 ## Development
+### Clone and test
+```shell script
+cd ~
+git clone https://github.com/anki-code/xontrib-output-search
+cd xontrib-output-search
+pytest
+```
+
 ### Tokenizer and generator
 Tokenizer is the function that extracts tokens (words) from the output. After that, every token goes to the generator to search for alternatives.
@@ -61,8 +77,11 @@ You can enrich the tokenizer and generator for your tasks! Feel free to make pull requests.
 If you want completely different behavior of the tokenizer and generator, let's discuss how to elegantly replace the default functions.
 
 ## Known issues
 ### `cat` is not captured
-Use `cat file | head` instead.
+Workaround: `cat file | head`.
+
+### Alt+F may not work in the PyCharm terminal
+Workaround: `f__` + Tab.
 
 ## Thanks
 * I was inspired by [xontrib-histcpy](https://github.com/con-f-use/xontrib-histcpy). Thanks @con-f-use!
diff --git a/setup.py b/setup.py
index 5ea5c05..b4fbfa9 100644
--- a/setup.py
+++ b/setup.py
@@ -9,7 +9,7 @@ setup(
     name='xontrib-output-search',
-    version='0.1.1',
+    version='0.1.2',
     license='BSD',
     author='anki',
     author_email='author@example.com',
diff --git a/xontrib/output_search.py b/xontrib/output_search.py
index b4c7d99..2be29f9 100644
--- a/xontrib/output_search.py
+++ b/xontrib/output_search.py
@@ -6,17 +6,24 @@
 clean_regexp = re.compile(r'[\n\r\t]')
 color_regexp = re.compile(r'(\x9B|\x1B\[)[0-?]*[ -/]*[@-~]')
 framed_regexp = re.compile(r'^["\'{,:]*(.+?)[,}"\':]*$')
+env_regexp = re.compile(r'^([A-Z0-9_]+?)=(.*)$')
 
-def _generator(token):
+def _generator(token, substring):
     """
     Create alternatives for token.
""" - token_variation = [] + tokens = [token] if len(token) > 2: g = framed_regexp.match(token) if g: - token_variation += [g.group(1)] - return token_variation if token_variation != [token] else [] + tokens = [g.group(1)] + tokens + g = env_regexp.match(token) + if g: + env_var = g.group(1) + value = g.group(2) + values = value.split(':') + tokens = values + [env_var, value] + tokens + return [t for t in tokens if substring in t] def _tokenizer(text, substring=''): @@ -27,7 +34,7 @@ def _tokenizer(text, substring=''): selected_tokens = [] for t in tokens: if len(t) > 1 and substring.lower() in t.lower(): - selected_tokens += [t] + _generator(t) + selected_tokens += _generator(t, substring) return set(selected_tokens) diff --git a/xontrib/test_tokenizer.py b/xontrib/test_tokenizer.py index 9a32a24..1ab0be1 100644 --- a/xontrib/test_tokenizer.py +++ b/xontrib/test_tokenizer.py @@ -14,3 +14,9 @@ def test_tokenizer_specials(): def test_tokenizer_substring(): assert _tokenizer('one two three four five six', substring='e') == {'one', 'three', 'five'} + +def test_tokenizer_env(): + assert _tokenizer('SHELL=bash\nPATH=/a/b:/c/d') == {'PATH=/a/b:/c/d', 'PATH', '/a/b:/c/d', '/a/b', '/c/d', 'SHELL=bash', 'SHELL', 'bash'} + +def test_tokenizer_env_substrig(): + assert _tokenizer('SHELL=bash\nPATH=/a/b:/c/d', '/c') == {'PATH=/a/b:/c/d', '/a/b:/c/d', '/c/d'}