From 45aed7bbb72c9c98f738e87eab7fa67296e9c68a Mon Sep 17 00:00:00 2001 From: BethanyG Date: Tue, 18 May 2021 19:36:07 -0700 Subject: [PATCH 01/16] Renamed processing-logs to little-sisiters-vocab. --- .../concept/processing-logs/.docs/hints.md | 28 ---------- .../processing-logs/.docs/instructions.md | 47 ---------------- .../processing-logs/.docs/introduction.md | 5 -- .../concept/processing-logs/.meta/config.json | 20 ------- .../concept/processing-logs/.meta/design.md | 39 -------------- .../concept/processing-logs/.meta/exemplar.py | 27 ---------- exercises/concept/processing-logs/strings.py | 8 --- .../concept/processing-logs/strings_test.py | 53 ------------------- 8 files changed, 227 deletions(-) delete mode 100644 exercises/concept/processing-logs/.docs/hints.md delete mode 100644 exercises/concept/processing-logs/.docs/instructions.md delete mode 100644 exercises/concept/processing-logs/.docs/introduction.md delete mode 100644 exercises/concept/processing-logs/.meta/config.json delete mode 100644 exercises/concept/processing-logs/.meta/design.md delete mode 100644 exercises/concept/processing-logs/.meta/exemplar.py delete mode 100644 exercises/concept/processing-logs/strings.py delete mode 100644 exercises/concept/processing-logs/strings_test.py diff --git a/exercises/concept/processing-logs/.docs/hints.md b/exercises/concept/processing-logs/.docs/hints.md deleted file mode 100644 index 27ceb7b8e4..0000000000 --- a/exercises/concept/processing-logs/.docs/hints.md +++ /dev/null @@ -1,28 +0,0 @@ -# Hints - -## General - -- The [Python documentation for `str`][python-str-doc] has an overview of the Python `str` type. - -## 1. Get message from a log line - -- Strings in Python have [lots of convenient instance methods][str-type-methods] for cleaning, splitting, manipulating, and creating new strings. Extracting values from a string could be done by splitting it based on a substring, for example. - -## 2. Get log level from a log line - -- Strings also have methods that help convert letters from lower to uppercase and vice-versa. - -## 3. Reformat a log line - -Strings are immutable, but can be combined together to make new strings, or have elements replaced. This goal can be accomplished by using string methods, or operators like `+` or `+=` (which are overloaded to work with strings). -Python also has a concept of string formatting, like many other languages. - -- The [`str.join()`][str-join] method is useful to join an iterable of strings into one string by interspersing them with a common value, e.g. `":".join("abcde")` would create `"a:b:c:d:e"`. -- [`str.format()`][str-format] is an idiomatic way to do string interpolation in Python (inserting one or more string value(s) into another). -- [Format strings][format-str] are another convenient way to interpolate values into a string. This strategy is particularly useful when more than one named variable needs to be inserted into a final output. 
- -[python-str-doc]: https://docs.python.org/3/library/stdtypes.html#text-sequence-type-str -[str-type-methods]: https://docs.python.org/3/library/stdtypes.html#str -[str-join]: https://docs.python.org/3/library/stdtypes.html#str.join -[str-format]: https://docs.python.org/3/library/stdtypes.html#str.format -[format-str]: https://docs.python.org/3/library/string.html#formatstrings diff --git a/exercises/concept/processing-logs/.docs/instructions.md b/exercises/concept/processing-logs/.docs/instructions.md deleted file mode 100644 index 01a6c9cbd6..0000000000 --- a/exercises/concept/processing-logs/.docs/instructions.md +++ /dev/null @@ -1,47 +0,0 @@ -# Instructions - -In this exercise you'll be processing log-lines. - -Each log line is a string formatted as follows: `"[]: "`. - -There are three different log levels: - -- `INFO` -- `WARNING` -- `ERROR` - -You have three tasks, each of which will take a log line and ask you to do something with it. - -## 1. Extract a message from a log line - -Implement the `extract_message` to return a log line's message: - -```python ->>> extract_message("[ERROR]: Invalid operation") -'Invalid operation' -``` - -The message should be trimmed of any whitespace. - -```python ->>> extract_message("[ERROR]: Invalid operation.\t\n") -'Invalid operation.' -``` - -## 2. Change a message's loglevel. - -Implement the `change_log_level` function to replace a log line's current log level with a new one: - -```python ->>> change_log_level("[INFO]: Fatal Error.", "ERROR") -'[ERROR]: Fatal Error.' -``` - -## 3. Reformat a log line - -Implement the `reformat` function to reformat the log line, putting the message first and the log level after it in parentheses: - -```python ->>> reformat("[INFO]: Operation completed") -'Operation completed (info)' -``` diff --git a/exercises/concept/processing-logs/.docs/introduction.md b/exercises/concept/processing-logs/.docs/introduction.md deleted file mode 100644 index f3d9c1c78f..0000000000 --- a/exercises/concept/processing-logs/.docs/introduction.md +++ /dev/null @@ -1,5 +0,0 @@ -# Introduction - -A `str` typed object in Python is an immutable sequence of Unicode code points. This could include letters, numbers, punctuation, etc. To manipulate strings, Python provides string methods that can transform a string into other types, create new strings based on method arguments, or return information about the string. Strings can be concatenated with `+`. - -Immutability means that a string's value doesn't change; methods that appear to modify a string actually return a new instance of `str`. diff --git a/exercises/concept/processing-logs/.meta/config.json b/exercises/concept/processing-logs/.meta/config.json deleted file mode 100644 index 95b4709889..0000000000 --- a/exercises/concept/processing-logs/.meta/config.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "blurb": "Learn about strings by processing log files.", - "authors": [ - "aldraco" - ], - "files": { - "solution": [ - "strings.py" - ], - "test": [ - "strings_test.py" - ], - "exemplar": [ - ".meta/exemplar.py" - ] - }, - "forked_from": [ - "csharp/strings" - ] -} diff --git a/exercises/concept/processing-logs/.meta/design.md b/exercises/concept/processing-logs/.meta/design.md deleted file mode 100644 index 2797712381..0000000000 --- a/exercises/concept/processing-logs/.meta/design.md +++ /dev/null @@ -1,39 +0,0 @@ -# Design - -## Goal - -The goal of this exercise is to teach the student about Python strings, and familiarize them with string manipulation in Python. 
- -## Things to teach - -- Know that Python has a `str` type. -- Know how to find items in a string. -- Know how to manipulate strings to create new strings. -- Familiarize one's self with string instance methods in Python. -- Learn about string formatting. - -## Things not to teach - -- Regex: `regex` is a useful tool for a solution, but isn't required. -- Iteration: Although strings are iterable, this is not the focus of this exercise. - -## Concepts - -The Concepts this exercise unlocks are: - -- `strings-basic`: know about `str` type in Python, know some basic methods, know about formatting. - -## Prerequisites - -- `functions`: The student should be familiar with creating functions. - -## Representer - -This exercise does not require any logic to be added to the [representer][representer] - -## Analyzer - -This exercise does not require any logic to be added to the [analyzer][analyzer]. - -[analyzer]: https://github.com/exercism/python-analyzer -[representer]: https://github.com/exercism/python-representer diff --git a/exercises/concept/processing-logs/.meta/exemplar.py b/exercises/concept/processing-logs/.meta/exemplar.py deleted file mode 100644 index 89f2d8930e..0000000000 --- a/exercises/concept/processing-logs/.meta/exemplar.py +++ /dev/null @@ -1,27 +0,0 @@ -import re - -LOGLINE_RE = r"\[(?PINFO|ERROR|WARN)\] (?P.*)" - - -def _extract_pieces(message): - pieces = re.search(LOGLINE_RE, message) - return pieces.group("level"), pieces.group("msg") - -def _extract_pieces_no_regex_groups(message): - words = [word for word in re.split("[\s\[\]]", message) if word] - return words[0], " ".join(words[1:]) - -def _extract_pieces_no_regex(message): - words = [word for word in message.strip().replace("]", "[").split("[") if word] - return words[0], words[1].strip() - -def change_log_level(message, new_loglevel): - """Change loglevel of message to new_loglevel.""" - return f"[{new_loglevel}] {extract_message(message)}" - -def extract_message(message): - return _extract_pieces_no_regex(message)[1] - -def reformat(message): - loglevel, msg = _extract_pieces_no_regex_groups(message) - return f"{msg} ({loglevel.lower()})" diff --git a/exercises/concept/processing-logs/strings.py b/exercises/concept/processing-logs/strings.py deleted file mode 100644 index 11d9afe842..0000000000 --- a/exercises/concept/processing-logs/strings.py +++ /dev/null @@ -1,8 +0,0 @@ -def extract_message(): - pass - -def change_log_level(): - pass - -def reformat(): - pass diff --git a/exercises/concept/processing-logs/strings_test.py b/exercises/concept/processing-logs/strings_test.py deleted file mode 100644 index 4e0bdd991d..0000000000 --- a/exercises/concept/processing-logs/strings_test.py +++ /dev/null @@ -1,53 +0,0 @@ -import unittest -from strings import extract_message, change_log_level, reformat - - -class TestLogLines(unittest.TestCase): - def test_message(self): - self.assertEqual( - extract_message("[INFO] Hello there."), - "Hello there.", - msg="Should correctly extract a basic message.", - ) - - def test_message_with_punctuation(self): - self.assertEqual( - extract_message("[WARN] File not found: exercism_practice.py"), - "File not found: exercism_practice.py", - msg="Should preserve punctuation and whitespace from original message.", - ) - - def test_level_word_remains_in_message(self): - self.assertEqual( - extract_message("[ERROR] Error while serializing data."), - "Error while serializing data.", - msg="Should preserve a loglevel word that is actually part of the message.", - ) - - def 
test_change_loglevel(self): - self.assertEqual( - change_log_level("[ERROR] No configs found, but not a big deal.", "INFO"), - "[INFO] No configs found, but not a big deal.", - msg="Should replace the loglevel.", - ) - - def test_change_loglevel_with_loglevel_in_message(self): - self.assertEqual( - change_log_level("[WARN] Warning: file does not exist.", "INFO"), - "[INFO] Warning: file does not exist.", - msg="Should not replace loglevel names that are part of the message.", - ) - - def test_reformat(self): - self.assertEqual( - reformat("[WARN] Warning: file not found."), - "Warning: file not found. (warn)", - msg="Should reformat with lowercase loglevel. ", - ) - - def test_capture_message_when_contains_loglevel_string(self): - self.assertEqual( - extract_message("[WARN] Warning: file not found. Will WARN once."), - "Warning: file not found. Will WARN once.", - msg="Should extract loglevel inside brackets, not from messsage, even if message contains something that looks like a loglevel.", - ) From 6fc27155e9cda0a9e7f93bc0ecfca6774debeb34 Mon Sep 17 00:00:00 2001 From: BethanyG Date: Tue, 18 May 2021 19:36:59 -0700 Subject: [PATCH 02/16] Rewrote hints.md, instructions.md, and introduction.md. --- .../little-sisters-vocab/.docs/hints.md | 39 ++++ .../.docs/instructions.md | 69 ++++++ .../.docs/introduction.md | 203 ++++++++++++++++++ 3 files changed, 311 insertions(+) create mode 100644 exercises/concept/little-sisters-vocab/.docs/hints.md create mode 100644 exercises/concept/little-sisters-vocab/.docs/instructions.md create mode 100644 exercises/concept/little-sisters-vocab/.docs/introduction.md diff --git a/exercises/concept/little-sisters-vocab/.docs/hints.md b/exercises/concept/little-sisters-vocab/.docs/hints.md new file mode 100644 index 0000000000..d1aba42ca7 --- /dev/null +++ b/exercises/concept/little-sisters-vocab/.docs/hints.md @@ -0,0 +1,39 @@ +# Hints + +## General + +- The [Python Docs Tutorial for strings][python-str-doc] has an overview of the Python `str` type. +- String methods [.join()][str-join] and [.split()][str-split] ar very helpful when processing strings. +- The [Python Docs on Sequence Types][common sequence operations] has a rundown of operations common to all sequences, including `strings`, `lists`, `tuples`, and `ranges`. + +You're helping your younger sister with her English vocabulary homework, which she's finding very tedious. Her class is learning to create new words by adding _prefixes_ and _suffixes_, and have been given different sets of words to modify. The teacher is looking for correctly transformed words and correct spelling, given the word beginning or ending. + +There's four activities in the assignment, each with a set of text or words to work with. + +## 1. Add a prefix to a word + +- Small strings can be concatenated with the `+` operator. + +## 2. Add prefixes to word groups + +- Believe it or not, .join() is all you need. +- Like `.split()`, `.join()` can take an arbitrary-length string, made up of any unicode code points. + +## 3. Remove a suffix from a word + +- Strings can be both indexed and sliced from either the left (starting at 0) or the right (starting at -1). +- If you want the last code point of an arbitrary-length string, you can use [-1]. +- The last three letters in a string can be "sliced off" using a negative index. e.g. 'beautiful'[:-3] == 'beauti' + +## 4. Extract and transform a word + +- Using `.split()` returns a list of strings broken on white space. +- `lists` are sequences, and can be indexed. 
+- `.split()` can be direcly indexed. e.g. `'Exercism rocks!'.split()[0] == 'Exercism'` +- Be careful of punctuation! Periods can be removed via slice: `'dark.'[:-1] == 'dark'` + +[python-str-doc]: https://docs.python.org/3/tutorial/introduction.html#strings + +[common sequence operations]https://docs.python.org/3/library/stdtypes.html#text-sequence-type-str +[str-join]: https://docs.python.org/3/library/stdtypes.html#str.join +[str-split]: https://docs.python.org/3/library/stdtypes.html#str.split diff --git a/exercises/concept/little-sisters-vocab/.docs/instructions.md b/exercises/concept/little-sisters-vocab/.docs/instructions.md new file mode 100644 index 0000000000..7fc068bf10 --- /dev/null +++ b/exercises/concept/little-sisters-vocab/.docs/instructions.md @@ -0,0 +1,69 @@ +# Instructions + +You're helping your younger sister with her English vocabulary homework, which she's finding very tedious. Her class is learning to create new words by adding _prefixes_ and _suffixes_, and have been given different sets of words to modify. The teacher is looking for correctly transformed words and correct spelling, given the word beginning or ending. + +There's four activities in the assignment, each with a set of text or words to work with. + +## 1. Add a prefix to a word + +One of the most common prefixes in English is `un`, meaning "not". In this activity, your sister needs to make negative, or "not" words by adding `un` to them. + +Implement the `add_prefix_un()` function that takes `word` as a parameter and returns a new `un` prefixed word: + +```python +>>> add_prefix_un("happy") +'unhappy' + +>>> add_prefix("manageable") +'unmanagable' +``` + +## 2. Add prefixes to word groups + +There are four more common prefixes that your sister's class is studying: `en` (_meaning to 'put into' or 'cover with'_), `pre` (_meaning before or forward _), `auto` (_meaning self or same_), and `inter` (_meaning between or among_). In this exercise, the class is creating groups of vocabulary words using these prefixes, so they can be studied together. Each prefix comes in a list with common words it's used with. The students need to apply the prefix and produce a string that shows the prefix and the words with the prefix applied. + +Implement the `make_word_groups()` function that takes a `vocab_words` as a parameter in the following form: [``, `word_1`, `word_2` .... `word_n`], and returns a string with the prefix applied to each word that looks like `' :: :: :: '`. + +```python +>>> make_word_groups(['en', close, joy, lighten]) +'en :: enclose :: enjoy :: enlighten' + +>>> make_word_groups(['pre', serve, dispose, position]) +'pre :: preserve :: predispose :: preposition' + +>> make_word_groups(['auto', 'didactic', 'graph', 'mate']) +'auto :: autodidactic :: autograph :: automate' + +>>> make_word_groups(['inter', 'twine', 'connected', 'dependant']) +'inter :: intertwine :: interconnected :: interdependant' +``` + +## 3. Remove a suffix from a word + +`ness` is a common suffix that means "state of being". In this activity, your sister needs to "extract" the original root word by removing the `ness` suffix. But of course there are pesky spelling rules. If the root word originally ended in a consonant followed by a 'y', then the 'y' was changed to to 'i'. So removing the 'ness' needs to restore the 'y' in the original word. `happiness` --> `happi` --> `happy`. + +Implement the `remove_suffix_ness()` function that takes in a word and returns the base word with `ness` removed. 
+ +```python +>>> remove_suffix_ness("heaviness") +'heavy' + +>>> remove_suffix_ness("sadness") +'sad' +``` + +## 4. Extract and transform a word + +Suffixes are often used to change the part of speech a word has. A common practice in English is "verbing" or "verbifying" -- where a ajective _becomes_ a verb by adding an `en` suffix. + +In this task, your sister is going to practice "verbing" by extracting an ajective from a sentence and turning it into a verb. Fortunately, all the words that need to be transformed here are "regular" - they don't need spelling changes to add the suffix. + +Implement the `noun_to_verb()` function that takes two parameters. A `sentence` using the vocabulary word, and the `index` of the word, once that sentence is split apart. The function should return the extracted ajective as a verb. + +```python +>>> noun_to_verb('I need to make that bright.', -1 ) +'brighten' + +>>> noun_to_verb('It got dark as the sun set.', 2) +'darken' +``` diff --git a/exercises/concept/little-sisters-vocab/.docs/introduction.md b/exercises/concept/little-sisters-vocab/.docs/introduction.md new file mode 100644 index 0000000000..5c2ed6901a --- /dev/null +++ b/exercises/concept/little-sisters-vocab/.docs/introduction.md @@ -0,0 +1,203 @@ +# Introduction + +A `str` in Python is an [immutable sequence][text sequence] of [Unicode code points][unicode code points]. +These could include letters, diacritical marks, positioning characters, numbers, currency symbols, emoji, punctuation, space and line break characters, and more. +Being immutable, a `str` object's value in memory doesn't change; methods that appear to modify a string return a new copy or instance of `str`. + +A `str` literal can be declared via single `'` or double `"` quotes. The escape `\` character is available as needed. + +```python + +>>> single_quoted = 'These allow "double quoting" without "escape" characters.' + +>>> double_quoted = "These allow embedded 'single quoting', so you don't have to use an 'escape' character". +``` + +Muliti-line strings are declared with `'''` or `"""`. + +```python +>>> triple_quoted = '''Three single quotes or "double quotes" in a row allow for multi-line string literals. + Line break characters, tabs and other whitespace is fully supported. + + You\'ll most often encounter these as "doc strings" or "doc tests" written just below the first line of a function or class definition. + They\'re often used with auto documentation ✍ tools. + ''' +``` + +Strings can be concatenated using the `+` operator. +This method should be used sparingly, as it is not very performant or easily maintained. + +```python +language = "Ukrainian" +number = "nine" +word = "девять" + +sentence = word + " " + "means" + " " + number + " in " + language + "." + +>>> print(sentence) +... +"девять means nine in Ukrainian." +``` + +If a `list`, `tuple`, `set` or other collection of individual strings needs to be combined into a single `str`, [`.join()`][str-join] is a better option: + +```python +# str.join() makes a new string from the iterable's elements. +>>> chickens = ["hen", "egg", "rooster"] +>>> ' '.join(chickens) +'hen egg rooster' + +# Any string can be used as the joining element.
+>>> ' :: '.join(chickens) +'hen :: egg :: rooster' + +>>> ' 🌿 '.join(chickens) +'hen 🌿 egg 🌿 rooster' +``` + +Code points within a `str` can be referenced by 0-based index number from the right: + +```python +creative = '창의적인' + +>>> creative[0] +'창' + +>>> creative[2] +'적' + +>>> creative[3] +'인' +``` + +Indexing also works from the left, starting with `-1`: + +```python +creative = '창의적인' + +>>> creative[-4] +'창' + +>>> creative[-2] +'적' + +>>> creative[-1] +'인' + +``` + +There is no separate “character” or "rune" type in Python, so indexing a string produces a new `str` of length 1: + +```python + +>>> website = "exercism" +>>> type(website[0]) + + +>>> len(website[0]) +1 + +>>> website[0] == website[0:1] == 'e' +True +``` + +Substrings can be selected via _slice notation_, using [`[:stop:]`][common sequence operations] to produce a new string. +Results exclude the `stop` index. +If no `start` is given, the starting index will be 0. +If no `stop` is given, the `stop` index will be the end of the string. + +```python +moon_and_stars = '🌟🌟🌙🌟🌟⭐' + +>>> moon_and_stars[1:4] +'🌟🌙🌟' + +>>> moon_and_stars[:3] +'🌟🌟🌙' + +>>> moon_and_stars[3:] +'🌟🌟⭐' + +>>> moon_and_stars[:-1] +'🌟🌟🌙🌟🌟' + +>>> moon_and_stars[:-3] +'🌟🌟🌙' +``` + +Strings can also be broken into smaller strings via [`.split()`][str-split], which will return a `list` of substrings. +The list can then be further indexed or split, if needed. +Using `.split()` without any arguments will split the string on whitespace. + +```python +>>> cat_ipsum = "Destroy house in 5 seconds mock the hooman." +>>> cat_ipsum.split() +... +['Destroy', 'house', 'in', '5', 'seconds', 'mock', 'the', 'hooman.'] + + +>>> cat_ipsum.split()[-1] +'hooman.' + + +>>> cat_words = "feline, four-footed, ferocious, furry" +>>> cat_words.split(',') +... +['feline, four-footed, ferocious, furry'] +``` + +Seperators for `.split()` can be more than one character. The whole string will be used for matching. + +```python + +>>> colors = """red, +orange, +green, +purple, +yellow""" + +>>> colors.split(',\n') +['red', 'orange', 'green', 'purple', 'yellow'] +``` + +Strings support all [common sequence operations][common sequence operations]. +Individual code points can be iterated through in a loop via `for item in `. +Indexes _with_ items can be iterated through in a loop via `for index, item in enumerate()` + +```python + +>>> exercise = 'လေ့ကျင့်' + +# Note that there are more code points than percieved glyphs or characters +>>> for code_point in exercise: +... print(code_point) +... +လ +ေ +့ +က +ျ +င +် +့ + +# Using enumerate will give both the value and index position of each element. +>>> for index, code_point in enumerate(exercise): +... print(index, ": ", code_point) +... 
+0 : လ +1 : ေ +2 : ့ +3 : က +4 : ျ +5 : င +6 : ် +7 : ့ +``` + +[text sequence]: https://docs.python.org/3/library/stdtypes.html#text-sequence-type-str +[unicode code points]: https://stackoverflow.com/questions/27331819/whats-the-difference-between-a-character-a-code-point-a-glyph-and-a-grapheme +[common sequence operations]: https://docs.python.org/3/library/stdtypes.html#common-sequence-operations +[str-split]: https://docs.python.org/3/library/stdtypes.html#str.split +[str-join]: https://docs.python.org/3/library/stdtypes.html#str.join +[str-constructor]: https://docs.python.org/3/library/stdtypes.html#str From 4120d11cf6bc66a3866b17fa5812553eb497fc80 Mon Sep 17 00:00:00 2001 From: BethanyG Date: Tue, 18 May 2021 19:46:57 -0700 Subject: [PATCH 03/16] Rewrote config.json, design.md, and exemplar.py. --- .../little-sisters-vocab/.meta/config.json | 9 +++ .../little-sisters-vocab/.meta/design.md | 33 ++++++++++ .../little-sisters-vocab/.meta/exemplar.py | 61 +++++++++++++++++++ 3 files changed, 103 insertions(+) create mode 100644 exercises/concept/little-sisters-vocab/.meta/config.json create mode 100644 exercises/concept/little-sisters-vocab/.meta/design.md create mode 100644 exercises/concept/little-sisters-vocab/.meta/exemplar.py diff --git a/exercises/concept/little-sisters-vocab/.meta/config.json b/exercises/concept/little-sisters-vocab/.meta/config.json new file mode 100644 index 0000000000..a9516e4ac3 --- /dev/null +++ b/exercises/concept/little-sisters-vocab/.meta/config.json @@ -0,0 +1,9 @@ +{ + "blurb": "Learn about strings by helping your little sister with her vocabulary homework.", + "icon": "two-fer", + "authors": ["aldraco", "bethanyg"], + "files": { + "solution": ["strings.py"], + "test": ["strings_test.py"], + "exemplar": [".meta/exemplar.py"] + } \ No newline at end of file diff --git a/exercises/concept/little-sisters-vocab/.meta/design.md b/exercises/concept/little-sisters-vocab/.meta/design.md new file mode 100644 index 0000000000..e6affd8c18 --- /dev/null +++ b/exercises/concept/little-sisters-vocab/.meta/design.md @@ -0,0 +1,33 @@ +# Design + +## Goal + +The goal of this exercise is to teach the student about Python strings, and familiarize them with string manipulation in Python. + +## Things to teach + +- Know that Python has a `str` type. +- How to use `.join()` +- How to use `.split()` +- Know how to manipulate strings to create new strings. + +## Things not to teach + +- Regex: `regex`. That is for a different exercise. +- Iteration: Although strings are iterable, this is not the focus of this exercise. + + +## Prerequisites + +- `basics`: The student should be familiar with the basics exercise. + +## Representer + +This exercise does not require any logic to be added to the [representer][representer] + +## Analyzer + +This exercise does not require any logic to be added to the [analyzer][analyzer]. + +[analyzer]: https://github.com/exercism/python-analyzer +[representer]: https://github.com/exercism/python-representer diff --git a/exercises/concept/little-sisters-vocab/.meta/exemplar.py b/exercises/concept/little-sisters-vocab/.meta/exemplar.py new file mode 100644 index 0000000000..538435daa8 --- /dev/null +++ b/exercises/concept/little-sisters-vocab/.meta/exemplar.py @@ -0,0 +1,61 @@ +def add_prefix_un(word): + ''' + + :param word: str of a root word + :return: str of root word with un prefix + + This function takes `word` as a parameter and + returns a new word with an 'un' prefix. 
+ ''' + return 'un' + word + +def make_word_groups(vocab_words): + ''' + + :param vocab_words: list of vocabulary words with a prefix. + :return: str of prefix followed by vocabulary words with + prefix applied, separated by ' :: '. + + This function takes a `vocab_words` list and returns a string + with the prefix and the words with prefix applied, separated + by ' :: '. + ''' + prefix = vocab_words[0] + joiner = ' :: ' + prefix + + return joiner.join(vocab_words) + +def remove_suffix_ness(word): + ''' + + :param word: str of word to remvoe suffix from. + :return: str of word with suffix removed & spelling adjusted. + + This function takes in a word and returns the base word with `ness` removed. + ''' + word = word[:-4] + if word[-1] == 'i': + word = word[:-1] + 'y' + + return word + +def noun_to_verb(sentence, index): + ''' + + :param sentence: str that uses the word in sentance + :param index: index of the word to remove and transform + :return: str word that changes the extracted adjective to a verb. + + A function takes a `sentence` using the + vocabulary word, and the `index` of the word once that sentence + is split apart. The function should return the extracted + adjective as a verb. + ''' + word = sentence.split()[index] + + if word[-1] == '.': + word = word[:-1] + 'en' + else: + word = word + 'en' + + return word \ No newline at end of file From 88b7c2d0641b5f35aa8efeabfc47969935bfb8c5 Mon Sep 17 00:00:00 2001 From: BethanyG Date: Tue, 18 May 2021 19:48:33 -0700 Subject: [PATCH 04/16] Rewrote stub file and strings_test.py --- .../concept/little-sisters-vocab/strings.py | 47 ++++++++++ .../little-sisters-vocab/strings_test.py | 86 +++++++++++++++++++ 2 files changed, 133 insertions(+) create mode 100644 exercises/concept/little-sisters-vocab/strings.py create mode 100644 exercises/concept/little-sisters-vocab/strings_test.py diff --git a/exercises/concept/little-sisters-vocab/strings.py b/exercises/concept/little-sisters-vocab/strings.py new file mode 100644 index 0000000000..e21b565aa6 --- /dev/null +++ b/exercises/concept/little-sisters-vocab/strings.py @@ -0,0 +1,47 @@ +def add_prefix_un(word): + ''' + + :param word: str of a root word + :return: str of root word with un prefix + + This function takes `word` as a parameter and + returns a new word with an 'un' prefix. + ''' + pass + +def make_word_groups(vocab_words): + ''' + + :param vocab_words: list of vocabulary words with a prefix. + :return: str of prefix followed by vocabulary words with + prefix applied, separated by ' :: '. + + This function takes a `vocab_words` list and returns a string + with the prefix and the words with prefix applied, separated + by ' :: '. + ''' + pass + +def remove_suffix_ness(word): + ''' + + :param word: str of word to remvoe suffix from. + :return: str of word with suffix removed & spelling adjusted. + + This function takes in a word and returns the base word with `ness` removed. + ''' + pass + +def noun_to_verb(sentence, index): + ''' + + :param sentence: str that uses the word in sentance + :param index: index of the word to remove and transform + :return: str word that changes the extracted adjective to a verb. + + A function takes a `sentence` using the + vocabulary word, and the `index` of the word once that sentence + is split apart. The function should return the extracted + adjective as a verb. 
+ ''' + pass \ No newline at end of file diff --git a/exercises/concept/little-sisters-vocab/strings_test.py b/exercises/concept/little-sisters-vocab/strings_test.py new file mode 100644 index 0000000000..c0a8ecd213 --- /dev/null +++ b/exercises/concept/little-sisters-vocab/strings_test.py @@ -0,0 +1,86 @@ +import unittest +from strings import add_prefix_un, make_word_groups, remove_suffix_ness, noun_to_verb + + +class TestStrings(unittest.TestCase): + def test_add_prefix_un(self): + input_data = ["happy", "manageable", "fold", "eaten", "avoidable", "usual"] + result_data = [f'un{item}' for item in input_data] + number_of_variants = range(1, len(input_data) + 1) + + for variant, word, result in zip(number_of_variants, input_data, result_data): + with self.subTest(f"variation #{variant}", word=word, result=result): + self.assertEqual(add_prefix_un(word), result, + msg=f'Expected: {result} but got a different word instead.') + + + def test_make_word_groups_en(self): + input_data = ['en' ,'circle', 'fold', 'close','joy', 'lighten', 'tangle', 'able', 'code', 'culture'] + result_data = 'en :: encircle :: enfold :: enclose :: enjoy :: enlighten :: entangle :: enable :: encode :: enculture' + + self.assertEqual(make_word_groups(input_data), result_data, + msg=f'Expected {result_data} but got something else instead.') + + + def test_make_word_groups_pre(self): + input_data = ['pre', 'serve', 'dispose', 'position', 'requisite', 'digest', + 'natal', 'addressed', 'adolescent', 'assumption', 'mature', 'compute'] + result_data = ('pre :: preserve :: predispose :: preposition :: prerequisite :: ' + 'predigest :: prenatal :: preaddressed :: preadolescent :: preassumption :: ' + 'premature :: precompute') + + self.assertEqual(make_word_groups(input_data), result_data, + msg=f'Expected {result_data} but got something else instead.') + + + def test_make_word_groups_auto(self): + input_data = ['auto', 'didactic', 'graph', 'mate', 'chrome', 'centric', 'complete', + 'echolalia', 'encoder', 'biography'] + result_data = ('auto :: autodidactic :: autograph :: automate :: autochrome :: ' + 'autocentric :: autocomplete :: autoecholalia :: autoencoder :: ' + 'autobiography') + + self.assertEqual(make_word_groups(input_data), result_data, + msg=f'Expected {result_data} but got something else instead.') + + + def test_make_words_groups_inter(self): + input_data = ['inter', 'twine', 'connected', 'dependent', 'galactic', 'action', + 'stellar', 'cellular', 'continental', 'axial', 'operative', 'disciplinary'] + result_data = ('inter :: intertwine :: interconnected :: interdependent :: ' + 'intergalactic :: interaction :: interstellar :: intercellular :: ' + 'intercontinental :: interaxial :: interoperative :: interdisciplinary') + + self.assertEqual(make_word_groups(input_data), result_data, + msg=f'Expected {result_data} but got something else instead.') + + + def test_remove_suffix_ness(self): + input_data = ["heaviness", "sadness", "softness", "crabbiness", "lightness", "artiness", "edginess"] + result_data = ["heavy", "sad", "soft", 'crabby', 'light', 'arty', 'edgy'] + number_of_variants = range(1, len(input_data) + 1) + + for variant, word, result in zip(number_of_variants, input_data, result_data): + with self.subTest(f"variation #{variant}", word=word, result=result): + self.assertEqual(remove_suffix_ness(word), result, + msg=f'Expected: {result} but got a different word instead.') + + def test_noun_to_verb(self): + input_data = ['Look at the bright sky.', + 'His expression went dark.', + 'The bread got hard after 
sitting out.', + 'The butter got soft in the sun.', + 'Her face was filled with light.', + 'The morning fog made everything damp with mist.', + 'He cut the fence pickets short by mistake.', + 'Charles made weak crying noises.', + 'The black oil got on the white dog.'] + index_data = [-2, -1, 3, 3, -1, -3, 5, 2, 1] + result_data = ['brighten','darken','harden','soften', + 'lighten','dampen','shorten','weaken','blacken'] + number_of_variants = range(1, len(input_data) + 1) + + for variant, sentence, index, result in zip(number_of_variants, input_data, index_data, result_data): + with self.subTest(f"variation #{variant}", sentence=sentence, index=index, result=result): + self.assertEqual(noun_to_verb(sentence, index), result, + msg=f'Expected: {result} but got a different word instead.') \ No newline at end of file From 1194ffb4034993e0cd275499c868b7b549c42b05 Mon Sep 17 00:00:00 2001 From: BethanyG Date: Tue, 18 May 2021 19:56:37 -0700 Subject: [PATCH 05/16] Corrected malformed JSON. --- exercises/concept/little-sisters-vocab/.meta/config.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/exercises/concept/little-sisters-vocab/.meta/config.json b/exercises/concept/little-sisters-vocab/.meta/config.json index a9516e4ac3..d7eecdcea9 100644 --- a/exercises/concept/little-sisters-vocab/.meta/config.json +++ b/exercises/concept/little-sisters-vocab/.meta/config.json @@ -6,4 +6,5 @@ "solution": ["strings.py"], "test": ["strings_test.py"], "exemplar": [".meta/exemplar.py"] - } \ No newline at end of file + } +} From 27f283bb387ce78aa6681fee6abea680a02df2ee Mon Sep 17 00:00:00 2001 From: BethanyG Date: Tue, 18 May 2021 20:07:50 -0700 Subject: [PATCH 06/16] Added rewritten exercise to config.json. --- config.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/config.json b/config.json index d85d2c16bb..d3a0f7e3c5 100644 --- a/config.json +++ b/config.json @@ -28,8 +28,8 @@ "exercises": { "concept": [ { - "slug": "processing-logs", - "name": "Processing Logs", + "slug": "little-sisters-vocab", + "name": "Little Sisters Vocab", "uuid": "5a9b42fb-ddf4-424b-995d-f9952ea63c37", "concepts": ["strings"], "prerequisites": ["basics"], From 5c2839ab1fe6259ab6a6f5542fdeab8bb74b1bf4 Mon Sep 17 00:00:00 2001 From: BethanyG Date: Thu, 20 May 2021 10:52:11 -0700 Subject: [PATCH 07/16] Update exercises/concept/little-sisters-vocab/.docs/introduction.md --- exercises/concept/little-sisters-vocab/.docs/introduction.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/exercises/concept/little-sisters-vocab/.docs/introduction.md b/exercises/concept/little-sisters-vocab/.docs/introduction.md index 5c2ed6901a..122027ce40 100644 --- a/exercises/concept/little-sisters-vocab/.docs/introduction.md +++ b/exercises/concept/little-sisters-vocab/.docs/introduction.md @@ -55,7 +55,7 @@ If a `list`, `tuple`, `set` or other collection of individual strings needs to b 'hen 🌿 egg 🌿 rooster' ``` -Code points within a `str` can be referenced by 0-based index number from the right: +Code points within a `str` can be referenced by 0-based index number from the left: ```python creative = '창의적인' From 985934cfb6084be4a9e3a9f6ed802cc4e9325446 Mon Sep 17 00:00:00 2001 From: BethanyG Date: Thu, 20 May 2021 10:53:49 -0700 Subject: [PATCH 08/16] Update exercises/concept/little-sisters-vocab/.docs/introduction.md --- exercises/concept/little-sisters-vocab/.docs/introduction.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/exercises/concept/little-sisters-vocab/.docs/introduction.md b/exercises/concept/little-sisters-vocab/.docs/introduction.md index 122027ce40..9ec38f8bb7 100644 --- a/exercises/concept/little-sisters-vocab/.docs/introduction.md +++ b/exercises/concept/little-sisters-vocab/.docs/introduction.md @@ -70,7 +70,7 @@ creative = '창의적인' '인' ``` -Indexing also works from the left, starting with `-1`: +Indexing also works from the right, starting with `-1`: ```python creative = '창의적인' From 5bd3eab8c2f1a3220a3171940dbe7fed545857c0 Mon Sep 17 00:00:00 2001 From: BethanyG Date: Thu, 20 May 2021 10:59:24 -0700 Subject: [PATCH 09/16] Update exercises/concept/little-sisters-vocab/.docs/hints.md --- exercises/concept/little-sisters-vocab/.docs/hints.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/exercises/concept/little-sisters-vocab/.docs/hints.md b/exercises/concept/little-sisters-vocab/.docs/hints.md index d1aba42ca7..6fd94d1622 100644 --- a/exercises/concept/little-sisters-vocab/.docs/hints.md +++ b/exercises/concept/little-sisters-vocab/.docs/hints.md @@ -34,6 +34,6 @@ There's four activities in the assignment, each with a set of text or words to w [python-str-doc]: https://docs.python.org/3/tutorial/introduction.html#strings -[common sequence operations]https://docs.python.org/3/library/stdtypes.html#text-sequence-type-str +[common sequence operations]: https://docs.python.org/3/library/stdtypes.html#text-sequence-type-str [str-join]: https://docs.python.org/3/library/stdtypes.html#str.join [str-split]: https://docs.python.org/3/library/stdtypes.html#str.split From d3f70634a8f0cb84142cabc61ed8fb695514a45f Mon Sep 17 00:00:00 2001 From: BethanyG Date: Thu, 27 May 2021 19:23:17 -0700 Subject: [PATCH 10/16] Added task annotations to test file and added newline to stub and exemplar. --- .../little-sisters-vocab/.meta/exemplar.py | 2 +- .../concept/little-sisters-vocab/strings.py | 2 +- .../little-sisters-vocab/strings_test.py | 23 +++++++++++++------ 3 files changed, 18 insertions(+), 9 deletions(-) diff --git a/exercises/concept/little-sisters-vocab/.meta/exemplar.py b/exercises/concept/little-sisters-vocab/.meta/exemplar.py index 538435daa8..c190de6e8d 100644 --- a/exercises/concept/little-sisters-vocab/.meta/exemplar.py +++ b/exercises/concept/little-sisters-vocab/.meta/exemplar.py @@ -58,4 +58,4 @@ def noun_to_verb(sentence, index): else: word = word + 'en' - return word \ No newline at end of file + return word diff --git a/exercises/concept/little-sisters-vocab/strings.py b/exercises/concept/little-sisters-vocab/strings.py index e21b565aa6..0a9ee47ee8 100644 --- a/exercises/concept/little-sisters-vocab/strings.py +++ b/exercises/concept/little-sisters-vocab/strings.py @@ -44,4 +44,4 @@ def noun_to_verb(sentence, index): is split apart. The function should return the extracted adjective as a verb. 
''' - pass \ No newline at end of file + pass diff --git a/exercises/concept/little-sisters-vocab/strings_test.py b/exercises/concept/little-sisters-vocab/strings_test.py index c0a8ecd213..8006eb83ff 100644 --- a/exercises/concept/little-sisters-vocab/strings_test.py +++ b/exercises/concept/little-sisters-vocab/strings_test.py @@ -1,8 +1,16 @@ import unittest -from strings import add_prefix_un, make_word_groups, remove_suffix_ness, noun_to_verb +import pytest +from strings import ( + add_prefix_un, + make_word_groups, + remove_suffix_ness, + noun_to_verb +) class TestStrings(unittest.TestCase): + + @pytest.mark.task(taskno=1) def test_add_prefix_un(self): input_data = ["happy", "manageable", "fold", "eaten", "avoidable", "usual"] result_data = [f'un{item}' for item in input_data] @@ -13,7 +21,7 @@ def test_add_prefix_un(self): self.assertEqual(add_prefix_un(word), result, msg=f'Expected: {result} but got a different word instead.') - + @pytest.mark.task(taskno=2) def test_make_word_groups_en(self): input_data = ['en' ,'circle', 'fold', 'close','joy', 'lighten', 'tangle', 'able', 'code', 'culture'] result_data = 'en :: encircle :: enfold :: enclose :: enjoy :: enlighten :: entangle :: enable :: encode :: enculture' @@ -21,7 +29,7 @@ def test_make_word_groups_en(self): self.assertEqual(make_word_groups(input_data), result_data, msg=f'Expected {result_data} but got something else instead.') - + @pytest.mark.task(taskno=2) def test_make_word_groups_pre(self): input_data = ['pre', 'serve', 'dispose', 'position', 'requisite', 'digest', 'natal', 'addressed', 'adolescent', 'assumption', 'mature', 'compute'] @@ -32,7 +40,7 @@ def test_make_word_groups_pre(self): self.assertEqual(make_word_groups(input_data), result_data, msg=f'Expected {result_data} but got something else instead.') - + @pytest.mark.task(taskno=2) def test_make_word_groups_auto(self): input_data = ['auto', 'didactic', 'graph', 'mate', 'chrome', 'centric', 'complete', 'echolalia', 'encoder', 'biography'] @@ -43,7 +51,7 @@ def test_make_word_groups_auto(self): self.assertEqual(make_word_groups(input_data), result_data, msg=f'Expected {result_data} but got something else instead.') - + @pytest.mark.task(taskno=2) def test_make_words_groups_inter(self): input_data = ['inter', 'twine', 'connected', 'dependent', 'galactic', 'action', 'stellar', 'cellular', 'continental', 'axial', 'operative', 'disciplinary'] @@ -54,7 +62,7 @@ def test_make_words_groups_inter(self): self.assertEqual(make_word_groups(input_data), result_data, msg=f'Expected {result_data} but got something else instead.') - + @pytest.mark.task(taskno=3) def test_remove_suffix_ness(self): input_data = ["heaviness", "sadness", "softness", "crabbiness", "lightness", "artiness", "edginess"] result_data = ["heavy", "sad", "soft", 'crabby', 'light', 'arty', 'edgy'] @@ -65,6 +73,7 @@ def test_remove_suffix_ness(self): self.assertEqual(remove_suffix_ness(word), result, msg=f'Expected: {result} but got a different word instead.') + @pytest.mark.task(taskno=4) def test_noun_to_verb(self): input_data = ['Look at the bright sky.', 'His expression went dark.', @@ -83,4 +92,4 @@ def test_noun_to_verb(self): for variant, sentence, index, result in zip(number_of_variants, input_data, index_data, result_data): with self.subTest(f"variation #{variant}", sentence=sentence, index=index, result=result): self.assertEqual(noun_to_verb(sentence, index), result, - msg=f'Expected: {result} but got a different word instead.') \ No newline at end of file + msg=f'Expected: {result} but got a different 
word instead.') From 437bb8a3b6871638ececa7d1cd08a5db995afd15 Mon Sep 17 00:00:00 2001 From: BethanyG Date: Sat, 29 May 2021 12:57:40 -0700 Subject: [PATCH 11/16] Update exercises/concept/little-sisters-vocab/.docs/instructions.md Co-authored-by: Tim Austin --- exercises/concept/little-sisters-vocab/.docs/instructions.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/exercises/concept/little-sisters-vocab/.docs/instructions.md b/exercises/concept/little-sisters-vocab/.docs/instructions.md index 7fc068bf10..cf462f99b8 100644 --- a/exercises/concept/little-sisters-vocab/.docs/instructions.md +++ b/exercises/concept/little-sisters-vocab/.docs/instructions.md @@ -1,6 +1,6 @@ # Instructions -You're helping your younger sister with her English vocabulary homework, which she's finding very tedious. Her class is learning to create new words by adding _prefixes_ and _suffixes_, and have been given different sets of words to modify. The teacher is looking for correctly transformed words and correct spelling, given the word beginning or ending. +You're helping your younger sister with her English vocabulary homework, which she's finding very tedious. Her class is learning to create new words by adding _prefixes_ and _suffixes_. Given a set of words, the teacher is looking for correctly transformed words with correct spelling by adding the prefix to the beginning or the suffix to the ending. There's four activities in the assignment, each with a set of text or words to work with. From 316f53baec4e3f671e3d4ad0bf46f3a3b14cce1b Mon Sep 17 00:00:00 2001 From: BethanyG Date: Sat, 29 May 2021 12:58:20 -0700 Subject: [PATCH 12/16] Update config.json Co-authored-by: Tim Austin --- config.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config.json b/config.json index d3a0f7e3c5..aaeeec300f 100644 --- a/config.json +++ b/config.json @@ -29,7 +29,7 @@ "concept": [ { "slug": "little-sisters-vocab", - "name": "Little Sisters Vocab", + "name": "Little Sister's Vocabulary", "uuid": "5a9b42fb-ddf4-424b-995d-f9952ea63c37", "concepts": ["strings"], "prerequisites": ["basics"], From 1c629bc7e0cdf69ab66f3a587617faf9dd7a8620 Mon Sep 17 00:00:00 2001 From: BethanyG Date: Sat, 29 May 2021 12:58:53 -0700 Subject: [PATCH 13/16] Update exercises/concept/little-sisters-vocab/.docs/hints.md Co-authored-by: Tim Austin --- exercises/concept/little-sisters-vocab/.docs/hints.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/exercises/concept/little-sisters-vocab/.docs/hints.md b/exercises/concept/little-sisters-vocab/.docs/hints.md index 6fd94d1622..c328d2256e 100644 --- a/exercises/concept/little-sisters-vocab/.docs/hints.md +++ b/exercises/concept/little-sisters-vocab/.docs/hints.md @@ -6,8 +6,6 @@ - String methods [.join()][str-join] and [.split()][str-split] ar very helpful when processing strings. - The [Python Docs on Sequence Types][common sequence operations] has a rundown of operations common to all sequences, including `strings`, `lists`, `tuples`, and `ranges`. -You're helping your younger sister with her English vocabulary homework, which she's finding very tedious. Her class is learning to create new words by adding _prefixes_ and _suffixes_, and have been given different sets of words to modify. The teacher is looking for correctly transformed words and correct spelling, given the word beginning or ending. - There's four activities in the assignment, each with a set of text or words to work with. ## 1. 
Add a prefix to a word From 324d7f7d93cb5d545d84690121babd8fd20d254a Mon Sep 17 00:00:00 2001 From: BethanyG Date: Sat, 29 May 2021 13:18:10 -0700 Subject: [PATCH 14/16] Apply suggestions from code review Suggestions/corrections/edits from code review. Co-authored-by: Tim Austin --- .../little-sisters-vocab/.docs/hints.md | 2 +- .../little-sisters-vocab/.docs/instructions.md | 18 +++++++++--------- .../little-sisters-vocab/.docs/introduction.md | 4 ++-- 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/exercises/concept/little-sisters-vocab/.docs/hints.md b/exercises/concept/little-sisters-vocab/.docs/hints.md index c328d2256e..f5e01ed3ea 100644 --- a/exercises/concept/little-sisters-vocab/.docs/hints.md +++ b/exercises/concept/little-sisters-vocab/.docs/hints.md @@ -14,7 +14,7 @@ There's four activities in the assignment, each with a set of text or words to w ## 2. Add prefixes to word groups -- Believe it or not, .join() is all you need. +- Believe it or not, `.join()` is all you need. - Like `.split()`, `.join()` can take an arbitrary-length string, made up of any unicode code points. ## 3. Remove a suffix from a word diff --git a/exercises/concept/little-sisters-vocab/.docs/instructions.md b/exercises/concept/little-sisters-vocab/.docs/instructions.md index cf462f99b8..19d703ad7b 100644 --- a/exercises/concept/little-sisters-vocab/.docs/instructions.md +++ b/exercises/concept/little-sisters-vocab/.docs/instructions.md @@ -14,21 +14,21 @@ Implement the `add_prefix_un()` function that takes `word` as a parameter and re >>> add_prefix_un("happy") 'unhappy' ->>> add_prefix("manageable") +>>> add_prefix_un("manageable") 'unmanagable' ``` ## 2. Add prefixes to word groups -There are four more common prefixes that your sister's class is studying: `en` (_meaning to 'put into' or 'cover with'_), `pre` (_meaning before or forward _), `auto` (_meaning self or same_), and `inter` (_meaning between or among_). In this exercise, the class is creating groups of vocabulary words using these prefixes, so they can be studied together. Each prefix comes in a list with common words it's used with. The students need to apply the prefix and produce a string that shows the prefix and the words with the prefix applied. +There are four more common prefixes that your sister's class is studying: `en` (_meaning to 'put into' or 'cover with'_), `pre` (_meaning 'before' or 'forward'_), `auto` (_meaning 'self' or 'same'_), and `inter` (_meaning 'between' or 'among'_). In this exercise, the class is creating groups of vocabulary words using these prefixes, so they can be studied together. Each prefix comes in a list with common words it's used with. The students need to apply the prefix and produce a string that shows the prefix applied to all of the words. Implement the `make_word_groups()` function that takes a `vocab_words` as a parameter in the following form: [``, `word_1`, `word_2` .... `word_n`], and returns a string with the prefix applied to each word that looks like `' :: :: :: '`. ```python ->>> make_word_groups(['en', close, joy, lighten]) +>>> make_word_groups(['en', 'close', 'joy', 'lighten']) 'en :: enclose :: enjoy :: enlighten' ->>> make_word_groups(['pre', serve, dispose, position]) +>>> make_word_groups(['pre', 'serve', 'dispose', 'position']) 'pre :: preserve :: predispose :: preposition' >> make_word_groups(['auto', 'didactic', 'graph', 'mate']) @@ -40,9 +40,9 @@ Implement the `make_word_groups()` function that takes a `vocab_words` as a para ## 3. 
Remove a suffix from a word -`ness` is a common suffix that means "state of being". In this activity, your sister needs to "extract" the original root word by removing the `ness` suffix. But of course there are pesky spelling rules. If the root word originally ended in a consonant followed by a 'y', then the 'y' was changed to to 'i'. So removing the 'ness' needs to restore the 'y' in the original word. `happiness` --> `happi` --> `happy`. +`ness` is a common suffix that means _'state of being'_. In this activity, your sister needs to find the original root word by removing the `ness` suffix. But of course there are pesky spelling rules: If the root word originally ended in a consonant followed by a 'y', then the 'y' was changed to 'i'. Removing 'ness' needs to restore the 'y' in those root words. e.g. `happiness` --> `happi` --> `happy`. -Implement the `remove_suffix_ness()` function that takes in a word and returns the base word with `ness` removed. +Implement the `remove_suffix_ness()` function that takes in a word and returns the root word without the `ness` suffix. ```python >>> remove_suffix_ness("heaviness") @@ -54,11 +54,11 @@ Implement the `remove_suffix_ness()` function that takes in a word and returns t ## 4. Extract and transform a word -Suffixes are often used to change the part of speech a word has. A common practice in English is "verbing" or "verbifying" -- where a ajective _becomes_ a verb by adding an `en` suffix. +Suffixes are often used to change the part of speech a word has. A common practice in English is "verbing" or "verbifying" -- where an adjective _becomes_ a verb by adding an `en` suffix. -In this task, your sister is going to practice "verbing" by extracting an ajective from a sentence and turning it into a verb. Fortunately, all the words that need to be transformed here are "regular" - they don't need spelling changes to add the suffix. +In this task, your sister is going to practice "verbing" words by extracting an adjective from a sentence and turning it into a verb. Fortunately, all the words that need to be transformed here are "regular" - they don't need spelling changes to add the suffix. -Implement the `noun_to_verb()` function that takes two parameters. A `sentence` using the vocabulary word, and the `index` of the word, once that sentence is split apart. The function should return the extracted ajective as a verb. +Implement the `noun_to_verb()` function that takes two parameters. A `sentence` using the vocabulary word, and the `index` of the word, once that sentence is split apart. The function should return the extracted adjective as a verb. ```python >>> noun_to_verb('I need to make that bright.', -1 ) diff --git a/exercises/concept/little-sisters-vocab/.docs/introduction.md b/exercises/concept/little-sisters-vocab/.docs/introduction.md index 9ec38f8bb7..0020dcba0d 100644 --- a/exercises/concept/little-sisters-vocab/.docs/introduction.md +++ b/exercises/concept/little-sisters-vocab/.docs/introduction.md @@ -13,11 +13,11 @@ A `str` literal can be declared via single `'` or double `"` quotes. The escape >>> double_quoted = "These allow embedded 'single quoting', so you don't have to use an 'escape' character". ``` -Muliti-line strings are declared with `'''` or `"""`. +Multi-line strings are declared with `'''` or `"""`. ```python >>> triple_quoted = '''Three single quotes or "double quotes" in a row allow for multi-line string literals. - Line break characters, tabs and other whitespace is fully supported.
+ Line break characters, tabs and other whitespace are fully supported. You\'ll most often encounter these as "doc strings" or "doc tests" written just below the first line of a function or class definition. They\'re often used with auto documentation ✍ tools. From 30f13724c46da1a15b974602ad666530d18f5d51 Mon Sep 17 00:00:00 2001 From: BethanyG Date: Sat, 29 May 2021 15:12:50 -0700 Subject: [PATCH 15/16] Update exercises/concept/little-sisters-vocab/.docs/introduction.md added escape character example. --- exercises/concept/little-sisters-vocab/.docs/introduction.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/exercises/concept/little-sisters-vocab/.docs/introduction.md b/exercises/concept/little-sisters-vocab/.docs/introduction.md index 0020dcba0d..4213fa68b8 100644 --- a/exercises/concept/little-sisters-vocab/.docs/introduction.md +++ b/exercises/concept/little-sisters-vocab/.docs/introduction.md @@ -11,6 +11,8 @@ A `str` literal can be declared via single `'` or double `"` quotes. The escape >>> single_quoted = 'These allow "double quoting" without "escape" characters.' >>> double_quoted = "These allow embedded 'single quoting', so you don't have to use an 'escape' character". + +>>> escapes = 'If needed, a \'slash\' can be used as an escape character within a string when switching quote styles won\'t work.' ``` Multi-line strings are declared with `'''` or `"""`. From 9593a8f45dc3d7610d805504c45159adc78c0328 Mon Sep 17 00:00:00 2001 From: BethanyG Date: Sat, 29 May 2021 15:28:43 -0700 Subject: [PATCH 16/16] Update exercises/concept/little-sisters-vocab/.docs/introduction.md added three step examples per suggestions on code review. --- .../little-sisters-vocab/.docs/introduction.md | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/exercises/concept/little-sisters-vocab/.docs/introduction.md b/exercises/concept/little-sisters-vocab/.docs/introduction.md index 4213fa68b8..19427b6436 100644 --- a/exercises/concept/little-sisters-vocab/.docs/introduction.md +++ b/exercises/concept/little-sisters-vocab/.docs/introduction.md @@ -110,6 +110,7 @@ If no `stop` is given, the `stop` index will be the end of the string. ```python moon_and_stars = '🌟🌟🌙🌟🌟⭐' +sun_and_moon = sun_and_moon = '🌞🌙🌞🌙🌞🌙🌞🌙🌞' >>> moon_and_stars[1:4] '🌟🌙🌟' @@ -125,7 +126,16 @@ moon_and_stars = '🌟🌟🌙🌟🌟⭐' >>> moon_and_stars[:-3] '🌟🌟🌙' -``` + +>>> sun_and_moon[::2] +'🌞🌞🌞🌞🌞' + +>>> sun_and_moon[:-2:2] +'🌞🌞🌞🌞' + +>>> sun_and_moon[1:-1:2] +'🌙🌙🌙🌙' + Strings can also be broken into smaller strings via [`.split()`][str-split], which will return a `list` of substrings. The list can then be further indexed or split, if needed.