diff --git a/.github/workflows/learning-checks.yml b/.github/workflows/learning-checks.yml index 08f6854..7fd7baf 100644 --- a/.github/workflows/learning-checks.yml +++ b/.github/workflows/learning-checks.yml @@ -27,5 +27,5 @@ jobs: uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd - name: Generate .meta/known-words.md run: bin/extract-known-words - - name: Detect unknown words in concept exemplars - run: bin/detect-unknown-words concept + - name: Detect unknown words in exemplars and examples + run: bin/detect-unknown-words diff --git a/bin/detect-unknown-words b/bin/detect-unknown-words index 0b9be43..c06f8f5 100755 --- a/bin/detect-unknown-words +++ b/bin/detect-unknown-words @@ -163,12 +163,40 @@ def parse_solution(path): return defined, locals_, used +# Syntactic features that don't tokenise as ordinary words but still +# require a concept be in the prereq chain. The detector filters these +# tokens (so they never appear in the per-word "unknown" list), so we +# need an explicit presence check. 
+LOCALS_SYNTAX = re.compile(r'(?:^|\s)(?:::|:>|\[\|)(?:\s|$)') + + +def reachable_concepts(prereqs): + """Return the set of concept slugs reachable through the prereq chain.""" + concepts = set() + for ex_slug in transitive_prereq_concept_exes(prereqs): + concepts.update(EX_META[ex_slug].get('concepts', [])) + return concepts + + +def syntax_violations(sol_path, reachable): + """Return concept slugs whose syntactic features appear in the source + but whose teaching exercise is not reachable through the prereq chain.""" + text = sol_path.read_text() + violations = [] + if LOCALS_SYNTAX.search(text) and 'locals' not in reachable: + violations.append('locals') + return violations + + + def check_concept(slug, sol_path): """Concept exercise: known = its own known-words.md (which already includes own intro plus transitive prereq intros).""" known = known_words_for_concept_ex(slug) defined, locals_, used = parse_solution(sol_path) - return sorted(used - known - defined - locals_) + unknowns = sorted(used - known - defined - locals_) + own_concepts = set(EX_META[slug].get('concepts', [])) + reachable = own_concepts | reachable_concepts(EX_META[slug].get('prerequisites', [])) + return unknowns + syntax_violations(sol_path, reachable) def check_practice(prereqs, sol_path): @@ -178,7 +206,8 @@ def check_practice(prereqs, sol_path): for cx in transitive_prereq_concept_exes(prereqs): known |= known_words_for_concept_ex(cx) defined, locals_, used = parse_solution(sol_path) - return sorted(used - known - defined - locals_) + unknowns = sorted(used - known - defined - locals_) + return unknowns + syntax_violations(sol_path, reachable_concepts(prereqs)) def main(): diff --git a/concepts/arrays/about.md b/concepts/arrays/about.md index 488e0ea..f4f89f4 100644 --- a/concepts/arrays/about.md +++ b/concepts/arrays/about.md @@ -10,6 +10,7 @@ the stack: | `1array` | `( a -- { a } )` | | `2array` | `( a b -- { a b } )` | | `3array` | `( a b c -- { a b c } )` | +| `array?` | `( obj -- ? 
)` — type predicate | A few protocol words from `sequences` come up so often with arrays that they are worth knowing as a unit: diff --git a/concepts/conditionals/about.md b/concepts/conditionals/about.md index edaec41..5f8c350 100644 --- a/concepts/conditionals/about.md +++ b/concepts/conditionals/about.md @@ -10,6 +10,14 @@ when ( ? quot -- ) unless ( ? quot -- ) ``` +Two starred variants treat the boolean as a value worth using +when it's truthy — the canonical "value-or-`f`" pair: + +``` +if* ( ? true false -- ) ! truthy: true called WITH value +unless* ( ? false -- ) ! falsy: false runs and pushes default +``` + ```factor : abs ( x -- y ) dup 0 < [ neg ] [ ] if ; : shout ( s -- ) dup empty? [ drop ] [ >upper print ] if ; diff --git a/concepts/hash-sets/.meta/config.json b/concepts/hash-sets/.meta/config.json new file mode 100644 index 0000000..b10db57 --- /dev/null +++ b/concepts/hash-sets/.meta/config.json @@ -0,0 +1,6 @@ +{ + "authors": [ + "keiravillekode" + ], + "blurb": "Track unique values with mutable hash-sets — the canonical visited set for graph traversals." +} diff --git a/concepts/hash-sets/about.md b/concepts/hash-sets/about.md new file mode 100644 index 0000000..642c68a --- /dev/null +++ b/concepts/hash-sets/about.md @@ -0,0 +1,59 @@ +# About + +Hash-sets implement the [`sets`][sets] protocol with hashing +under the hood, giving O(1) average insert, lookup, and delete. +They're mutable in place, which makes them ideal for the +*visited set* pattern in graph traversals. + +```factor +USING: hash-sets kernel sets ; + +HS{ } clone ! fresh empty mutable set +"NS-1024" over adjoin ! insert (no-op if already present) +"NS-1024" over in? . ! => t (membership test) +"WB-203" over adjoin +over cardinality . ! => 2 +"NS-1024" over delete ! remove +over cardinality . ! => 1 +``` + +| word | effect | +|---------------|-------------------------------------------| +| `HS{ }` | empty hash-set literal (shared — `clone` it!) 
| +| `adjoin` | `( elt set -- )` — destructive insert | +| `in?` | `( elt set -- ? )` — membership | +| `delete` | `( elt set -- )` — destructive remove | +| `cardinality` | `( set -- n )` — number of elements | +| `members` | `( set -- seq )` — enumerate as sequence | +| `union` | `( set1 set2 -- set )` | +| `intersect` | `( set1 set2 -- set )` | +| `diff` | `( set1 set2 -- set )` | + +A subtle point about `in?` versus `member?` (from +`sequences`): both test membership, but `member?` does a +linear scan over a sequence, while `in?` dispatches to whatever +the set type's protocol method is — for `HS{ }`, that's a +hash lookup. Use `in?` once your "visited" container has more +than a handful of entries. + +## Pairs nicely with hashtables + +`HS{ }` for "is X visited?" pairs naturally with `H{ }` for +"who are X's neighbours?". A textbook BFS is just: + +```factor +visited adjoin +queue push +[ queue empty? not ] [ + queue pop dup neighbours-quot call [ + dup visited in? [ drop ] [ + [ visited adjoin ] [ queue push ] bi + ] if + ] each +] while +``` + +The visited set deduplicates work; the queue threads frontier +nodes; the neighbours map (a hashtable) supplies the graph. + +[sets]: https://docs.factorcode.org/content/vocab-sets.html diff --git a/concepts/hash-sets/introduction.md b/concepts/hash-sets/introduction.md new file mode 100644 index 0000000..60bd58f --- /dev/null +++ b/concepts/hash-sets/introduction.md @@ -0,0 +1,6 @@ +# Introduction + +Hash-sets are mutable, unordered collections that store each +value at most once. Lookup, insert, and delete are all O(1) +average. They're the natural choice for "have I seen this +before?" — the canonical *visited set* of graph traversals. 
diff --git a/concepts/hash-sets/links.json b/concepts/hash-sets/links.json new file mode 100644 index 0000000..b2d62d6 --- /dev/null +++ b/concepts/hash-sets/links.json @@ -0,0 +1,10 @@ +[ + { + "url": "https://docs.factorcode.org/content/vocab-sets.html", + "description": "sets vocabulary reference" + }, + { + "url": "https://docs.factorcode.org/content/vocab-hash-sets.html", + "description": "hash-sets vocabulary reference" + } +] diff --git a/concepts/higher-order-sequences/about.md b/concepts/higher-order-sequences/about.md index cf4964b..2584faf 100644 --- a/concepts/higher-order-sequences/about.md +++ b/concepts/higher-order-sequences/about.md @@ -23,6 +23,7 @@ The full common cast: | `filter` | `( seq quot -- newseq )` | | `reject` | `( seq quot -- newseq )` | | `find` | `( seq quot -- i/f elt/f )` | +| `find-last` | `( seq quot -- i/f elt/f )` | | `reduce` | `( seq init quot -- result )` | | `count` | `( seq quot -- n )` | | `any?` | `( seq quot -- ? )` | @@ -33,6 +34,12 @@ The full common cast: | `infimum` | `( seq -- elt )` | | `supremum` | `( seq -- elt )` | +When *your* word forwards a runtime quotation to one of these +combinators, declare your word with `; inline` so the +combinator's effect inference can see the quotation's shape at +the call site. Words built only from literal quotations don't +need it. + Beyond the core sequence ops: | vocab | provides | diff --git a/concepts/numbers/about.md b/concepts/numbers/about.md index 9b36efb..18b024a 100644 --- a/concepts/numbers/about.md +++ b/concepts/numbers/about.md @@ -18,9 +18,11 @@ Most arithmetic lives in [`math`][math]: `+`, `-`, `*`, `/`, `mod`, `even?`, `odd?`, `positive?`, `negative?`. Comparison: `<`, `<=`, `>`, `>=`, `=`. From [`math.order`][math.order]: `min`, `max`, `between?`. From [`math.functions`][math.functions]: `floor`, -`ceiling`, `round`. Constructors and conversions like `>integer`, -`>float`, `>fraction` are nearby. +`ceiling`, `round`, `divisor?`. 
From [`math.primes`][math.primes]: +`prime?`. Constructors and conversions like `>integer`, `>float`, +`>fraction` are nearby. [math]: https://docs.factorcode.org/content/vocab-math.html [math.order]: https://docs.factorcode.org/content/vocab-math.order.html [math.functions]: https://docs.factorcode.org/content/vocab-math.functions.html +[math.primes]: https://docs.factorcode.org/content/vocab-math.primes.html diff --git a/concepts/reductions/about.md b/concepts/reductions/about.md index cbc7e7d..764462a 100644 --- a/concepts/reductions/about.md +++ b/concepts/reductions/about.md @@ -17,15 +17,32 @@ USING: math sequences ; ``` A custom seed lets `reduce` express folds that `sum` or `product` -cannot. For example, applying a per-step floor: +cannot. For example, the largest value in a sequence with a +default if every element loses: ```factor -100 { 50 -200 30 } [ + 0 max ] reduce . ! => 30 +USING: math.order ; + +{ 3 1 -4 5 -2 } 0 [ max ] reduce . ! => 5 +{ -3 -1 -4 } 0 [ max ] reduce . ! => 0 +``` + +The seed `0` participates in the comparison, so an all-negative +sequence still produces `0` rather than its smallest value. + +## `produce` — the unfold + +The dual of `reduce` is `produce` (in [`sequences`][sequences]): +where `reduce` *consumes* a sequence, `produce` *generates* +one. A predicate quotation tests the running state; while it +returns truthy, a body quotation produces the next element. + +``` +produce ( pred quot -- seq ) ``` -(`100 + 50 = 150`; `150 + (-200) = -50`, floored to `0`; -`0 + 30 = 30`.) The floor at each step matters — `sum +` would -produce `-20`. +Used together, `reduce` and `produce` form Factor's fold/unfold +pair. ## Cumulative reductions @@ -50,7 +67,9 @@ USING: math.statistics ; ``` Cumulative reductions are useful when you want to inspect *how* -a quantity evolved across the sequence, not only its final value -— running balances, peak watermarks, low watermarks, and so on. 
+a quantity evolved across the sequence, not only its final +value. Chaining them is also useful: the output of one +cumulative is itself a sequence, ready to feed into another. [math.statistics]: https://docs.factorcode.org/content/vocab-math.statistics.html +[sequences]: https://docs.factorcode.org/content/vocab-sequences.html diff --git a/concepts/sequences/about.md b/concepts/sequences/about.md index d042a6c..e2368c1 100644 --- a/concepts/sequences/about.md +++ b/concepts/sequences/about.md @@ -9,7 +9,12 @@ with the others. |-------------|-----------------------------------| | `length` | `( seq -- n )` | | `first` | `( seq -- elt )` | +| `second` | `( seq -- elt )` | +| `third` | `( seq -- elt )` | +| `fourth` | `( seq -- elt )` | | `last` | `( seq -- elt )` | +| `rest` | `( seq -- tailseq )` | +| `but-last` | `( seq -- headseq )` | | `nth` | `( n seq -- elt )` (0-based) | | `head` | `( seq n -- headseq )` | | `tail` | `( seq n -- tailseq )` | @@ -20,12 +25,16 @@ with the others. | `unclip` | `( seq -- rest first )` | | `unclip-last` | `( seq -- butlast last )` | | `empty?` | `( seq -- ? )` | +| `if-empty` | `( seq emptyquot nonemptyquot -- … )` | | `member?` | `( elt seq -- ? )` | | `reverse` | `( seq -- newseq )` | | `index` | `( elt seq -- i/f )` | | `concat` | `( seqs -- seq )` | | `sum` | `( seq -- n )` | | `product` | `( seq -- n )` | +| `find` | `( seq quot -- i/f elt/f )` | +| `find-last` | `( seq quot -- i/f elt/f )` | +| `produce` | `( pred quot -- seq )` | Arrays are immutable; the `prefix`/`suffix`/`append` operations all return new sequences without modifying the original. Vectors are diff --git a/concepts/windows/.meta/config.json b/concepts/windows/.meta/config.json new file mode 100644 index 0000000..0a49697 --- /dev/null +++ b/concepts/windows/.meta/config.json @@ -0,0 +1,6 @@ +{ + "authors": [ + "keiravillekode" + ], + "blurb": "View a sequence as windows: disjoint chunks, sliding windows, predicate breaks, and adjacent-pair runs." 
+} diff --git a/concepts/windows/about.md b/concepts/windows/about.md new file mode 100644 index 0000000..93258ff --- /dev/null +++ b/concepts/windows/about.md @@ -0,0 +1,44 @@ +# About + +Four words from [`grouping`][grouping] and +[`splitting`][splitting] cover almost every "cut a sequence into +pieces" need: + +```factor +USING: grouping kernel sequences splitting splitting.monotonic ; + +! Disjoint chunks of N elements +{ 1 2 3 4 5 6 7 } 3 group . +! => { { 1 2 3 } { 4 5 6 } { 7 } } + +! Overlapping sliding window of N elements +{ 1 2 3 4 5 } 2 clump . +! => { { 1 2 } { 2 3 } { 3 4 } { 4 5 } } + +! Split wherever a predicate fires on an element +{ 1 2 0 3 4 0 5 } [ zero? ] split-when . +! => { { 1 2 } { 3 4 } { 5 } } + +! Group runs where adjacent elements are "the same" by some test +{ 1 1 2 2 2 3 } [ = ] monotonic-split . +! => { { 1 1 } { 2 2 2 } { 3 } } +``` + +| word | effect | +|-------------------|-------------------------------------------------------------------------| +| `group` | `( seq n -- groups )` — disjoint *n*-sized chunks | +| `clump` | `( seq n -- clumps )` — overlapping *n*-sized windows | +| `split-when` | `( seq quot: ( elt -- ? ) -- pieces )` — break when quot is truthy | +| `monotonic-split` | `( seq quot: ( a b -- ? ) -- pieces )` — break when *adjacent* quot is `f` | + +A useful mental model: + +- **`group` / `clump`** look at sizes; the *n* parameter says + how big each window is. +- **`split-when`** looks at *single elements*; the predicate + decides whether each element is a "break." +- **`monotonic-split`** looks at *pairs*; the predicate decides + whether two adjacent elements still belong in the same run. 
+ +[grouping]: https://docs.factorcode.org/content/vocab-grouping.html +[splitting]: https://docs.factorcode.org/content/vocab-splitting.html diff --git a/concepts/windows/introduction.md b/concepts/windows/introduction.md new file mode 100644 index 0000000..0528462 --- /dev/null +++ b/concepts/windows/introduction.md @@ -0,0 +1,7 @@ +# Introduction + +Sometimes you need to view a sequence as a series of *pieces* +rather than element-by-element. Factor's grouping and splitting +words give you four ways to do that: fixed-size disjoint chunks, +overlapping sliding windows, predicate-driven breaks, and +adjacent-pair "run" grouping. diff --git a/concepts/windows/links.json b/concepts/windows/links.json new file mode 100644 index 0000000..689eefd --- /dev/null +++ b/concepts/windows/links.json @@ -0,0 +1,10 @@ +[ + { + "url": "https://docs.factorcode.org/content/vocab-grouping.html", + "description": "grouping vocabulary reference" + }, + { + "url": "https://docs.factorcode.org/content/vocab-splitting.html", + "description": "splitting vocabulary reference" + } +] diff --git a/config.json b/config.json index fca1269..9bdccd2 100644 --- a/config.json +++ b/config.json @@ -215,20 +215,6 @@ ], "status": "beta" }, - { - "slug": "mixed-juices", - "name": "Mixed Juices", - "uuid": "d8a20e2e-c1b1-4bd3-aca0-c942ad5a465b", - "concepts": [ - "while" - ], - "prerequisites": [ - "combinators", - "higher-order-sequences", - "conditionals" - ], - "status": "beta" - }, { "slug": "ledger-loop", "name": "Ledger Loop", @@ -270,6 +256,18 @@ ], "status": "beta" }, + { + "slug": "belgian-boxcars", + "name": "Belgian Boxcars", + "uuid": "ba63576a-4d1a-49a1-aa14-07e6a4bfbbc1", + "concepts": [ + "windows" + ], + "prerequisites": [ + "higher-order-sequences" + ], + "status": "beta" + }, { "slug": "role-playing-game", "name": "Role Playing Game", @@ -375,6 +373,36 @@ ], "status": "beta" }, + { + "slug": "mixed-juices", + "name": "Mixed Juices", + "uuid": "d8a20e2e-c1b1-4bd3-aca0-c942ad5a465b", 
+ "concepts": [ + "while" + ], + "prerequisites": [ + "combinators", + "higher-order-sequences", + "conditionals", + "locals" + ], + "status": "beta" + }, + { + "slug": "lighthouse-logbook", + "name": "Lighthouse Logbook", + "uuid": "616f2c19-8267-4a6e-97dd-cc777e71f0ae", + "concepts": [ + "hash-sets" + ], + "prerequisites": [ + "assocs", + "mutation", + "locals", + "while" + ], + "status": "beta" + }, { "slug": "garden-gathering", "name": "Garden Gathering", @@ -629,7 +657,8 @@ "strings", "unicode", "higher-order-sequences", - "tabulation" + "tabulation", + "windows" ], "difficulty": 3 }, @@ -691,7 +720,8 @@ "sequences", "higher-order-sequences", "case", - "indexed-iteration" + "indexed-iteration", + "locals" ], "difficulty": 3 }, @@ -749,7 +779,8 @@ "numbers", "sequences", "while", - "mutation" + "mutation", + "locals" ], "difficulty": 3 }, @@ -763,7 +794,8 @@ "prerequisites": [ "conditionals", "strings", - "higher-order-sequences" + "higher-order-sequences", + "recursion" ], "difficulty": 3 }, @@ -792,7 +824,8 @@ "prerequisites": [ "curry-compose-fry", "strings", - "unicode" + "unicode", + "locals" ], "difficulty": 3 }, @@ -826,6 +859,18 @@ ], "difficulty": 3 }, + { + "slug": "allergies", + "name": "Allergies", + "uuid": "fbe640a0-d4d1-4cf4-8a8d-60c1c604b57a", + "practices": [], + "prerequisites": [ + "hash-sets", + "bitwise-operations", + "indexed-iteration" + ], + "difficulty": 4 + }, { "slug": "armstrong-numbers", "name": "Armstrong Numbers", @@ -840,7 +885,8 @@ "numbers", "sequences", "while", - "mutation" + "mutation", + "locals" ], "difficulty": 4 }, @@ -871,7 +917,8 @@ "numbers", "higher-order-sequences", "errors", - "unicode" + "unicode", + "windows" ], "difficulty": 4 }, @@ -962,7 +1009,9 @@ "prerequisites": [ "strings", "assocs", - "unicode" + "unicode", + "windows", + "locals" ], "difficulty": 4 }, @@ -988,7 +1037,9 @@ "numbers", "strings", "unicode", - "errors" + "errors", + "windows", + "locals" ], "difficulty": 5 }, @@ -1034,7 +1085,8 @@ 
"higher-order-sequences", "unicode", "locals", - "quotations-call" + "quotations-call", + "windows" ], "difficulty": 5 }, @@ -1069,7 +1121,8 @@ "sequences", "mutation", "reductions", - "indexed-iteration" + "indexed-iteration", + "locals" ], "difficulty": 5 }, @@ -1085,7 +1138,8 @@ "recursion", "strings", "case", - "mutation" + "mutation", + "locals" ], "difficulty": 5 }, @@ -1100,7 +1154,9 @@ "strings", "recursion", "unicode", - "arrays" + "arrays", + "windows", + "locals" ], "difficulty": 5 }, @@ -1130,7 +1186,8 @@ "higher-order-sequences", "sequences", "case", - "curry-compose-fry" + "curry-compose-fry", + "locals" ], "difficulty": 5 }, @@ -1146,7 +1203,20 @@ "errors", "higher-order-sequences", "numbers", - "sequences" + "sequences", + "reductions", + "locals" + ], + "difficulty": 6 + }, + { + "slug": "change", + "name": "Change", + "uuid": "2de1df4d-c793-4db8-b6fb-e10484fe7ec8", + "practices": [], + "prerequisites": [ + "hash-sets", + "tabulation" ], "difficulty": 6 }, @@ -1160,7 +1230,8 @@ "prerequisites": [ "higher-order-sequences", "sequences", - "bitwise-operations" + "bitwise-operations", + "locals" ], "difficulty": 6 }, @@ -1175,7 +1246,9 @@ "strings", "sequences", "assocs", - "errors" + "errors", + "windows", + "locals" ], "difficulty": 6 }, @@ -1191,7 +1264,8 @@ "sequences", "arrays", "higher-order-sequences", - "mutation" + "mutation", + "locals" ], "difficulty": 6 }, @@ -1206,7 +1280,8 @@ "numbers", "recursion", "strings", - "errors" + "errors", + "locals" ], "difficulty": 6 }, @@ -1236,7 +1311,8 @@ "conditionals", "errors", "numbers", - "while" + "while", + "locals" ], "difficulty": 7 }, @@ -1250,7 +1326,8 @@ "prerequisites": [ "higher-order-sequences", "sequences", - "strings" + "strings", + "locals" ], "difficulty": 7 }, @@ -1304,7 +1381,8 @@ "higher-order-sequences", "combinators", "errors", - "mutation" + "mutation", + "locals" ], "difficulty": 8 }, @@ -1321,7 +1399,8 @@ "conditionals", "sequences", "higher-order-sequences", - "mutation" + 
"mutation", + "locals" ], "difficulty": 8 }, @@ -1417,11 +1496,6 @@ "slug": "mutation", "name": "Mutation" }, - { - "uuid": "e6daff41-60a4-4b82-a245-411150d56dfa", - "slug": "while", - "name": "While" - }, { "uuid": "fbd0d673-64ba-4de7-aa4d-23654172a252", "slug": "reductions", @@ -1437,6 +1511,11 @@ "slug": "indexed-iteration", "name": "Indexed Iteration" }, + { + "uuid": "1d81f2d0-a8fe-472a-b9ff-b64cd09335a4", + "slug": "windows", + "name": "Windows" + }, { "uuid": "e5594192-3b54-40f3-ba35-e45ad2f7a007", "slug": "tuples", @@ -1472,6 +1551,16 @@ "slug": "locals", "name": "Locals" }, + { + "uuid": "e6daff41-60a4-4b82-a245-411150d56dfa", + "slug": "while", + "name": "While" + }, + { + "uuid": "fa730f6b-2c32-4890-8b11-3103615e41b4", + "slug": "hash-sets", + "name": "Hash Sets" + }, { "uuid": "c8a58a3d-d6e4-4325-b0c9-d2ee4bfc9df3", "slug": "dynamic-variables", diff --git a/exercises/concept/annalyns-infiltration/.docs/instructions.md b/exercises/concept/annalyns-infiltration/.docs/instructions.md index e324864..b572957 100644 --- a/exercises/concept/annalyns-infiltration/.docs/instructions.md +++ b/exercises/concept/annalyns-infiltration/.docs/instructions.md @@ -59,11 +59,13 @@ Annalyn can try sneaking into the camp to free her friend. This is a risky thing If the prisoner is sleeping, she can't be rescued: she would be startled by Annalyn's sudden appearance and wake up the knight and archer. Implement a word named `can-free-prisoner` that takes four boolean values. -The first three parameters indicate whether the knight, archer and prisoner, respectively, are awake. -The last parameter indicates whether Annalyn's pet dog is present. +The parameters, in order, indicate whether the archer is awake, whether the dog is present, whether the prisoner is awake, and whether the knight is awake. It returns `t` if the 'Free Prisoner' action is available, otherwise `f`: ```factor -f t f f can-free-prisoner . -! => f +f t t f can-free-prisoner . +! 
=> t (archer asleep, dog present, prisoner awake, knight asleep) + +t t f f can-free-prisoner . +! => f (archer is awake, so no rescue possible) ``` diff --git a/exercises/concept/annalyns-infiltration/.docs/introduction.md b/exercises/concept/annalyns-infiltration/.docs/introduction.md index a0a4bf8..eab81ac 100644 --- a/exercises/concept/annalyns-infiltration/.docs/introduction.md +++ b/exercises/concept/annalyns-infiltration/.docs/introduction.md @@ -13,15 +13,18 @@ dup ( x -- x x ) dupd ( x y -- x x y ) drop ( x -- ) swap ( x y -- y x ) +swapd ( x y z -- y x z ) over ( x y -- x y x ) pick ( x y z -- x y z x ) rot ( x y z -- y z x ) -rot ( x y z -- z x y ) +rotd ( w x y z -- w y z x ) spin ( x y z -- z y x ) nip ( x y -- y ) 2dup ( x y -- x y x y ) 2drop ( x y -- ) +2nip ( x y z -- z ) 2swap ( x y z w -- z w x y ) ``` diff --git a/exercises/concept/annalyns-infiltration/.meta/exemplar.factor b/exercises/concept/annalyns-infiltration/.meta/exemplar.factor index 1f8c12d..efdd5c8 100644 --- a/exercises/concept/annalyns-infiltration/.meta/exemplar.factor +++ b/exercises/concept/annalyns-infiltration/.meta/exemplar.factor @@ -1,4 +1,4 @@ -USING: kernel locals ; +USING: kernel ; IN: annalyns-infiltration : can-do-fast-attack ( knight-awake -- ? ) @@ -10,7 +10,5 @@ IN: annalyns-infiltration : can-signal-prisoner ( archer-awake prisoner-awake -- ? ) swap not and ; -:: can-free-prisoner ( knight-awake archer-awake prisoner-awake dog-present -- ? ) - dog-present archer-awake not and - prisoner-awake knight-awake not and archer-awake not and - or ; +: can-free-prisoner ( archer-awake dog-present prisoner-awake knight-awake -- ? 
) + not and or swap not and ; diff --git a/exercises/concept/annalyns-infiltration/annalyns-infiltration/annalyns-infiltration-tests.factor b/exercises/concept/annalyns-infiltration/annalyns-infiltration/annalyns-infiltration-tests.factor index 0da8714..967c495 100644 --- a/exercises/concept/annalyns-infiltration/annalyns-infiltration/annalyns-infiltration-tests.factor +++ b/exercises/concept/annalyns-infiltration/annalyns-infiltration/annalyns-infiltration-tests.factor @@ -21,18 +21,19 @@ IN: annalyns-infiltration.tests { f } [ t t can-signal-prisoner ] unit-test ! TASK: 4 freeing prisoner +! Args are ( archer-awake dog-present prisoner-awake knight-awake ). { f } [ f f f f can-free-prisoner ] unit-test -{ t } [ f f f t can-free-prisoner ] unit-test +{ f } [ f f f t can-free-prisoner ] unit-test { t } [ f f t f can-free-prisoner ] unit-test -{ t } [ f f t t can-free-prisoner ] unit-test -{ f } [ f t f f can-free-prisoner ] unit-test -{ f } [ f t f t can-free-prisoner ] unit-test -{ f } [ f t t f can-free-prisoner ] unit-test -{ f } [ f t t t can-free-prisoner ] unit-test +{ f } [ f f t t can-free-prisoner ] unit-test +{ t } [ f t f f can-free-prisoner ] unit-test +{ t } [ f t f t can-free-prisoner ] unit-test +{ t } [ f t t f can-free-prisoner ] unit-test +{ t } [ f t t t can-free-prisoner ] unit-test { f } [ t f f f can-free-prisoner ] unit-test -{ t } [ t f f t can-free-prisoner ] unit-test +{ f } [ t f f t can-free-prisoner ] unit-test { f } [ t f t f can-free-prisoner ] unit-test -{ t } [ t f t t can-free-prisoner ] unit-test +{ f } [ t f t t can-free-prisoner ] unit-test { f } [ t t f f can-free-prisoner ] unit-test { f } [ t t f t can-free-prisoner ] unit-test { f } [ t t t f can-free-prisoner ] unit-test diff --git a/exercises/concept/annalyns-infiltration/annalyns-infiltration/annalyns-infiltration.factor b/exercises/concept/annalyns-infiltration/annalyns-infiltration/annalyns-infiltration.factor index dbaa63f..81a23c5 100644 --- 
a/exercises/concept/annalyns-infiltration/annalyns-infiltration/annalyns-infiltration.factor +++ b/exercises/concept/annalyns-infiltration/annalyns-infiltration/annalyns-infiltration.factor @@ -10,5 +10,5 @@ IN: annalyns-infiltration : can-signal-prisoner ( archer-awake prisoner-awake -- ? ) "unimplemented" throw ; -: can-free-prisoner ( knight-awake archer-awake prisoner-awake dog-present -- ? ) +: can-free-prisoner ( archer-awake dog-present prisoner-awake knight-awake -- ? ) "unimplemented" throw ; diff --git a/exercises/concept/belgian-boxcars/.docs/hints.md b/exercises/concept/belgian-boxcars/.docs/hints.md new file mode 100644 index 0000000..833307a --- /dev/null +++ b/exercises/concept/belgian-boxcars/.docs/hints.md @@ -0,0 +1,25 @@ +# Hints + +## 1. `couple` + +- One word from `grouping` does it: pick the disjoint-chunks + one. + +## 2. `peek-couplings` + +- Sliding window of size 2 — the overlapping cousin of `group`. + +## 3. `split-at-junctions` + +- `split-when` takes a predicate `( elt -- ? )`. Inside it, you + need to ask "is this car one of the junctions?" — that's + `member?` against the `junctions` array. +- The `junctions` value is needed inside the per-element + quotation, which means baking it in with `with` (already + taught in `boutique-bookkeeping`). + +## 4. `coalesce-cargo` + +- `monotonic-split` takes a predicate over *adjacent pairs* + `( a b -- ? )`. You want a run to continue while two adjacent + cars carry the *same* cargo. diff --git a/exercises/concept/belgian-boxcars/.docs/instructions.md b/exercises/concept/belgian-boxcars/.docs/instructions.md new file mode 100644 index 0000000..8770b82 --- /dev/null +++ b/exercises/concept/belgian-boxcars/.docs/instructions.md @@ -0,0 +1,53 @@ +# Instructions + +It's another busy night at the Antwerp marshalling yard. 
You're +the dispatcher on duty: incoming freight cars need to be +regrouped into outbound trains, couplings inspected, routes split +at junctions, and identical cargo coalesced for billing. + +Each *car* is a string naming what it's carrying: +`"coal"`, `"timber"`, `"crates"`, etc. + +## 1. Couple cars into trains + +Define `couple` to take an array of cars and a length `n`, and +return non-overlapping trains of `n` cars each. The final train +may be shorter if the count doesn't divide evenly. + +```factor +{ "coal" "coal" "coke" "ore" "ore" "timber" "timber" } 3 couple . +! => { { "coal" "coal" "coke" } { "ore" "ore" "timber" } { "timber" } } +``` + +## 2. Peek at the couplings + +Define `peek-couplings` to take an array of cars and return every +adjacent pair, so the inspector can examine each coupling. + +```factor +{ "coal" "ore" "timber" "crates" } peek-couplings . +! => { { "coal" "ore" } { "ore" "timber" } { "timber" "crates" } } +``` + +## 3. Split the chain at junctions + +Define `split-at-junctions` to take an array of cars and an +array of `junctions` (cars that mark a break point), and return +the legs between junction cars. The junction cars themselves +aren't kept. + +```factor +{ "coal" "coal" "switch" "ore" "switch" "timber" } +{ "switch" } split-at-junctions . +! => { { "coal" "coal" } { "ore" } { "timber" } } +``` + +## 4. Coalesce identical cargo + +Define `coalesce-cargo` to take an array of cars and group +*consecutive* cars carrying the same cargo into runs. + +```factor +{ "coal" "coal" "ore" "ore" "ore" "timber" "coal" } coalesce-cargo . +! 
=> { { "coal" "coal" } { "ore" "ore" "ore" } { "timber" } { "coal" } } +``` diff --git a/exercises/concept/belgian-boxcars/.docs/introduction.md b/exercises/concept/belgian-boxcars/.docs/introduction.md new file mode 100644 index 0000000..a442426 --- /dev/null +++ b/exercises/concept/belgian-boxcars/.docs/introduction.md @@ -0,0 +1,81 @@ +# Introduction + +Sometimes you need to view a sequence as a series of *pieces* +rather than element-by-element. The +[`grouping`][grouping] and [`splitting`][splitting] vocabularies +give you four ways to do that. + +## `group` — disjoint chunks of size *n* + +``` +group ( seq n -- groups ) +``` + +```factor +USING: grouping ; + +{ 1 2 3 4 5 6 7 } 3 group . +! => { { 1 2 3 } { 4 5 6 } { 7 } } +``` + +If the sequence length isn't a multiple of *n*, the final group +is shorter than *n*. + +## `clump` — overlapping sliding window of size *n* + +``` +clump ( seq n -- clumps ) +``` + +```factor +USING: grouping ; + +{ 1 2 3 4 5 } 2 clump . +! => { { 1 2 } { 2 3 } { 3 4 } { 4 5 } } +``` + +If the sequence is shorter than *n*, the result is empty. + +## `split-when` — break wherever a predicate fires + +``` +split-when ( seq quot: ( elt -- ? ) -- pieces ) +``` + +```factor +USING: splitting ; + +{ 1 2 0 3 4 0 5 } [ zero? ] split-when . +! => { { 1 2 } { 3 4 } { 5 } } +``` + +Each element gets its own predicate call; truthy elements are +the *break points* and aren't kept in the result. + +## `monotonic-split` — break runs where an adjacent-pair check fails + +``` +monotonic-split ( seq quot: ( a b -- ? ) -- pieces ) +``` + +```factor +USING: splitting.monotonic ; + +{ 1 1 2 2 2 3 } [ = ] monotonic-split . +! => { { 1 1 } { 2 2 2 } { 3 } } +``` + +The predicate runs on *adjacent pairs*. While it returns truthy, +the run continues; when it returns `f`, a new piece starts. With +`[ = ]` you get runs of equal elements; with `[ <= ]` you get +non-decreasing runs; and so on. 
+ +## When to reach for which + +- **`group`/`clump`**: you know the *size* you want. +- **`split-when`**: you know what a *break element* looks like. +- **`monotonic-split`**: you know what makes two adjacent + elements *still belong together*. + +[grouping]: https://docs.factorcode.org/content/vocab-grouping.html +[splitting]: https://docs.factorcode.org/content/vocab-splitting.html diff --git a/exercises/concept/belgian-boxcars/.meta/config.json b/exercises/concept/belgian-boxcars/.meta/config.json new file mode 100644 index 0000000..f43dc7f --- /dev/null +++ b/exercises/concept/belgian-boxcars/.meta/config.json @@ -0,0 +1,17 @@ +{ + "authors": [ + "keiravillekode" + ], + "files": { + "solution": [ + "belgian-boxcars/belgian-boxcars.factor" + ], + "test": [ + "belgian-boxcars/belgian-boxcars-tests.factor" + ], + "exemplar": [ + ".meta/exemplar.factor" + ] + }, + "blurb": "Run the Antwerp marshalling yard by chunking, sliding, and splitting freight-car sequences with group/clump/split-when/monotonic-split." +} diff --git a/exercises/concept/belgian-boxcars/.meta/design.md b/exercises/concept/belgian-boxcars/.meta/design.md new file mode 100644 index 0000000..3dc69a9 --- /dev/null +++ b/exercises/concept/belgian-boxcars/.meta/design.md @@ -0,0 +1,36 @@ +# Design + +## Goal + +Introduce the four core "view a sequence as pieces" words — +`group`, `clump`, `split-when`, `monotonic-split` — and the +mental model that distinguishes them (size vs single-element +break vs adjacent-pair test). + +## Learning objectives + +- Use `group` for fixed-size disjoint chunks. +- Use `clump` for fixed-size sliding windows. +- Use `split-when` for predicate-driven breaks on single + elements. +- Use `monotonic-split` for predicate-driven runs on adjacent + pairs. + +## Out of scope + +- The slice-returning variants (`group-slice`, + `monotonic-split-slice`). +- ``/`` (the virtual constructors). 
+- Multi-character separators (those are `split` / `split-subseq` + territory, taught earlier in `log-levels`). + +## Concepts + +- `windows`: viewing a sequence as a sequence of pieces. + +## Prerequisites + +- `higher-order-sequences` — taught in `boutique-bookkeeping`. + Needed for `with` (used to bake `junctions` into the + per-element predicate in task 3) and for the general + quotation idiom. diff --git a/exercises/concept/belgian-boxcars/.meta/exemplar.factor b/exercises/concept/belgian-boxcars/.meta/exemplar.factor new file mode 100644 index 0000000..b150729 --- /dev/null +++ b/exercises/concept/belgian-boxcars/.meta/exemplar.factor @@ -0,0 +1,14 @@ +USING: grouping kernel sequences sets splitting splitting.monotonic ; +IN: belgian-boxcars + +: couple ( cars n -- trains ) + group ; + +: peek-couplings ( cars -- pairs ) + 2 clump ; + +: split-at-junctions ( cars junctions -- legs ) + swap [ swap member? ] with split-when ; + +: coalesce-cargo ( cars -- runs ) + [ = ] monotonic-split ; diff --git a/exercises/concept/belgian-boxcars/belgian-boxcars/belgian-boxcars-tests.factor b/exercises/concept/belgian-boxcars/belgian-boxcars/belgian-boxcars-tests.factor new file mode 100644 index 0000000..e191eb1 --- /dev/null +++ b/exercises/concept/belgian-boxcars/belgian-boxcars/belgian-boxcars-tests.factor @@ -0,0 +1,58 @@ +USING: belgian-boxcars tools.test ; +IN: belgian-boxcars.tests + +! TASK: 1 couple +{ { { "coal" "coal" "coke" } { "ore" "ore" "timber" } { "timber" } } } +[ { "coal" "coal" "coke" "ore" "ore" "timber" "timber" } 3 couple ] +unit-test + +{ { } } [ { } 3 couple ] unit-test + +{ { { "a" "b" } { "c" "d" } } } +[ { "a" "b" "c" "d" } 2 couple ] unit-test + +{ { { "a" } { "b" } { "c" } } } +[ { "a" "b" "c" } 1 couple ] unit-test + +! 
TASK: 2 peek-couplings +{ { { "coal" "ore" } { "ore" "timber" } { "timber" "crates" } } } +[ { "coal" "ore" "timber" "crates" } peek-couplings ] unit-test + +{ { } } [ { } peek-couplings ] unit-test + +{ { } } [ { "solo" } peek-couplings ] unit-test + +{ { { "a" "b" } } } [ { "a" "b" } peek-couplings ] unit-test + +! TASK: 3 split-at-junctions +{ { { "coal" "coal" } { "ore" } { "timber" } } } +[ { "coal" "coal" "switch" "ore" "switch" "timber" } + { "switch" } split-at-junctions ] unit-test + +! multiple junction kinds +{ { { "coal" } { "ore" } { "timber" } } } +[ { "coal" "Y" "ore" "X" "timber" } + { "X" "Y" } split-at-junctions ] unit-test + +! no junctions present +{ { { "coal" "ore" "timber" } } } +[ { "coal" "ore" "timber" } { "switch" } split-at-junctions ] unit-test + +{ { { } } } +[ { } { "switch" } split-at-junctions ] unit-test + +! leading and trailing junctions produce empty legs at the edges +{ { { } { "coal" } { } } } +[ { "switch" "coal" "switch" } { "switch" } split-at-junctions ] unit-test + +! 
TASK: 4 coalesce-cargo +{ { { "coal" "coal" } { "ore" "ore" "ore" } { "timber" } { "coal" } } } +[ { "coal" "coal" "ore" "ore" "ore" "timber" "coal" } coalesce-cargo ] +unit-test + +{ { } } [ { } coalesce-cargo ] unit-test + +{ { { "solo" } } } [ { "solo" } coalesce-cargo ] unit-test + +{ { { "a" } { "b" } { "a" } } } +[ { "a" "b" "a" } coalesce-cargo ] unit-test diff --git a/exercises/concept/belgian-boxcars/belgian-boxcars/belgian-boxcars.factor b/exercises/concept/belgian-boxcars/belgian-boxcars/belgian-boxcars.factor new file mode 100644 index 0000000..9087c46 --- /dev/null +++ b/exercises/concept/belgian-boxcars/belgian-boxcars/belgian-boxcars.factor @@ -0,0 +1,14 @@ +USING: kernel ; +IN: belgian-boxcars + +: couple ( cars n -- trains ) + "unimplemented" throw ; + +: peek-couplings ( cars -- pairs ) + "unimplemented" throw ; + +: split-at-junctions ( cars junctions -- legs ) + "unimplemented" throw ; + +: coalesce-cargo ( cars -- runs ) + "unimplemented" throw ; diff --git a/exercises/concept/cars-assemble/.docs/introduction.md b/exercises/concept/cars-assemble/.docs/introduction.md index 528d431..bd4de04 100644 --- a/exercises/concept/cars-assemble/.docs/introduction.md +++ b/exercises/concept/cars-assemble/.docs/introduction.md @@ -45,22 +45,34 @@ when ( ? quot -- ) unless ( ? quot -- ) ``` -## `if*` +## `if*` and `unless*` -`if*` is a variant of `if` that treats the boolean as a *value* -worth keeping when it's truthy — useful when a word returns "the -thing, or `f`". The truthy branch is called with the value still -on the stack; the falsy branch is called without it: +Two kernel variants treat the boolean as a *value* worth keeping +when it's truthy — useful when a word returns "the thing, or `f`": ``` -if* ( ? true false -- ) +if* ( ? true false -- ) ! truthy: true is called WITH ? on stack +unless* ( ? false -- ) ! falsy: false runs and pushes a default ``` +`if*` is the two-branch form. 
The truthy branch is called with +the value still on the stack; the falsy branch is called +without it: + ```factor 42 [ ] [ "nothing" ] if* . ! prints 42 f [ ] [ "nothing" ] if* . ! prints "nothing" ``` +`unless*` is the canonical "value or default" idiom. If the value +is truthy, it's left alone; if it's `f`, the value is dropped and +the quotation runs to push a substitute: + +```factor +"hello" [ "anonymous" ] unless* . ! => "hello" +f [ "anonymous" ] unless* . ! => "anonymous" +``` + ## `cond` When you have several alternative actions to choose between, `cond` diff --git a/exercises/concept/currency-conversion/.docs/instructions.md b/exercises/concept/currency-conversion/.docs/instructions.md index d46692f..777295c 100644 --- a/exercises/concept/currency-conversion/.docs/instructions.md +++ b/exercises/concept/currency-conversion/.docs/instructions.md @@ -62,8 +62,8 @@ Return the leftover amount that cannot be returned as whole bills. ## 6. Calculate value after exchange -Define `exchangeable-value` taking a `budget`, `exchange-rate`, -`spread`, and `denomination`. +Define `exchangeable-value` taking a `denomination`, `budget`, +`spread`, and `exchange-rate`. `spread` is the *percentage* taken as an exchange fee, written as an integer. It needs to be added to the exchange rate as a fraction. If @@ -75,10 +75,10 @@ plus spread, rounded down to whole bills of the given `denomination`. The returned value is an integer. ```factor -127.25 1.20 10 20 exchangeable-value . +20 127.25 10 1.20 exchangeable-value . ! => 80 -127.25 1.20 10 5 exchangeable-value . +5 127.25 10 1.20 exchangeable-value . ! 
=> 95 ``` diff --git a/exercises/concept/currency-conversion/.meta/exemplar.factor b/exercises/concept/currency-conversion/.meta/exemplar.factor index 0244212..7aa8254 100644 --- a/exercises/concept/currency-conversion/.meta/exemplar.factor +++ b/exercises/concept/currency-conversion/.meta/exemplar.factor @@ -1,4 +1,4 @@ -USING: kernel locals math math.functions math.order ; +USING: kernel math math.functions math.order ; IN: currency-conversion : exchange-money ( budget exchange-rate -- exchanged ) @@ -10,18 +10,14 @@ IN: currency-conversion : value-of-bills ( denomination number-of-bills -- value ) * ; -:: number-of-bills ( amount denomination -- bills ) - amount floor >integer denomination /i ; +: number-of-bills ( amount denomination -- bills ) + swap floor >integer swap /i ; : leftover-of-bills ( amount denomination -- leftover ) mod ; -:: exchangeable-value ( budget exchange-rate spread denomination -- value ) - budget - exchange-rate exchange-rate 100 / spread * + - exchange-money - denomination number-of-bills - denomination swap value-of-bills ; +: exchangeable-value ( denomination budget spread exchange-rate -- value ) + swap 100 + * 100 / / over number-of-bills * ; : safe-change ( budget exchanging-value -- change ) - 0 max ; diff --git a/exercises/concept/currency-conversion/currency-conversion/currency-conversion-tests.factor b/exercises/concept/currency-conversion/currency-conversion/currency-conversion-tests.factor index 7db2575..67067a5 100644 --- a/exercises/concept/currency-conversion/currency-conversion/currency-conversion-tests.factor +++ b/exercises/concept/currency-conversion/currency-conversion/currency-conversion-tests.factor @@ -25,11 +25,12 @@ IN: currency-conversion.tests { 1.14 1e-8 } [ 3.14 2 leftover-of-bills ] unit-test~ ! 
TASK: 6 exchangeable-value -{ 8568 } [ 100000 10.61 10 1 exchangeable-value ] unit-test -{ 1400 } [ 1500 0.84 25 40 exchangeable-value ] unit-test -{ 0 } [ 470000 1050 30 10000000000 exchangeable-value ] unit-test -{ 4017094016600 } [ 470000 0.00000009 30 700 exchangeable-value ] unit-test -{ 363300 } [ 425.33 0.0009 30 700 exchangeable-value ] unit-test +! Args are ( denomination budget spread exchange-rate ). +{ 8568 } [ 1 100000 10 10.61 exchangeable-value ] unit-test +{ 1400 } [ 40 1500 25 0.84 exchangeable-value ] unit-test +{ 0 } [ 10000000000 470000 30 1050 exchangeable-value ] unit-test +{ 4017094016600 } [ 700 470000 30 0.00000009 exchangeable-value ] unit-test +{ 363300 } [ 700 425.33 30 0.0009 exchangeable-value ] unit-test ! TASK: 7 safe-change { 7.5 } [ 127.5 120 safe-change ] unit-test diff --git a/exercises/concept/currency-conversion/currency-conversion/currency-conversion.factor b/exercises/concept/currency-conversion/currency-conversion/currency-conversion.factor index d758d07..e364f70 100644 --- a/exercises/concept/currency-conversion/currency-conversion/currency-conversion.factor +++ b/exercises/concept/currency-conversion/currency-conversion/currency-conversion.factor @@ -16,7 +16,7 @@ IN: currency-conversion : leftover-of-bills ( amount denomination -- leftover ) "unimplemented" throw ; -: exchangeable-value ( budget exchange-rate spread denomination -- value ) +: exchangeable-value ( denomination budget spread exchange-rate -- value ) "unimplemented" throw ; : safe-change ( budget exchanging-value -- change ) diff --git a/exercises/concept/joiners-journey/.docs/introduction.md b/exercises/concept/joiners-journey/.docs/introduction.md index 22dd560..b4cedbc 100644 --- a/exercises/concept/joiners-journey/.docs/introduction.md +++ b/exercises/concept/joiners-journey/.docs/introduction.md @@ -16,15 +16,18 @@ dup ( x -- x x ) dupd ( x y -- x x y ) drop ( x -- ) swap ( x y -- y x ) +swapd ( x y z -- y x z ) over ( x y -- x y x ) pick ( x y z -- x y 
z x ) rot ( x y z -- y z x ) -rot ( x y z -- z x y ) +rotd ( x y z w -- y z x w ) spin ( x y z -- z y x ) nip ( x y -- y ) 2dup ( x y -- x y x y ) 2drop ( x y -- ) +2nip ( x y z -- z ) 2swap ( x y z w -- z w x y ) ``` @@ -98,23 +101,31 @@ tri ( x q1 q2 q3 -- r1 r2 r3 ) The empty quotation `[ ]` acts as the identity — useful when one of the slots in the cut card should be the input itself. -## `dip` — operate underneath +## `dip` and `2dip` — operate underneath `dip` (in [`kernel`][kernel]) calls a quotation on the values *under* -the top of stack, leaving the top untouched: +the top of stack, leaving the top untouched. `2dip` does the same +but protects the top *two* values: ``` -dip ( x quot -- x ) +dip ( x quot -- x ) +2dip ( x y quot -- x y ) ``` ```factor -1 2 3 [ + ] dip .s -! => 3 -! => 3 +9 10 11 [ + ] dip .s +! => 19 (9 + 10) +! => 11 (the protected top, restored) + +9 10 11 12 [ + ] 2dip .s +! => 19 (9 + 10) +! => 11 (the protected x, originally 11) +! => 12 (the protected y, originally 12) ``` -Here `3` was hidden, `1 2 +` ran below it, and `3` was put back on -top. +Here `dip` hid `11`, ran `9 10 +`, and put `11` back on top. +`2dip` hid `11 12`, ran `9 10 +` underneath, then restored +`11 12`. `dip` is the right tool when the natural argument order leaves the "pass-through" value on top of the stack. Reach for it instead of diff --git a/exercises/concept/lasagna/.docs/introduction.md b/exercises/concept/lasagna/.docs/introduction.md index 56d1823..341b0a9 100644 --- a/exercises/concept/lasagna/.docs/introduction.md +++ b/exercises/concept/lasagna/.docs/introduction.md @@ -58,6 +58,21 @@ top, the stack effect is `( x y -- difference )`, and the result is A trailing `?` in the outputs is the convention for "a boolean", but the lasagna exercise uses only numbers. +## Rearranging the top of the stack + +Three small shuffle words from the `kernel` vocabulary handle the +most common rearrangements: + +``` +dup ( x -- x x ) ! 
duplicate the top +swap ( x y -- y x ) ! flip the top two +over ( x y -- x y x ) ! copy the second-from-top onto the top +``` + +Use `dup` when one input value needs to feed two operations, +`swap` when two values are in the wrong order for the next word, +and `over` when you need to *keep* a value while still using it. + ## Defining a word `:` starts a word definition, the stack effect comes next, then the @@ -101,17 +116,6 @@ defined earlier in the same file: This is how the last task in the exercise reuses an earlier one. -## Swapping the top two values - -If two values are on the stack in the wrong order for the next word, -`swap` flips the top two: - -``` -swap ( x y -- y x ) -``` - -`swap` lives in the `kernel` vocabulary. - ## Naming conventions Words and constants both use `lowercase-kebab-case`: lowercase letters diff --git a/exercises/concept/ledger-loop/.docs/hints.md b/exercises/concept/ledger-loop/.docs/hints.md index 7c684c1..82f5961 100644 --- a/exercises/concept/ledger-loop/.docs/hints.md +++ b/exercises/concept/ledger-loop/.docs/hints.md @@ -26,4 +26,20 @@ - First compute the running balance, then run the running minimum over that result. Two words, chained. +## 4. Halve until target + +- `produce` is the right tool: it generates a sequence by + repeatedly testing and stepping. +- The state running through `produce` is *both* the running + value and the target. Put `target` on the bottom and the + current value on top; the predicate compares them with + `2dup <` (target less than running, i.e., still above + target). +- Inside the quotation, divide the current value by 2 with + `2 /i` and `dup` the result so it both updates the running + state and is emitted as the new element. +- After `produce`, the leftover state (target plus final + value) needs to be dropped — `2nip` peels off the bottom + two values and leaves only the produced sequence. 
+ [math.statistics]: https://docs.factorcode.org/content/vocab-math.statistics.html diff --git a/exercises/concept/ledger-loop/.docs/instructions.md b/exercises/concept/ledger-loop/.docs/instructions.md index 05cd2c6..37d3800 100644 --- a/exercises/concept/ledger-loop/.docs/instructions.md +++ b/exercises/concept/ledger-loop/.docs/instructions.md @@ -62,3 +62,25 @@ days when the account looked risky. { 200 -50 -100 -200 } least-balance-so-far . ! => { 200 150 50 -150 } ``` + +## 4. Halve until target + +The board is depreciating an asset on the ledger by halving its +recorded value each quarter. Define `halve-until` to take a +`principal` and a `target`, and return the sequence of halved +values (using integer division) starting from the first halving, +continuing as long as the running value is still strictly above +`target`. The last emitted value will be the first one that +drops to or below `target`. + +```factor +100 5 halve-until . +! => { 50 25 12 6 3 } + +64 1 halve-until . +! => { 32 16 8 4 2 1 } + +3 5 halve-until . +! => { } +``` + diff --git a/exercises/concept/ledger-loop/.docs/introduction.md b/exercises/concept/ledger-loop/.docs/introduction.md index 082b2b3..41375e2 100644 --- a/exercises/concept/ledger-loop/.docs/introduction.md +++ b/exercises/concept/ledger-loop/.docs/introduction.md @@ -26,16 +26,20 @@ USING: math sequences ; ``` A non-zero seed and a custom combiner are the parts of `reduce` -that `sum` and `product` cannot reach. For example, threading a -per-step floor: +that `sum` and `product` cannot reach. For example, the largest +value in a sequence, with a default if no value beats it: ```factor -100 { 50 -200 30 } [ + 0 max ] reduce . ! => 30 +USING: math.order ; + +{ 3 1 -4 5 -2 } 0 [ max ] reduce . ! => 5 +{ -3 -1 -4 } 0 [ max ] reduce . ! => 0 ``` -`100 + 50 = 150`; `150 + (-200) = -50`, floored to `0`; -`0 + 30 = 30`. The floor applies at each step, so `sum +` would -give the wrong answer (`-20`). 
+The seed `0` participates in the comparison: it acts as the +result when every element loses, so a sequence of all-negative +values still produces `0` rather than an arbitrary smallest +value. ## Cumulative reductions @@ -61,10 +65,60 @@ USING: math.statistics ; { 3 1 4 1 5 9 2 6 } cum-max . ! => { 3 3 4 4 5 9 9 9 } ``` -A useful pattern is **chained** cumulative reductions: feed the -output of one into another to get a running summary of a -running summary. For example, taking `cum-min` of the running -totals tells you the worst running total seen so far at each -position. +A useful pattern is **chained** cumulative reductions: the +output of one is itself a sequence, ready to feed into another. +That makes "running summary of a running summary" expressible +in two words. The combinations are flexible — pair them up +based on what each step is summarising. + +## `produce` — the unfold + +`reduce` consumes a sequence into a value. `produce` (in +[`sequences`][sequences]) goes the other way — *generates* a +sequence from a seed by repeatedly testing and stepping: + +``` +produce ( pred quot -- seq ) +``` + +Each iteration first runs `pred` on the current state; if it +returns truthy, `quot` is called to produce the next element +and update the state. When `pred` returns `f`, iteration stops +and the collected elements are returned. + +A classic example is the **Fibonacci sequence** (each number is +the sum of the previous two). The running state is the pair +`(a, b)`. Each step emits `b`, then replaces the pair with +`(b, a + b)`: + +```factor +USING: kernel math sequences ; + +! Fibonacci numbers strictly below 100: +0 1 [ dup 100 < ] [ tuck + over ] produce 2nip . +! 
=> { 1 1 2 3 5 8 13 21 34 55 89 } +``` + +The running state spans *two* values, so the body uses `tuck` +(in [`kernel`][kernel]) — the three-element shuffle that copies +the top under the second — to step the pair, and `2nip` (also in +`kernel`, the two-element analogue of `nip`) tidies up at the +end. Reading the call left to right: + +- The predicate `[ dup 100 < ]` peeks at the top of the pair + (the *next* number to be emitted) and continues while it's + still below the bound. +- The body `[ tuck + over ]` advances the state to + `(b, a + b)` and emits `b`, leaving three values on the stack + — the new pair below, the emitted number on top. +- After `produce` stops, the two trailing values (the final + pair) are discarded with `2nip`, leaving only the produced + sequence. + +`produce` is the exact dual of `reduce`: where `reduce` folds a +sequence down to a value, `produce` unfolds a value up to a +sequence. [math.statistics]: https://docs.factorcode.org/content/vocab-math.statistics.html +[sequences]: https://docs.factorcode.org/content/vocab-sequences.html +[kernel]: https://docs.factorcode.org/content/vocab-kernel.html diff --git a/exercises/concept/ledger-loop/.meta/exemplar.factor b/exercises/concept/ledger-loop/.meta/exemplar.factor index 4984e59..3a16a3c 100644 --- a/exercises/concept/ledger-loop/.meta/exemplar.factor +++ b/exercises/concept/ledger-loop/.meta/exemplar.factor @@ -9,3 +9,6 @@ IN: ledger-loop : least-balance-so-far ( transactions -- worsts ) cum-sum cum-min ; + +: halve-until ( principal target -- balances ) + swap [ 2dup < ] [ 2 /i dup ] produce 2nip ; diff --git a/exercises/concept/ledger-loop/ledger-loop/ledger-loop-tests.factor b/exercises/concept/ledger-loop/ledger-loop/ledger-loop-tests.factor index 4e0122a..385d143 100644 --- a/exercises/concept/ledger-loop/ledger-loop/ledger-loop-tests.factor +++ b/exercises/concept/ledger-loop/ledger-loop/ledger-loop-tests.factor @@ -18,3 +18,10 @@ IN: ledger-loop.tests { { 200 150 50 -150 } } [ { 
200 -50 -100 -200 } least-balance-so-far ] unit-test { { } } [ { } least-balance-so-far ] unit-test { { 7 } } [ { 7 } least-balance-so-far ] unit-test + +! TASK: 4 halve-until +{ { 50 25 12 6 3 } } [ 100 5 halve-until ] unit-test +{ { 32 16 8 4 2 1 } } [ 64 1 halve-until ] unit-test +{ { } } [ 3 5 halve-until ] unit-test +{ { 5 } } [ 10 5 halve-until ] unit-test +{ { } } [ 5 5 halve-until ] unit-test diff --git a/exercises/concept/ledger-loop/ledger-loop/ledger-loop.factor b/exercises/concept/ledger-loop/ledger-loop/ledger-loop.factor index c946e4e..cb473a5 100644 --- a/exercises/concept/ledger-loop/ledger-loop/ledger-loop.factor +++ b/exercises/concept/ledger-loop/ledger-loop/ledger-loop.factor @@ -9,3 +9,6 @@ IN: ledger-loop : least-balance-so-far ( transactions -- worsts ) "unimplemented" throw ; + +: halve-until ( principal target -- balances ) + "unimplemented" throw ; diff --git a/exercises/concept/lighthouse-logbook/.docs/hints.md b/exercises/concept/lighthouse-logbook/.docs/hints.md new file mode 100644 index 0000000..b74451f --- /dev/null +++ b/exercises/concept/lighthouse-logbook/.docs/hints.md @@ -0,0 +1,39 @@ +# Hints + +## 1. `empty-log` + +- `HS{ }` is the empty literal, but it's *shared* — clone it. + +## 2. `sight` + +- One word from `sets`: the destructive insert. + +## 3. `seen?` + +- `in?` is the set-protocol membership test. It's the O(1) + hash lookup, distinct from `member?`'s linear scan over a + sequence. + +## 4. `forget-sighting` + +- One word from `sets`: the destructive remove. + +## 5. `unique-count` + +- `cardinality` returns the size of a set. + +## 6. `reachable` + +- This is a textbook breadth-first search. +- Maintain two mutable structures: a `HS{ }` for visited + lighthouses (so you don't process anyone twice) and a `V{ }` + acting as a frontier queue (`push` to enqueue, `pop` to + dequeue). +- Seed both with `start`. 
Then `while` the frontier is + non-empty, dequeue one lighthouse, look up its neighbours in + `relay-map` (using `at`), and for each neighbour that isn't + yet in `visited`, `adjoin` it to `visited` and `push` it onto + the frontier. +- Return `visited`. +- Locals (`::` and `:>`) keep this readable; use them for + `visited`, `frontier`, and the per-iteration neighbour lookup. diff --git a/exercises/concept/lighthouse-logbook/.docs/instructions.md b/exercises/concept/lighthouse-logbook/.docs/instructions.md new file mode 100644 index 0000000..485525f --- /dev/null +++ b/exercises/concept/lighthouse-logbook/.docs/instructions.md @@ -0,0 +1,84 @@ +# Instructions + +You are the keeper at Cape Crozier lighthouse. From the lantern +room you log every vessel that passes by — each is identified by +a unique callsign — and you also coordinate signal relays with +the other lighthouses up and down the coast. + +## 1. A fresh logbook + +Define `empty-log` to return a fresh empty hash-set, ready to +collect callsigns. + +```factor +empty-log . +! => HS{ } +``` + +## 2. Record a sighting + +Define `sight` to take a logbook and a callsign, and record the +sighting in place. Returns nothing. + +```factor +empty-log dup "NS-1024" sight . +! => HS{ "NS-1024" } +``` + +## 3. Have we seen this one? + +Define `seen?` to take a logbook and a callsign, returning `t` +if the callsign has been recorded and `f` otherwise. + +```factor +HS{ "NS-1024" "WB-203" } "NS-1024" seen? . ! => t +HS{ "NS-1024" "WB-203" } "X-99" seen? . ! => f +``` + +## 4. Forget a sighting + +Define `forget-sighting` to take a logbook and a callsign and +remove the callsign from the log in place. Returns nothing. If +the callsign isn't there, do nothing. + +```factor +HS{ "NS-1024" "WB-203" } clone dup "WB-203" forget-sighting . +! => HS{ "NS-1024" } +``` + +## 5. How many distinct vessels? + +Define `unique-count` to return the number of distinct +callsigns in the log. 
+ +```factor +HS{ "NS-1024" "WB-203" "AC-77" } unique-count . ! => 3 +empty-log unique-count . ! => 0 +``` + +## 6. Reachable lighthouses + +The coast guard maintains a `relay-map`: a hashtable keyed by +lighthouse name, with each value being an array of the +lighthouses that the keyed one can directly relay to. + +Define `reachable` to take a `start` lighthouse and a +`relay-map`, and return a hash-set of every lighthouse reachable +from `start` (including `start` itself) by repeated relays. + +```factor +H{ + { "Crozier" { "Beacon" "Hadley" } } + { "Beacon" { "Crozier" "Spiral" } } + { "Hadley" { "Crozier" } } + { "Spiral" { "Beacon" "Outpost" } } + { "Outpost" { "Spiral" } } + { "Far-Isle" { "Lonely" } } + { "Lonely" { "Far-Isle" } } +} +"Crozier" swap reachable . +! => HS{ "Crozier" "Beacon" "Hadley" "Spiral" "Outpost" } +``` + +The `Far-Isle`/`Lonely` pair is its own connected component, so +neither appears in the result. diff --git a/exercises/concept/lighthouse-logbook/.docs/introduction.md b/exercises/concept/lighthouse-logbook/.docs/introduction.md new file mode 100644 index 0000000..8faee1d --- /dev/null +++ b/exercises/concept/lighthouse-logbook/.docs/introduction.md @@ -0,0 +1,80 @@ +# Introduction + +Hash-sets are mutable, unordered collections that store each +value at most once. Lookup, insert, and delete are all O(1) +average. They live in [`hash-sets`][hash-sets] and implement the +[`sets`][sets] protocol. + +## Hash-set literals + +```factor +HS{ "NS-1024" "WB-203" } . +! => HS{ "NS-1024" "WB-203" } +``` + +`HS{ }` is the empty literal. Like other Factor literals it's a +*shared* object — every reference to `HS{ }` in source points to +the same set. `HS{ } clone` gives you a fresh independent copy +each call. + +## Adjoining and removing + +``` +adjoin ( elt set -- ) ! insert in place; no-op if already present +delete ( elt set -- ) ! remove in place; no-op if absent +``` + +Both mutate the set; neither returns anything on the stack. 
+ +```factor +USING: hash-sets kernel sets ; + +HS{ } clone +"NS-1024" over adjoin +"WB-203" over adjoin +"NS-1024" over adjoin ! duplicate — no effect +. ! => HS{ "NS-1024" "WB-203" } +``` + +## Asking the set + +``` +in? ( elt set -- ? ) ! is elt in the set? +cardinality ( set -- n ) ! number of elements +members ( set -- seq ) ! enumerate as a sequence +``` + +`in?` is the set-protocol membership test. It's distinct from +`member?` (from `sequences`), which does a linear scan over a +sequence. For a hash-set, `in?` is a hash lookup — O(1) average, +the whole point of using a hash-set. + +```factor +"NS-1024" my-log in? . ! => t +"X-99" my-log in? . ! => f +my-log cardinality . ! => 2 +``` + +## Combining sets + +``` +union ( set1 set2 -- set ) +intersect ( set1 set2 -- set ) +diff ( set1 set2 -- set ) +``` + +`union` is "all elements from either"; `intersect` is "elements +in both"; `diff` is "in `set1` but not `set2`". Each returns a +new set without mutating its inputs. + +## Why this matters + +Hash-sets pair naturally with hashtables for *graph traversal*: +a hashtable maps each node to its neighbours, and a hash-set +records which nodes have already been visited so the search +doesn't loop or repeat work. The traversal is then a queue +plus the two structures, mutated in place as you sweep +outwards. 
+ +[hash-sets]: https://docs.factorcode.org/content/vocab-hash-sets.html +[sets]: https://docs.factorcode.org/content/vocab-sets.html diff --git a/exercises/concept/lighthouse-logbook/.meta/config.json b/exercises/concept/lighthouse-logbook/.meta/config.json new file mode 100644 index 0000000..1e016e9 --- /dev/null +++ b/exercises/concept/lighthouse-logbook/.meta/config.json @@ -0,0 +1,17 @@ +{ + "authors": [ + "keiravillekode" + ], + "files": { + "solution": [ + "lighthouse-logbook/lighthouse-logbook.factor" + ], + "test": [ + "lighthouse-logbook/lighthouse-logbook-tests.factor" + ], + "exemplar": [ + ".meta/exemplar.factor" + ] + }, + "blurb": "Track unique vessel sightings and signal-relay reachability with mutable hash-sets and a textbook BFS." +} diff --git a/exercises/concept/lighthouse-logbook/.meta/design.md b/exercises/concept/lighthouse-logbook/.meta/design.md new file mode 100644 index 0000000..2e01a0d --- /dev/null +++ b/exercises/concept/lighthouse-logbook/.meta/design.md @@ -0,0 +1,39 @@ +# Design + +## Goal + +Introduce mutable hash-sets and the canonical BFS pattern that +combines a hash-set (visited) with a hashtable (adjacency map). + +## Learning objectives + +- Build a fresh mutable hash-set with `HS{ } clone`. +- Insert, remove, test, and count with `adjoin`, `delete`, + `in?`, `cardinality`. +- Distinguish `in?` (O(1) hash lookup) from `member?` (linear + scan over a sequence). +- Combine a hash-set with a hashtable to perform a graph + traversal — the textbook visited-set + frontier-queue + pattern. + +## Out of scope + +- The non-hash set implementations. +- `union`/`intersect`/`diff` as combinators in tasks (mentioned + in the introduction only). +- Persistent / immutable set semantics. + +## Concepts + +- `hash-sets`: mutable hash-set operations and the visited-set + pattern. + +## Prerequisites + +- `assocs` — taught in `storeroom-stocktake`. Needed for the + hashtable adjacency map in task 6. +- `mutation` — taught in `mosaic-mischief`. 
Needed for the + vector queue (`push`/`pop`) in task 6. +- `locals` — taught in `lasagna-luminary`. Needed to keep the + BFS body readable. +- `while` — taught in `mixed-juices`. Needed for the BFS loop. diff --git a/exercises/concept/lighthouse-logbook/.meta/exemplar.factor b/exercises/concept/lighthouse-logbook/.meta/exemplar.factor new file mode 100644 index 0000000..7423a25 --- /dev/null +++ b/exercises/concept/lighthouse-logbook/.meta/exemplar.factor @@ -0,0 +1,31 @@ +USING: assocs combinators hash-sets kernel locals sequences sets ; +IN: lighthouse-logbook + +: empty-log ( -- log ) + HS{ } clone ; + +: sight ( log callsign -- ) + swap adjoin ; + +: seen? ( log callsign -- ? ) + swap in? ; + +: forget-sighting ( log callsign -- ) + swap delete ; + +: unique-count ( log -- n ) + cardinality ; + +:: reachable ( start relay-map -- visited ) + HS{ } clone :> visited + V{ } clone :> frontier + start visited adjoin + start frontier push + [ frontier empty? not ] [ + frontier pop relay-map at [ + dup visited in? [ drop ] [ + [ visited adjoin ] [ frontier push ] bi + ] if + ] each + ] while + visited ; diff --git a/exercises/concept/lighthouse-logbook/lighthouse-logbook/lighthouse-logbook-tests.factor b/exercises/concept/lighthouse-logbook/lighthouse-logbook/lighthouse-logbook-tests.factor new file mode 100644 index 0000000..0ec62c4 --- /dev/null +++ b/exercises/concept/lighthouse-logbook/lighthouse-logbook/lighthouse-logbook-tests.factor @@ -0,0 +1,72 @@ +USING: hash-sets kernel lighthouse-logbook tools.test ; +IN: lighthouse-logbook.tests + +! TASK: 1 empty-log +{ HS{ } } [ empty-log ] unit-test + +! a fresh log each call (not the shared HS{ } literal) +{ HS{ "x" } } [ empty-log dup "x" sight ] unit-test +{ HS{ } } [ empty-log ] unit-test + +! TASK: 2 sight +{ HS{ "NS-1024" } } +[ empty-log dup "NS-1024" sight ] unit-test + +{ HS{ "NS-1024" "WB-203" } } +[ empty-log dup "NS-1024" sight dup "WB-203" sight ] unit-test + +! 
adjoining a duplicate is a no-op +{ HS{ "NS-1024" } } +[ empty-log dup "NS-1024" sight dup "NS-1024" sight ] unit-test + +! TASK: 3 seen? +{ t } [ HS{ "NS-1024" "WB-203" } "NS-1024" seen? ] unit-test +{ f } [ HS{ "NS-1024" "WB-203" } "X-99" seen? ] unit-test +{ f } [ empty-log "anything" seen? ] unit-test + +! TASK: 4 forget-sighting +{ HS{ "NS-1024" } } +[ HS{ "NS-1024" "WB-203" } clone dup "WB-203" forget-sighting ] unit-test + +! removing a missing callsign is a no-op +{ HS{ "NS-1024" } } +[ HS{ "NS-1024" } clone dup "X-99" forget-sighting ] unit-test + +! TASK: 5 unique-count +{ 3 } [ HS{ "NS-1024" "WB-203" "AC-77" } unique-count ] unit-test +{ 0 } [ empty-log unique-count ] unit-test +{ 1 } [ HS{ "solo" } unique-count ] unit-test + +! TASK: 6 reachable +! a connected component +{ HS{ "Crozier" "Beacon" "Hadley" "Spiral" "Outpost" } } +[ + "Crozier" + H{ + { "Crozier" { "Beacon" "Hadley" } } + { "Beacon" { "Crozier" "Spiral" } } + { "Hadley" { "Crozier" } } + { "Spiral" { "Beacon" "Outpost" } } + { "Outpost" { "Spiral" } } + { "Far-Isle" { "Lonely" } } + { "Lonely" { "Far-Isle" } } + } + reachable +] unit-test + +! a disconnected start +{ HS{ "Far-Isle" "Lonely" } } +[ + "Far-Isle" + H{ + { "Crozier" { "Beacon" } } + { "Beacon" { "Crozier" } } + { "Far-Isle" { "Lonely" } } + { "Lonely" { "Far-Isle" } } + } + reachable +] unit-test + +! singleton with no neighbours +{ HS{ "Solo" } } +[ "Solo" H{ { "Solo" { } } } reachable ] unit-test diff --git a/exercises/concept/lighthouse-logbook/lighthouse-logbook/lighthouse-logbook.factor b/exercises/concept/lighthouse-logbook/lighthouse-logbook/lighthouse-logbook.factor new file mode 100644 index 0000000..53bbd60 --- /dev/null +++ b/exercises/concept/lighthouse-logbook/lighthouse-logbook/lighthouse-logbook.factor @@ -0,0 +1,20 @@ +USING: kernel ; +IN: lighthouse-logbook + +: empty-log ( -- log ) + "unimplemented" throw ; + +: sight ( log callsign -- ) + "unimplemented" throw ; + +: seen? ( log callsign -- ? 
) + "unimplemented" throw ; + +: forget-sighting ( log callsign -- ) + "unimplemented" throw ; + +: unique-count ( log -- n ) + "unimplemented" throw ; + +: reachable ( start relay-map -- visited ) + "unimplemented" throw ; diff --git a/exercises/concept/role-playing-game/.docs/introduction.md b/exercises/concept/role-playing-game/.docs/introduction.md index a1200ea..d4544ff 100644 --- a/exercises/concept/role-playing-game/.docs/introduction.md +++ b/exercises/concept/role-playing-game/.docs/introduction.md @@ -79,20 +79,5 @@ new one: `change-x` is also mutating, so `clone` before calling it if you want to preserve the original. -## `unless*` - -`unless*` (in [`kernel`][kernel]) is the "default value" idiom: if -the top of the stack is truthy, leave it alone; otherwise drop it -and run the quotation: - -``` -unless* ( value/f quot -- value ) -``` - -```factor -"hello" [ "default" ] unless* . ! => "hello" -f [ "default" ] unless* . ! => "default" -``` - [accessors]: https://docs.factorcode.org/content/vocab-accessors.html [kernel]: https://docs.factorcode.org/content/vocab-kernel.html diff --git a/exercises/practice/allergies/.docs/instructions.md b/exercises/practice/allergies/.docs/instructions.md new file mode 100644 index 0000000..daf8cfd --- /dev/null +++ b/exercises/practice/allergies/.docs/instructions.md @@ -0,0 +1,27 @@ +# Instructions + +Given a person's allergy score, determine whether or not they're allergic to a given item, and their full list of allergies. + +An allergy test produces a single numeric score which contains the information about all the allergies the person has (that they were tested for). + +The list of items (and their value) that were tested are: + +- eggs (1) +- peanuts (2) +- shellfish (4) +- strawberries (8) +- tomatoes (16) +- chocolate (32) +- pollen (64) +- cats (128) + +So if Tom is allergic to peanuts and chocolate, he gets a score of 34. 
+ +Now, given just that score of 34, your program should be able to say: + +- Whether Tom is allergic to any one of those allergens listed above. +- All the allergens Tom is allergic to. + +Note: a given score may include allergens **not** listed above (i.e. allergens that score 256, 512, 1024, etc.). +Your program should ignore those components of the score. +For example, if the allergy score is 257, your program should only report the eggs (1) allergy. diff --git a/exercises/practice/allergies/.meta/config.json b/exercises/practice/allergies/.meta/config.json new file mode 100644 index 0000000..52b22ab --- /dev/null +++ b/exercises/practice/allergies/.meta/config.json @@ -0,0 +1,19 @@ +{ + "authors": [ + "keiravillekode" + ], + "files": { + "solution": [ + "allergies/allergies.factor" + ], + "test": [ + "allergies/allergies-tests.factor" + ], + "example": [ + ".meta/example.factor" + ] + }, + "blurb": "Given a person's allergy score, determine whether or not they're allergic to a given item, and their full list of allergies.", + "source": "Exercise by the JumpstartLab team for students at The Turing School of Software and Design.", + "source_url": "https://www.turing.edu/" +} diff --git a/exercises/practice/allergies/.meta/example.factor b/exercises/practice/allergies/.meta/example.factor new file mode 100644 index 0000000..eebeaf0 --- /dev/null +++ b/exercises/practice/allergies/.meta/example.factor @@ -0,0 +1,15 @@ +USING: hash-sets kernel locals math.bitwise sequences sets ; +IN: allergies + +CONSTANT: ALLERGY-NAMES { "eggs" "peanuts" "shellfish" "strawberries" + "tomatoes" "chocolate" "pollen" "cats" } + +:: allergens ( score -- set ) + HS{ } clone :> result + ALLERGY-NAMES [| name index | + score index bit? [ name result adjoin ] when + ] each-index + result ; + +: allergic-to ( score item -- ? ) + swap allergens in? 
; diff --git a/exercises/practice/allergies/.meta/generator.jl b/exercises/practice/allergies/.meta/generator.jl new file mode 100644 index 0000000..8a2e4ea --- /dev/null +++ b/exercises/practice/allergies/.meta/generator.jl @@ -0,0 +1,25 @@ +module Allergies + +function gen_test_case(case) + property = case["property"] + input = case["input"] + expected = case["expected"] + + if property == "allergicTo" + score = input["score"] + item = input["item"] + bool_str = expected ? "t" : "f" + return """{ $(bool_str) }\n[ $(score) "$(item)" allergic-to ] unit-test""" + elseif property == "list" + score = input["score"] + if isempty(expected) + items_str = "HS{ }" + else + quoted = ["\"$(s)\"" for s in expected] + items_str = "HS{ " * join(quoted, " ") * " }" + end + return """{ $(items_str) }\n[ $(score) allergens ] unit-test""" + end +end + +end diff --git a/exercises/practice/allergies/.meta/tests.toml b/exercises/practice/allergies/.meta/tests.toml new file mode 100644 index 0000000..799ab85 --- /dev/null +++ b/exercises/practice/allergies/.meta/tests.toml @@ -0,0 +1,160 @@ +# This is an auto-generated file. +# +# Regenerating this file via `configlet sync` will: +# - Recreate every `description` key/value pair +# - Recreate every `reimplements` key/value pair, where they exist in problem-specifications +# - Remove any `include = true` key/value pair (an omitted `include` key implies inclusion) +# - Preserve any other key/value pair +# +# As user-added comments (using the # character) will be removed when this file +# is regenerated, comments can be added via a `comment` key. 
+ +[17fc7296-2440-4ac4-ad7b-d07c321bc5a0] +description = "testing for eggs allergy -> not allergic to anything" + +[07ced27b-1da5-4c2e-8ae2-cb2791437546] +description = "testing for eggs allergy -> allergic only to eggs" + +[5035b954-b6fa-4b9b-a487-dae69d8c5f96] +description = "testing for eggs allergy -> allergic to eggs and something else" + +[64a6a83a-5723-4b5b-a896-663307403310] +description = "testing for eggs allergy -> allergic to something, but not eggs" + +[90c8f484-456b-41c4-82ba-2d08d93231c6] +description = "testing for eggs allergy -> allergic to everything" + +[d266a59a-fccc-413b-ac53-d57cb1f0db9d] +description = "testing for peanuts allergy -> not allergic to anything" + +[ea210a98-860d-46b2-a5bf-50d8995b3f2a] +description = "testing for peanuts allergy -> allergic only to peanuts" + +[eac69ae9-8d14-4291-ac4b-7fd2c73d3a5b] +description = "testing for peanuts allergy -> allergic to peanuts and something else" + +[9152058c-ce39-4b16-9b1d-283ec6d25085] +description = "testing for peanuts allergy -> allergic to something, but not peanuts" + +[d2d71fd8-63d5-40f9-a627-fbdaf88caeab] +description = "testing for peanuts allergy -> allergic to everything" + +[b948b0a1-cbf7-4b28-a244-73ff56687c80] +description = "testing for shellfish allergy -> not allergic to anything" + +[9ce9a6f3-53e9-4923-85e0-73019047c567] +description = "testing for shellfish allergy -> allergic only to shellfish" + +[b272fca5-57ba-4b00-bd0c-43a737ab2131] +description = "testing for shellfish allergy -> allergic to shellfish and something else" + +[21ef8e17-c227-494e-8e78-470a1c59c3d8] +description = "testing for shellfish allergy -> allergic to something, but not shellfish" + +[cc789c19-2b5e-4c67-b146-625dc8cfa34e] +description = "testing for shellfish allergy -> allergic to everything" + +[651bde0a-2a74-46c4-ab55-02a0906ca2f5] +description = "testing for strawberries allergy -> not allergic to anything" + +[b649a750-9703-4f5f-b7f7-91da2c160ece] +description = "testing for strawberries 
allergy -> allergic only to strawberries" + +[50f5f8f3-3bac-47e6-8dba-2d94470a4bc6] +description = "testing for strawberries allergy -> allergic to strawberries and something else" + +[23dd6952-88c9-48d7-a7d5-5d0343deb18d] +description = "testing for strawberries allergy -> allergic to something, but not strawberries" + +[74afaae2-13b6-43a2-837a-286cd42e7d7e] +description = "testing for strawberries allergy -> allergic to everything" + +[c49a91ef-6252-415e-907e-a9d26ef61723] +description = "testing for tomatoes allergy -> not allergic to anything" + +[b69c5131-b7d0-41ad-a32c-e1b2cc632df8] +description = "testing for tomatoes allergy -> allergic only to tomatoes" + +[1ca50eb1-f042-4ccf-9050-341521b929ec] +description = "testing for tomatoes allergy -> allergic to tomatoes and something else" + +[e9846baa-456b-4eff-8025-034b9f77bd8e] +description = "testing for tomatoes allergy -> allergic to something, but not tomatoes" + +[b2414f01-f3ad-4965-8391-e65f54dad35f] +description = "testing for tomatoes allergy -> allergic to everything" + +[978467ab-bda4-49f7-b004-1d011ead947c] +description = "testing for chocolate allergy -> not allergic to anything" + +[59cf4e49-06ea-4139-a2c1-d7aad28f8cbc] +description = "testing for chocolate allergy -> allergic only to chocolate" + +[b0a7c07b-2db7-4f73-a180-565e07040ef1] +description = "testing for chocolate allergy -> allergic to chocolate and something else" + +[f5506893-f1ae-482a-b516-7532ba5ca9d2] +description = "testing for chocolate allergy -> allergic to something, but not chocolate" + +[02debb3d-d7e2-4376-a26b-3c974b6595c6] +description = "testing for chocolate allergy -> allergic to everything" + +[17f4a42b-c91e-41b8-8a76-4797886c2d96] +description = "testing for pollen allergy -> not allergic to anything" + +[7696eba7-1837-4488-882a-14b7b4e3e399] +description = "testing for pollen allergy -> allergic only to pollen" + +[9a49aec5-fa1f-405d-889e-4dfc420db2b6] +description = "testing for pollen allergy -> allergic to pollen 
and something else" + +[3cb8e79f-d108-4712-b620-aa146b1954a9] +description = "testing for pollen allergy -> allergic to something, but not pollen" + +[1dc3fe57-7c68-4043-9d51-5457128744b2] +description = "testing for pollen allergy -> allergic to everything" + +[d3f523d6-3d50-419b-a222-d4dfd62ce314] +description = "testing for cats allergy -> not allergic to anything" + +[eba541c3-c886-42d3-baef-c048cb7fcd8f] +description = "testing for cats allergy -> allergic only to cats" + +[ba718376-26e0-40b7-bbbe-060287637ea5] +description = "testing for cats allergy -> allergic to cats and something else" + +[3c6dbf4a-5277-436f-8b88-15a206f2d6c4] +description = "testing for cats allergy -> allergic to something, but not cats" + +[1faabb05-2b98-4995-9046-d83e4a48a7c1] +description = "testing for cats allergy -> allergic to everything" + +[f9c1b8e7-7dc5-4887-aa93-cebdcc29dd8f] +description = "list when: -> no allergies" + +[9e1a4364-09a6-4d94-990f-541a94a4c1e8] +description = "list when: -> just eggs" + +[8851c973-805e-4283-9e01-d0c0da0e4695] +description = "list when: -> just peanuts" + +[2c8943cb-005e-435f-ae11-3e8fb558ea98] +description = "list when: -> just strawberries" + +[6fa95d26-044c-48a9-8a7b-9ee46ec32c5c] +description = "list when: -> eggs and peanuts" + +[19890e22-f63f-4c5c-a9fb-fb6eacddfe8e] +description = "list when: -> more than eggs but not peanuts" + +[4b68f470-067c-44e4-889f-c9fe28917d2f] +description = "list when: -> lots of stuff" + +[0881b7c5-9efa-4530-91bd-68370d054bc7] +description = "list when: -> everything" + +[12ce86de-b347-42a0-ab7c-2e0570f0c65b] +description = "list when: -> no allergen score parts" + +[93c2df3e-4f55-4fed-8116-7513092819cd] +description = "list when: -> no allergen score parts without highest valid score" diff --git a/exercises/practice/allergies/allergies/allergies-tests.factor b/exercises/practice/allergies/allergies/allergies-tests.factor new file mode 100644 index 0000000..304017f --- /dev/null +++ 
b/exercises/practice/allergies/allergies/allergies-tests.factor @@ -0,0 +1,208 @@ +USING: allergies io kernel lexer tools.test unicode ; +IN: allergies.tests + +: STOP-HERE ( -- ) lexer get [ text>> length ] keep line<< ; parsing + +"Allergies:" print + +"not allergic to anything" print +{ f } +[ 0 "eggs" allergic-to ] unit-test + +STOP-HERE + +"allergic only to eggs" print +{ t } +[ 1 "eggs" allergic-to ] unit-test + +"allergic to eggs and something else" print +{ t } +[ 3 "eggs" allergic-to ] unit-test + +"allergic to something, but not eggs" print +{ f } +[ 2 "eggs" allergic-to ] unit-test + +"allergic to everything" print +{ t } +[ 255 "eggs" allergic-to ] unit-test + +"not allergic to anything" print +{ f } +[ 0 "peanuts" allergic-to ] unit-test + +"allergic only to peanuts" print +{ t } +[ 2 "peanuts" allergic-to ] unit-test + +"allergic to peanuts and something else" print +{ t } +[ 7 "peanuts" allergic-to ] unit-test + +"allergic to something, but not peanuts" print +{ f } +[ 5 "peanuts" allergic-to ] unit-test + +"allergic to everything" print +{ t } +[ 255 "peanuts" allergic-to ] unit-test + +"not allergic to anything" print +{ f } +[ 0 "shellfish" allergic-to ] unit-test + +"allergic only to shellfish" print +{ t } +[ 4 "shellfish" allergic-to ] unit-test + +"allergic to shellfish and something else" print +{ t } +[ 14 "shellfish" allergic-to ] unit-test + +"allergic to something, but not shellfish" print +{ f } +[ 10 "shellfish" allergic-to ] unit-test + +"allergic to everything" print +{ t } +[ 255 "shellfish" allergic-to ] unit-test + +"not allergic to anything" print +{ f } +[ 0 "strawberries" allergic-to ] unit-test + +"allergic only to strawberries" print +{ t } +[ 8 "strawberries" allergic-to ] unit-test + +"allergic to strawberries and something else" print +{ t } +[ 28 "strawberries" allergic-to ] unit-test + +"allergic to something, but not strawberries" print +{ f } +[ 20 "strawberries" allergic-to ] unit-test + +"allergic to everything" print 
+{ t } +[ 255 "strawberries" allergic-to ] unit-test + +"not allergic to anything" print +{ f } +[ 0 "tomatoes" allergic-to ] unit-test + +"allergic only to tomatoes" print +{ t } +[ 16 "tomatoes" allergic-to ] unit-test + +"allergic to tomatoes and something else" print +{ t } +[ 56 "tomatoes" allergic-to ] unit-test + +"allergic to something, but not tomatoes" print +{ f } +[ 40 "tomatoes" allergic-to ] unit-test + +"allergic to everything" print +{ t } +[ 255 "tomatoes" allergic-to ] unit-test + +"not allergic to anything" print +{ f } +[ 0 "chocolate" allergic-to ] unit-test + +"allergic only to chocolate" print +{ t } +[ 32 "chocolate" allergic-to ] unit-test + +"allergic to chocolate and something else" print +{ t } +[ 112 "chocolate" allergic-to ] unit-test + +"allergic to something, but not chocolate" print +{ f } +[ 80 "chocolate" allergic-to ] unit-test + +"allergic to everything" print +{ t } +[ 255 "chocolate" allergic-to ] unit-test + +"not allergic to anything" print +{ f } +[ 0 "pollen" allergic-to ] unit-test + +"allergic only to pollen" print +{ t } +[ 64 "pollen" allergic-to ] unit-test + +"allergic to pollen and something else" print +{ t } +[ 224 "pollen" allergic-to ] unit-test + +"allergic to something, but not pollen" print +{ f } +[ 160 "pollen" allergic-to ] unit-test + +"allergic to everything" print +{ t } +[ 255 "pollen" allergic-to ] unit-test + +"not allergic to anything" print +{ f } +[ 0 "cats" allergic-to ] unit-test + +"allergic only to cats" print +{ t } +[ 128 "cats" allergic-to ] unit-test + +"allergic to cats and something else" print +{ t } +[ 192 "cats" allergic-to ] unit-test + +"allergic to something, but not cats" print +{ f } +[ 64 "cats" allergic-to ] unit-test + +"allergic to everything" print +{ t } +[ 255 "cats" allergic-to ] unit-test + +"no allergies" print +{ HS{ } } +[ 0 allergens ] unit-test + +"just eggs" print +{ HS{ "eggs" } } +[ 1 allergens ] unit-test + +"just peanuts" print +{ HS{ "peanuts" } } +[ 2 
allergens ] unit-test + +"just strawberries" print +{ HS{ "strawberries" } } +[ 8 allergens ] unit-test + +"eggs and peanuts" print +{ HS{ "eggs" "peanuts" } } +[ 3 allergens ] unit-test + +"more than eggs but not peanuts" print +{ HS{ "eggs" "shellfish" } } +[ 5 allergens ] unit-test + +"lots of stuff" print +{ HS{ "strawberries" "tomatoes" "chocolate" "pollen" "cats" } } +[ 248 allergens ] unit-test + +"everything" print +{ HS{ "eggs" "peanuts" "shellfish" "strawberries" "tomatoes" "chocolate" "pollen" "cats" } } +[ 255 allergens ] unit-test + +"no allergen score parts" print +{ HS{ "eggs" "shellfish" "strawberries" "tomatoes" "chocolate" "pollen" "cats" } } +[ 509 allergens ] unit-test + +"no allergen score parts without highest valid score" print +{ HS{ "eggs" } } +[ 257 allergens ] unit-test diff --git a/exercises/practice/allergies/allergies/allergies.factor b/exercises/practice/allergies/allergies/allergies.factor new file mode 100644 index 0000000..57106e1 --- /dev/null +++ b/exercises/practice/allergies/allergies/allergies.factor @@ -0,0 +1,8 @@ +USING: kernel ; +IN: allergies + +: allergens ( score -- set ) + "unimplemented" throw ; + +: allergic-to ( score item -- ? ) + "unimplemented" throw ; diff --git a/exercises/practice/change/.docs/instructions.md b/exercises/practice/change/.docs/instructions.md new file mode 100644 index 0000000..5887f4c --- /dev/null +++ b/exercises/practice/change/.docs/instructions.md @@ -0,0 +1,8 @@ +# Instructions + +Determine the fewest number of coins to give a customer so that the sum of their values equals the correct amount of change. + +## Examples + +- An amount of 15 with available coin values [1, 5, 10, 25, 100] should return one coin of value 5 and one coin of value 10, or [5, 10]. +- An amount of 40 with available coin values [1, 5, 10, 25, 100] should return one coin of value 5, one coin of value 10, and one coin of value 25, or [5, 10, 25]. 
diff --git a/exercises/practice/change/.docs/introduction.md b/exercises/practice/change/.docs/introduction.md new file mode 100644 index 0000000..b4f8308 --- /dev/null +++ b/exercises/practice/change/.docs/introduction.md @@ -0,0 +1,26 @@ +# Introduction + +In the mystical village of Coinholt, you stand behind the counter of your bakery, arranging a fresh batch of pastries. +The door creaks open, and in walks Denara, a skilled merchant with a keen eye for quality goods. +After a quick meal, she slides a shimmering coin across the counter, representing a value of 100 units. + +You smile, taking the coin, and glance at the total cost of the meal: 88 units. +That means you need to return 12 units in change. + +Denara holds out her hand expectantly. +"Just give me the fewest coins," she says with a smile. +"My pouch is already full, and I don't want to risk losing them on the road." + +You know you have a few options. +"We have Lumis (worth 10 units), Viras (worth 5 units), and Zenth (worth 2 units) available for change." + +You quickly calculate the possibilities in your head: + +- one Lumis (1 × 10 units) + one Zenth (1 × 2 units) = 2 coins total +- two Viras (2 × 5 units) + one Zenth (1 × 2 units) = 3 coins total +- six Zenth (6 × 2 units) = 6 coins total + +"The best choice is two coins: one Lumis and one Zenth," you say, handing her the change. + +Denara smiles, clearly impressed. +"As always, you've got it right." 
diff --git a/exercises/practice/change/.meta/config.json b/exercises/practice/change/.meta/config.json new file mode 100644 index 0000000..a23fdd1 --- /dev/null +++ b/exercises/practice/change/.meta/config.json @@ -0,0 +1,19 @@ +{ + "authors": [ + "keiravillekode" + ], + "files": { + "solution": [ + "change/change.factor" + ], + "test": [ + "change/change-tests.factor" + ], + "example": [ + ".meta/example.factor" + ] + }, + "blurb": "Correctly determine change to be given using the least number of coins.", + "source": "Software Craftsmanship - Coin Change Kata", + "source_url": "https://web.archive.org/web/20130115115225/http://craftsmanship.sv.cmu.edu:80/exercises/coin-change-kata" +} diff --git a/exercises/practice/change/.meta/example.factor b/exercises/practice/change/.meta/example.factor new file mode 100644 index 0000000..586f0f9 --- /dev/null +++ b/exercises/practice/change/.meta/example.factor @@ -0,0 +1,31 @@ +USING: arrays kernel locals math ranges sequences sets sorting ; +IN: change + +ERROR: cannot-make-change ; + +:: find-fewest-coins ( coins target -- result ) + target 0 < [ cannot-make-change ] when + target zero? [ { } ] [ + target 1 + f :> best + { } 0 best set-nth + coins members sort :> coin-list + 1 target [a..b] [| amount | + f :> winner! + coin-list [| coin | + amount coin - :> rest + rest 0 >= [ + rest best nth :> sub + sub f = not [ + sub coin suffix :> cand + winner f = + [ cand winner! ] + [ cand length winner length < [ cand winner! 
] when ] if + ] when + ] when + ] each + winner amount best set-nth + ] each + target best nth :> answer + answer f = [ cannot-make-change ] when + answer sort + ] if ; diff --git a/exercises/practice/change/.meta/generator.jl b/exercises/practice/change/.meta/generator.jl new file mode 100644 index 0000000..f1c8120 --- /dev/null +++ b/exercises/practice/change/.meta/generator.jl @@ -0,0 +1,18 @@ +module Change + +function gen_test_case(case) + coins = case["input"]["coins"] + target = case["input"]["target"] + expected = case["expected"] + + coins_str = isempty(coins) ? "HS{ }" : "HS{ " * join(coins, " ") * " }" + + if expected isa AbstractDict && haskey(expected, "error") + return """[ $(coins_str) $(target) find-fewest-coins ] [ cannot-make-change? ] must-fail-with""" + end + + expected_str = isempty(expected) ? "{ }" : "{ " * join(expected, " ") * " }" + return """{ $(expected_str) }\n[ $(coins_str) $(target) find-fewest-coins ] unit-test""" +end + +end diff --git a/exercises/practice/change/.meta/tests.toml b/exercises/practice/change/.meta/tests.toml new file mode 100644 index 0000000..2d2f44b --- /dev/null +++ b/exercises/practice/change/.meta/tests.toml @@ -0,0 +1,49 @@ +# This is an auto-generated file. +# +# Regenerating this file via `configlet sync` will: +# - Recreate every `description` key/value pair +# - Recreate every `reimplements` key/value pair, where they exist in problem-specifications +# - Remove any `include = true` key/value pair (an omitted `include` key implies inclusion) +# - Preserve any other key/value pair +# +# As user-added comments (using the # character) will be removed when this file +# is regenerated, comments can be added via a `comment` key. 
+ +[d0ebd0e1-9d27-4609-a654-df5c0ba1d83a] +description = "change for 1 cent" + +[36887bea-7f92-4a9c-b0cc-c0e886b3ecc8] +description = "single coin change" + +[cef21ccc-0811-4e6e-af44-f011e7eab6c6] +description = "multiple coin change" + +[d60952bc-0c1a-4571-bf0c-41be72690cb3] +description = "change with Lilliputian Coins" + +[408390b9-fafa-4bb9-b608-ffe6036edb6c] +description = "change with Lower Elbonia Coins" + +[7421a4cb-1c48-4bf9-99c7-7f049689132f] +description = "large target values" + +[f79d2e9b-0ae3-4d6a-bb58-dc978b0dba28] +description = "possible change without unit coins available" + +[9a166411-d35d-4f7f-a007-6724ac266178] +description = "another possible change without unit coins available" + +[ce0f80d5-51c3-469d-818c-3e69dbd25f75] +description = "a greedy approach is not optimal" + +[bbbcc154-e9e9-4209-a4db-dd6d81ec26bb] +description = "no coins make 0 change" + +[c8b81d5a-49bd-4b61-af73-8ee5383a2ce1] +description = "error testing for change smaller than the smallest of coins" + +[3c43e3e4-63f9-46ac-9476-a67516e98f68] +description = "error if no combination can add up to target" + +[8fe1f076-9b2d-4f44-89fe-8a6ccd63c8f3] +description = "cannot find negative change values" diff --git a/exercises/practice/change/change/change-tests.factor b/exercises/practice/change/change/change-tests.factor new file mode 100644 index 0000000..7e06066 --- /dev/null +++ b/exercises/practice/change/change/change-tests.factor @@ -0,0 +1,57 @@ +USING: change io kernel lexer tools.test unicode ; +IN: change.tests + +: STOP-HERE ( -- ) lexer get [ text>> length ] keep line<< ; parsing + +"Change:" print + +"change for 1 cent" print +{ { 1 } } +[ HS{ 1 5 10 25 } 1 find-fewest-coins ] unit-test + +STOP-HERE + +"single coin change" print +{ { 25 } } +[ HS{ 1 5 10 25 100 } 25 find-fewest-coins ] unit-test + +"multiple coin change" print +{ { 5 10 } } +[ HS{ 1 5 10 25 100 } 15 find-fewest-coins ] unit-test + +"change with Lilliputian Coins" print +{ { 4 4 15 } } +[ HS{ 1 4 15 20 50 } 
23 find-fewest-coins ] unit-test + +"change with Lower Elbonia Coins" print +{ { 21 21 21 } } +[ HS{ 1 5 10 21 25 } 63 find-fewest-coins ] unit-test + +"large target values" print +{ { 2 2 5 20 20 50 100 100 100 100 100 100 100 100 100 } } +[ HS{ 1 2 5 10 20 50 100 } 999 find-fewest-coins ] unit-test + +"possible change without unit coins available" print +{ { 2 2 2 5 10 } } +[ HS{ 2 5 10 20 50 } 21 find-fewest-coins ] unit-test + +"another possible change without unit coins available" print +{ { 4 4 4 5 5 5 } } +[ HS{ 4 5 } 27 find-fewest-coins ] unit-test + +"a greedy approach is not optimal" print +{ { 10 10 } } +[ HS{ 1 10 11 } 20 find-fewest-coins ] unit-test + +"no coins make 0 change" print +{ { } } +[ HS{ 1 5 10 21 25 } 0 find-fewest-coins ] unit-test + +"error testing for change smaller than the smallest of coins" print +[ HS{ 5 10 } 3 find-fewest-coins ] [ cannot-make-change? ] must-fail-with + +"error if no combination can add up to target" print +[ HS{ 5 10 } 94 find-fewest-coins ] [ cannot-make-change? ] must-fail-with + +"cannot find negative change values" print +[ HS{ 1 2 5 } -5 find-fewest-coins ] [ cannot-make-change? ] must-fail-with diff --git a/exercises/practice/change/change/change.factor b/exercises/practice/change/change/change.factor new file mode 100644 index 0000000..2bf13cb --- /dev/null +++ b/exercises/practice/change/change/change.factor @@ -0,0 +1,7 @@ +USING: kernel ; +IN: change + +ERROR: cannot-make-change ; + +: find-fewest-coins ( coins target -- result ) + "unimplemented" throw ; diff --git a/exercises/practice/raindrops/.meta/example.factor b/exercises/practice/raindrops/.meta/example.factor index c58cb67..2bd907d 100644 --- a/exercises/practice/raindrops/.meta/example.factor +++ b/exercises/practice/raindrops/.meta/example.factor @@ -6,5 +6,5 @@ IN: raindrops [ 3 divisor? "Pling" and ] [ 5 divisor? "Plang" and ] [ 7 divisor? 
"Plong" and ] tri - 3array sift + 3array [ ] filter [ number>string ] [ concat swap drop ] if-empty ; diff --git a/exercises/practice/run-length-encoding/.meta/example.factor b/exercises/practice/run-length-encoding/.meta/example.factor index edb7353..0f4f1db 100644 --- a/exercises/practice/run-length-encoding/.meta/example.factor +++ b/exercises/practice/run-length-encoding/.meta/example.factor @@ -1,4 +1,4 @@ -USING: grouping kernel locals math math.parser sequences +USING: arrays grouping kernel locals math math.parser sequences splitting.monotonic strings unicode ; IN: run-length-encoding @@ -17,7 +17,7 @@ IN: run-length-encoding n 10 * ch CHAR: 0 - + n! ] [ n 0 = [ 1 n! ] when - acc n ch 1string concat append acc! + acc n ch >string append acc! 0 n! ] if ] each